/* -*- c-basic-offset: 8 -*-
 * fw-sbp2.c -- SBP2 driver (SCSI over IEEE1394)
 *
 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

/* I don't know why the SCSI stack doesn't define something like this... */
typedef void (*scsi_done_fn_t) (struct scsi_cmnd *);

static const char sbp2_driver_name[] = "sbp2";

struct sbp2_device {
        struct fw_unit *unit;
        struct fw_address_handler address_handler;
        struct list_head orb_list;
        u64 management_agent_address;
        u64 command_block_agent_address;
        u32 workarounds;
        int login_id;

        /* We cache these addresses and only update them once we've
         * logged in or reconnected to the sbp2 device. That way, any
         * IO to the device will automatically fail and get retried if
         * it happens in a window where the device is not ready to
         * handle it (e.g. after a bus reset but before we reconnect). */
        int node_id;
        int address_high;
        int generation;

        struct work_struct work;
        struct Scsi_Host *scsi_host;
};

#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_ORB_NULL 0x80000000

#define SBP2_DIRECTION_TO_MEDIA 0x0
#define SBP2_DIRECTION_FROM_MEDIA 0x1

/* Unit directory keys */
#define SBP2_COMMAND_SET_SPECIFIER 0x38
#define SBP2_COMMAND_SET 0x39
#define SBP2_COMMAND_SET_REVISION 0x3b
#define SBP2_FIRMWARE_REVISION 0x3c

/* Flags for detected oddities and brokenness */
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
#define SBP2_WORKAROUND_OVERRIDE 0x100

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST 0x0
#define SBP2_QUERY_LOGINS_REQUEST 0x1
#define SBP2_RECONNECT_REQUEST 0x3
#define SBP2_SET_PASSWORD_REQUEST 0x4
#define SBP2_LOGOUT_REQUEST 0x7
#define SBP2_ABORT_TASK_REQUEST 0xb
#define SBP2_ABORT_TASK_SET 0xc
#define SBP2_LOGICAL_UNIT_RESET 0xe
#define SBP2_TARGET_RESET_REQUEST 0xf

/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE 0x00
#define SBP2_AGENT_RESET 0x04
#define SBP2_ORB_POINTER 0x08
#define SBP2_DOORBELL 0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14

/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE 0x0
#define SBP2_STATUS_TRANSPORT_FAILURE 0x1
#define SBP2_STATUS_ILLEGAL_REQUEST 0x2
#define SBP2_STATUS_VENDOR_DEPENDENT 0x3
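
/* The target reports completion by writing an SBP-2 status block to the
 * status FIFO address we register below. Once fw_memcpy_from_be32() has
 * converted the header to host byte order, the first quadlet packs, from
 * the top bit down: src (2 bits), resp (2), dead (1), len (3), sbp_status
 * (8) and the high 16 bits of the ORB offset; the second quadlet holds
 * the low 32 bits of the ORB offset. */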
#define status_get_orb_high(v) ((v).status & 0xffff)
#define status_get_sbp_status(v) (((v).status >> 16) & 0xff)
#define status_get_len(v) (((v).status >> 24) & 0x07)
#define status_get_dead(v) (((v).status >> 27) & 0x01)
#define status_get_response(v) (((v).status >> 28) & 0x03)
#define status_get_source(v) (((v).status >> 30) & 0x03)
#define status_get_orb_low(v) ((v).orb_low)
#define status_get_data(v) ((v).data)

struct sbp2_status {
        u32 status;
        u32 orb_low;
        u8 data[24];
};

struct sbp2_pointer {
        u32 high;
        u32 low;
};
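
/* Common part of management and command ORBs: the outgoing transaction
 * that posts the ORB's bus address to the target, the DMA address of the
 * ORB itself, and the link on the per-device orb_list that
 * sbp2_status_write() searches when status comes back. */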
struct sbp2_orb {
        struct fw_transaction t;
        dma_addr_t request_bus;
        int rcode;
        struct sbp2_pointer pointer;
        void (*callback) (struct sbp2_orb * orb, struct sbp2_status * status);
        struct list_head link;
};

#define management_orb_lun(v) ((v))
#define management_orb_function(v) ((v) << 16)
#define management_orb_reconnect(v) ((v) << 20)
#define management_orb_exclusive ((1) << 28)
#define management_orb_request_format(v) ((v) << 29)
#define management_orb_notify ((1) << 31)

#define management_orb_response_length(v) ((v))
#define management_orb_password_length(v) ((v) << 16)

struct sbp2_management_orb {
        struct sbp2_orb base;
        struct {
                struct sbp2_pointer password;
                struct sbp2_pointer response;
                u32 misc;
                u32 length;
                struct sbp2_pointer status_fifo;
        } request;
        __be32 response[4];
        dma_addr_t response_bus;
        struct completion done;
        struct sbp2_status status;
};

#define login_response_get_login_id(v) ((v).misc & 0xffff)
#define login_response_get_length(v) (((v).misc >> 16) & 0xffff)

struct sbp2_login_response {
        u32 misc;
        struct sbp2_pointer command_block_agent;
        u32 reconnect_hold;
};

#define command_orb_data_size(v) ((v))
#define command_orb_page_size(v) ((v) << 16)
#define command_orb_page_table_present ((1) << 19)
#define command_orb_max_payload(v) ((v) << 20)
#define command_orb_speed(v) ((v) << 24)
#define command_orb_direction(v) ((v) << 27)
#define command_orb_request_format(v) ((v) << 29)
#define command_orb_notify ((1) << 31)

struct sbp2_command_orb {
        struct sbp2_orb base;
        struct {
                struct sbp2_pointer next;
                struct sbp2_pointer data_descriptor;
                u32 misc;
                u8 command_block[12];
        } request;
        struct scsi_cmnd *cmd;
        scsi_done_fn_t done;
        struct fw_unit *unit;
        struct sbp2_pointer page_table[SG_ALL];
        dma_addr_t page_table_bus;
        dma_addr_t request_buffer_bus;
};

/*
 * List of devices with known bugs.
 *
 * The firmware_revision field, masked with 0xffff00, is the best
 * indicator for the type of bridge chip of a device. It yields a few
 * false positives but this did not break correctly behaving devices
 * so far. We use ~0 as a wildcard, since the 24 bit values we get
 * from the config rom can never match that.
 */
static const struct {
        u32 firmware_revision;
        u32 model;
        unsigned workarounds;
} sbp2_workarounds_table[] = {
        /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
                .firmware_revision = 0x002800,
                .model = 0x001010,
                .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
                               SBP2_WORKAROUND_MODE_SENSE_8,
        },
        /* Initio bridges, actually only needed for some older ones */ {
                .firmware_revision = 0x000200,
                .model = ~0,
                .workarounds = SBP2_WORKAROUND_INQUIRY_36,
        },
        /* Symbios bridge */ {
                .firmware_revision = 0xa0b800,
                .model = ~0,
                .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
        },

        /* There are iPods (2nd gen, 3rd gen) with model_id == 0, but
         * these iPods do not feature the read_capacity bug according
         * to one report. Read_capacity behaviour as well as model_id
         * could change due to Apple-supplied firmware updates though. */

        /* iPod 4th generation. */ {
                .firmware_revision = 0x0a2700,
                .model = 0x000021,
                .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
        },
        /* iPod mini */ {
                .firmware_revision = 0x0a2700,
                .model = 0x000023,
                .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
        },
        /* iPod Photo */ {
                .firmware_revision = 0x0a2700,
                .model = 0x00007e,
                .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
        }
};
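
/* Address handler callback for the status FIFO: the target reports each
 * ORB's completion by writing a status block to the address registered in
 * sd->address_handler, which sbp2_send_management_orb() passes to the
 * target in each management ORB's status_fifo field. */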
static void
sbp2_status_write(struct fw_card *card, struct fw_request *request,
                  int tcode, int destination, int source,
                  int generation, int speed,
                  unsigned long long offset,
                  void *payload, size_t length, void *callback_data)
{
        struct sbp2_device *sd = callback_data;
        struct sbp2_orb *orb;
        struct sbp2_status status;
        size_t header_size;
        unsigned long flags;

        if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
            length == 0 || length > sizeof status) {
                fw_send_response(card, request, RCODE_TYPE_ERROR);
                return;
        }

        header_size = min(length, 2 * sizeof(u32));
        fw_memcpy_from_be32(&status, payload, header_size);
        if (length > header_size)
                memcpy(status.data, payload + 8, length - header_size);
        if (status_get_source(status) == 2 || status_get_source(status) == 3) {
                fw_notify("non-orb related status write, not handled\n");
                fw_send_response(card, request, RCODE_COMPLETE);
                return;
        }

        /* Lookup the orb corresponding to this status write. */
        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(orb, &sd->orb_list, link) {
                if (status_get_orb_high(status) == 0 &&
                    status_get_orb_low(status) == orb->request_bus) {
                        list_del(&orb->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&card->lock, flags);

        if (&orb->link != &sd->orb_list)
                orb->callback(orb, &status);
        else
                fw_error("status write for unknown orb\n");

        fw_send_response(card, request, RCODE_COMPLETE);
}
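
/* Completion callback for the block write that posts an ORB pointer to
 * one of the target's agent registers. On any rcode other than
 * RCODE_COMPLETE the ORB never reached the target, so take it off the
 * list and complete it with a NULL status. */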
static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct sbp2_orb *orb = data;
        unsigned long flags;

        orb->rcode = rcode;
        if (rcode != RCODE_COMPLETE) {
                spin_lock_irqsave(&card->lock, flags);
                list_del(&orb->link);
                spin_unlock_irqrestore(&card->lock, flags);
                orb->callback(orb, NULL);
        }
}
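
/* Queue the ORB on the device's orb_list and write its bus address, as a
 * big-endian sbp2_pointer, to the agent register at the given offset --
 * ORB_POINTER for command ORBs, the management agent for management ORBs.
 * The target then fetches the ORB itself from host memory by DMA. */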
static void
sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
              int node_id, int generation, u64 offset)
{
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;
        unsigned long flags;

        orb->pointer.high = 0;
        orb->pointer.low = orb->request_bus;
        fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof orb->pointer);

        spin_lock_irqsave(&device->card->lock, flags);
        list_add_tail(&orb->link, &sd->orb_list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
                        node_id | LOCAL_BUS, generation,
                        device->node->max_speed, offset,
                        &orb->pointer, sizeof orb->pointer,
                        complete_transaction, orb);
}

static void sbp2_cancel_orbs(struct fw_unit *unit)
{
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;
        struct sbp2_orb *orb, *next;
        struct list_head list;
        unsigned long flags;

        INIT_LIST_HEAD(&list);
        spin_lock_irqsave(&device->card->lock, flags);
        list_splice_init(&sd->orb_list, &list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        list_for_each_entry_safe(orb, next, &list, link) {
                orb->rcode = RCODE_CANCELLED;
                orb->callback(orb, NULL);
        }
}

static void
complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
        struct sbp2_management_orb *orb =
                (struct sbp2_management_orb *)base_orb;

        if (status)
                memcpy(&orb->status, status, sizeof *status);
        complete(&orb->done);
}
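
/* Synchronous helper used for login, logout and reconnect: build a
 * management ORB in host memory, DMA-map its request and response
 * buffers, point the ORB's status_fifo at our status handler, post the
 * ORB to the management agent and wait up to ten seconds for the status
 * write. */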
static int
sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
                         int function, int lun, void *response)
{
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;
        struct sbp2_management_orb *orb;
        unsigned long timeout;
        int retval = -ENOMEM;

        orb = kzalloc(sizeof *orb, GFP_ATOMIC);
        if (orb == NULL)
                return -ENOMEM;

        /* The sbp2 device is going to send a block read request to
         * read out the request from host memory, so map it for
         * dma. */
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof orb->request, DMA_TO_DEVICE);
        if (orb->base.request_bus == 0)
                goto out;

        orb->response_bus =
                dma_map_single(device->card->device, &orb->response,
                               sizeof orb->response, DMA_FROM_DEVICE);
        if (orb->response_bus == 0)
                goto out;

        orb->request.response.high = 0;
        orb->request.response.low = orb->response_bus;

        orb->request.misc =
                management_orb_notify |
                management_orb_function(function) |
                management_orb_lun(lun);
        orb->request.length =
                management_orb_response_length(sizeof orb->response);

        orb->request.status_fifo.high = sd->address_handler.offset >> 32;
        orb->request.status_fifo.low = sd->address_handler.offset;

        /* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
         * login and 1 second reconnect time. The reconnect setting
         * is probably fine, but the exclusive login should be an
         * option. */
        if (function == SBP2_LOGIN_REQUEST) {
                orb->request.misc |=
                        management_orb_exclusive |
                        management_orb_reconnect(0);
        }

        fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request);

        init_completion(&orb->done);
        orb->base.callback = complete_management_orb;

        sbp2_send_orb(&orb->base, unit,
                      node_id, generation, sd->management_agent_address);

        timeout = wait_for_completion_timeout(&orb->done, 10 * HZ);

        /* FIXME: Handle bus reset race here. */

        retval = -EIO;
        if (orb->base.rcode != RCODE_COMPLETE) {
                fw_error("management write failed, rcode 0x%02x\n",
                         orb->base.rcode);
                goto out;
        }

        if (timeout == 0) {
                fw_error("orb reply timed out, rcode=0x%02x\n",
                         orb->base.rcode);
                goto out;
        }

        if (status_get_response(orb->status) != 0 ||
            status_get_sbp_status(orb->status) != 0) {
                fw_error("error status: %d:%d\n",
                         status_get_response(orb->status),
                         status_get_sbp_status(orb->status));
                goto out;
        }

        retval = 0;
 out:
        dma_unmap_single(device->card->device, orb->base.request_bus,
                         sizeof orb->request, DMA_TO_DEVICE);
        dma_unmap_single(device->card->device, orb->response_bus,
                         sizeof orb->response, DMA_FROM_DEVICE);

        if (response)
                fw_memcpy_from_be32(response,
                                    orb->response, sizeof orb->response);
        kfree(orb);

        return retval;
}

static void
complete_agent_reset_write(struct fw_card *card, int rcode,
                           void *payload, size_t length, void *data)
{
        struct fw_transaction *t = data;

        fw_notify("agent reset write rcode=%d\n", rcode);
        kfree(t);
}
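
/* A quadlet write of any value to the AGENT_RESET register returns the
 * command block fetch agent to the reset state, so a dummy zero is
 * written here. The request is fire-and-forget; the completion callback
 * above just logs the rcode and frees the transaction. */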
static int sbp2_agent_reset(struct fw_unit *unit)
{
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;
        struct fw_transaction *t;
        static u32 zero;

        t = kzalloc(sizeof *t, GFP_ATOMIC);
        if (t == NULL)
                return -ENOMEM;

        fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
                        sd->node_id | LOCAL_BUS, sd->generation, SCODE_400,
                        sd->command_block_agent_address + SBP2_AGENT_RESET,
                        &zero, sizeof zero, complete_agent_reset_write, t);

        return 0;
}

static int add_scsi_devices(struct fw_unit *unit);
static void remove_scsi_devices(struct fw_unit *unit);

static int sbp2_probe(struct device *dev)
{
        struct fw_unit *unit = fw_unit(dev);
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd;
        struct fw_csr_iterator ci;
        int i, key, value, lun, retval;
        int node_id, generation, local_node_id;
        struct sbp2_login_response response;
        u32 model, firmware_revision;

        sd = kzalloc(sizeof *sd, GFP_KERNEL);
        if (sd == NULL)
                return -ENOMEM;

        unit->device.driver_data = sd;
        sd->unit = unit;
        INIT_LIST_HEAD(&sd->orb_list);

        sd->address_handler.length = 0x100;
        sd->address_handler.address_callback = sbp2_status_write;
        sd->address_handler.callback_data = sd;

        if (fw_core_add_address_handler(&sd->address_handler,
                                        &fw_high_memory_region) < 0) {
                kfree(sd);
                return -EBUSY;
        }

        if (fw_device_enable_phys_dma(device) < 0) {
                fw_core_remove_address_handler(&sd->address_handler);
                kfree(sd);
                return -EBUSY;
        }

        /* Scan unit directory to get management agent address,
         * firmware revision and model. Initialize firmware_revision
         * and model to values that won't match anything in our table. */
        firmware_revision = 0xff000000;
        model = 0xff000000;
        fw_csr_iterator_init(&ci, unit->directory);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
                switch (key) {
                case CSR_DEPENDENT_INFO | CSR_OFFSET:
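                        /* The config rom offset entry is counted in
                         * quadlets from the base of the initial register
                         * space (0xfffff0000000), hence the factor of 4. */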
                        sd->management_agent_address =
                                0xfffff0000000ULL + 4 * value;
                        break;

                case SBP2_FIRMWARE_REVISION:
                        firmware_revision = value;
                        break;

                case CSR_MODEL:
                        model = value;
                        break;
                }
        }

        for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
                if (sbp2_workarounds_table[i].firmware_revision !=
                    (firmware_revision & 0xffffff00))
                        continue;
                if (sbp2_workarounds_table[i].model != model &&
                    sbp2_workarounds_table[i].model != ~0)
                        continue;
                sd->workarounds |= sbp2_workarounds_table[i].workarounds;
                break;
        }

        if (sd->workarounds)
                fw_notify("Workarounds for node %s: 0x%x "
                          "(firmware_revision 0x%06x, model_id 0x%06x)\n",
                          unit->device.bus_id,
                          sd->workarounds, firmware_revision, model);

        /* FIXME: Make this work for multi-lun devices. */
        lun = 0;

        generation = device->card->generation;
        node_id = device->node->node_id;
        local_node_id = device->card->local_node->node_id;

        /* FIXME: We should probably do this from a keventd callback
         * and handle retries by rescheduling the work. */
        if (sbp2_send_management_orb(unit, node_id, generation,
                                     SBP2_LOGIN_REQUEST, lun, &response) < 0) {
                fw_core_remove_address_handler(&sd->address_handler);
                kfree(sd);
                return -EBUSY;
        }

        sd->generation = generation;
        sd->node_id = node_id;
        sd->address_high = (LOCAL_BUS | local_node_id) << 16;

        /* Get command block agent offset and login id. */
        sd->command_block_agent_address =
                ((u64) response.command_block_agent.high << 32) |
                response.command_block_agent.low;
        sd->login_id = login_response_get_login_id(response);

        fw_notify("logged in to sbp2 unit %s\n", unit->device.bus_id);
        fw_notify(" - management_agent_address: 0x%012llx\n",
                  (unsigned long long) sd->management_agent_address);
        fw_notify(" - command_block_agent_address: 0x%012llx\n",
                  (unsigned long long) sd->command_block_agent_address);
        fw_notify(" - status write address: 0x%012llx\n",
                  (unsigned long long) sd->address_handler.offset);

#if 0
        /* FIXME: The linux1394 sbp2 does this last step. */
        sbp2_set_busy_timeout(scsi_id);
#endif

        sbp2_agent_reset(unit);

        retval = add_scsi_devices(unit);
        if (retval < 0) {
                sbp2_send_management_orb(unit, sd->node_id, sd->generation,
                                         SBP2_LOGOUT_REQUEST, sd->login_id,
                                         NULL);
                fw_core_remove_address_handler(&sd->address_handler);
                kfree(sd);
                return retval;
        }

        return 0;
}

static int sbp2_remove(struct device *dev)
{
        struct fw_unit *unit = fw_unit(dev);
        struct sbp2_device *sd = unit->device.driver_data;

        sbp2_send_management_orb(unit, sd->node_id, sd->generation,
                                 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
        remove_scsi_devices(unit);

        fw_core_remove_address_handler(&sd->address_handler);
        kfree(sd);

        fw_notify("removed sbp2 unit %s\n", dev->bus_id);

        return 0;
}

static void sbp2_reconnect(struct work_struct *work)
{
        struct sbp2_device *sd = container_of(work, struct sbp2_device, work);
        struct fw_unit *unit = sd->unit;
        struct fw_device *device = fw_device(unit->device.parent);
        int generation, node_id, local_node_id;

        fw_notify("in sbp2_reconnect, reconnecting to unit %s\n",
                  unit->device.bus_id);

        generation = device->card->generation;
        node_id = device->node->node_id;
        local_node_id = device->card->local_node->node_id;

        sbp2_send_management_orb(unit, node_id, generation,
                                 SBP2_RECONNECT_REQUEST, sd->login_id, NULL);
        /* FIXME: handle reconnect failures. */

        sbp2_cancel_orbs(unit);

        sd->generation = generation;
        sd->node_id = node_id;
        sd->address_high = (LOCAL_BUS | local_node_id) << 16;
}
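
/* The .update hook runs after a bus reset: the bus generation and node
 * IDs may have changed, so re-enable physical DMA for the (possibly
 * renumbered) node and schedule a reconnect from workqueue context. */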
static void sbp2_update(struct fw_unit *unit)
{
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;

        fw_device_enable_phys_dma(device);
        INIT_WORK(&sd->work, sbp2_reconnect);
        schedule_work(&sd->work);
}

#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483

static const struct fw_device_id sbp2_id_table[] = {
        {
                .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
                .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
                .version = SBP2_SW_VERSION_ENTRY,
        },
        { }
};

static struct fw_driver sbp2_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = sbp2_driver_name,
                .bus = &fw_bus_type,
                .probe = sbp2_probe,
                .remove = sbp2_remove,
        },
        .update = sbp2_update,
        .id_table = sbp2_id_table,
};
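
/* Translate the SCSI status block carried in the SBP-2 status data into a
 * fixed-format (response code 0x70) sense buffer, and return the host
 * byte for the command's result. */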
static unsigned int sbp2_status_to_sense_data(u8 * sbp2_status, u8 * sense_data)
{
        sense_data[0] = 0x70;
        sense_data[1] = 0x0;
        sense_data[2] = sbp2_status[1];
        sense_data[3] = sbp2_status[4];
        sense_data[4] = sbp2_status[5];
        sense_data[5] = sbp2_status[6];
        sense_data[6] = sbp2_status[7];
        sense_data[7] = 10;
        sense_data[8] = sbp2_status[8];
        sense_data[9] = sbp2_status[9];
        sense_data[10] = sbp2_status[10];
        sense_data[11] = sbp2_status[11];
        sense_data[12] = sbp2_status[2];
        sense_data[13] = sbp2_status[3];
        sense_data[14] = sbp2_status[12];
        sense_data[15] = sbp2_status[13];

        switch (sbp2_status[0] & 0x3f) {
        case SAM_STAT_GOOD:
                return DID_OK;

        case SAM_STAT_CHECK_CONDITION:
                /* return CHECK_CONDITION << 1 | DID_OK << 16; */
                return DID_OK;

        case SAM_STAT_BUSY:
                return DID_BUS_BUSY;

        case SAM_STAT_CONDITION_MET:
        case SAM_STAT_RESERVATION_CONFLICT:
        case SAM_STAT_COMMAND_TERMINATED:
        default:
                return DID_ERROR;
        }
}

static void
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
        struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
        struct fw_unit *unit = orb->unit;
        struct fw_device *device = fw_device(unit->device.parent);
        struct scatterlist *sg;
        int result;

        if (status != NULL) {
                if (status_get_dead(*status)) {
                        fw_notify("agent died, issuing agent reset\n");
                        sbp2_agent_reset(unit);
                }

                switch (status_get_response(*status)) {
                case SBP2_STATUS_REQUEST_COMPLETE:
                        result = DID_OK;
                        break;
                case SBP2_STATUS_TRANSPORT_FAILURE:
                        result = DID_BUS_BUSY;
                        break;
                case SBP2_STATUS_ILLEGAL_REQUEST:
                case SBP2_STATUS_VENDOR_DEPENDENT:
                default:
                        result = DID_ERROR;
                        break;
                }

                if (result == DID_OK && status_get_len(*status) > 1)
                        result = sbp2_status_to_sense_data(status_get_data(*status),
                                                           orb->cmd->sense_buffer);
        } else {
                /* If the orb completes with status == NULL, something
                 * went wrong, typically a bus reset happened mid-orb
                 * or when sending the write (less likely). */
                fw_notify("no command orb status, rcode=%d\n",
                          orb->base.rcode);
                result = DID_ERROR;
        }

        dma_unmap_single(device->card->device, orb->base.request_bus,
                         sizeof orb->request, DMA_TO_DEVICE);

        if (orb->cmd->use_sg > 0) {
                sg = (struct scatterlist *)orb->cmd->request_buffer;
                dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
                             orb->cmd->sc_data_direction);
        }

        if (orb->page_table_bus != 0)
                dma_unmap_single(device->card->device, orb->page_table_bus,
                                 sizeof orb->page_table_bus, DMA_TO_DEVICE);

        if (orb->request_buffer_bus != 0)
                dma_unmap_single(device->card->device, orb->request_buffer_bus,
                                 sizeof orb->request_buffer_bus,
                                 DMA_FROM_DEVICE);

        orb->cmd->result = result << 16;
        orb->done(orb->cmd);

        kfree(orb);
}

static void sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
{
        struct fw_unit *unit =
                (struct fw_unit *)orb->cmd->device->host->hostdata[0];
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;
        struct scatterlist *sg;
        int sg_len, l, i, j, count;
        size_t size;
        dma_addr_t sg_addr;

        sg = (struct scatterlist *)orb->cmd->request_buffer;
        count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
                           orb->cmd->sc_data_direction);

        /* Handle the special case where there is only one element in
         * the scatter list by converting it to an immediate block
         * request. This is also a workaround for broken devices such
         * as the second generation iPod which doesn't support page
         * tables. */
        if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
                orb->request.data_descriptor.high = sd->address_high;
                orb->request.data_descriptor.low = sg_dma_address(sg);
                orb->request.misc |=
                        command_orb_data_size(sg_dma_len(sg));
                return;
        }

        /* Convert the scatterlist to an sbp2 page table. If any
         * scatterlist entries are too big for sbp2, we split them up
         * as we go. */
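        /* Each page table element is two quadlets: the segment length in
         * the upper 16 bits of the first quadlet and the 32-bit segment
         * base address in the second. fw_memcpy_to_be32() below converts
         * the whole table to big-endian in place. */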
        for (i = 0, j = 0; i < count; i++) {
                sg_len = sg_dma_len(sg + i);
                sg_addr = sg_dma_address(sg + i);
                while (sg_len) {
                        l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
                        orb->page_table[j].low = sg_addr;
                        orb->page_table[j].high = (l << 16);
                        sg_addr += l;
                        sg_len -= l;
                        j++;
                }
        }

        size = sizeof orb->page_table[0] * j;

        /* The data_descriptor pointer is the one case where we need
         * to fill in the node ID part of the address. All other
         * pointers assume that the data referenced reside on the
         * initiator (i.e. us), but data_descriptor can refer to data
         * on other nodes so we need to put our ID in descriptor.high. */
        orb->page_table_bus =
                dma_map_single(device->card->device, orb->page_table,
                               size, DMA_TO_DEVICE);
        orb->request.data_descriptor.high = sd->address_high;
        orb->request.data_descriptor.low = orb->page_table_bus;
        orb->request.misc |=
                command_orb_page_table_present |
                command_orb_data_size(j);

        fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
}

static void sbp2_command_orb_map_buffer(struct sbp2_command_orb *orb)
{
        struct fw_unit *unit =
                (struct fw_unit *)orb->cmd->device->host->hostdata[0];
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;

        /* As for map_scatterlist, we need to fill in the high bits of
         * the data_descriptor pointer. */
        orb->request_buffer_bus =
                dma_map_single(device->card->device,
                               orb->cmd->request_buffer,
                               orb->cmd->request_bufflen,
                               orb->cmd->sc_data_direction);
        orb->request.data_descriptor.high = sd->address_high;
        orb->request.data_descriptor.low = orb->request_buffer_bus;
        orb->request.misc |=
                command_orb_data_size(orb->cmd->request_bufflen);
}

/* SCSI stack integration */

static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
        struct fw_unit *unit = (struct fw_unit *)cmd->device->host->hostdata[0];
        struct fw_device *device = fw_device(unit->device.parent);
        struct sbp2_device *sd = unit->device.driver_data;
        struct sbp2_command_orb *orb;

        /* Bidirectional commands are not yet implemented, and unknown
         * transfer direction not handled. */
        if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
                fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
                cmd->result = DID_ERROR << 16;
                done(cmd);
                return 0;
        }

        orb = kzalloc(sizeof *orb, GFP_ATOMIC);
        if (orb == NULL) {
                fw_notify("failed to alloc orb\n");
                cmd->result = DID_NO_CONNECT << 16;
                done(cmd);
                return 0;
        }

        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof orb->request, DMA_TO_DEVICE);

        orb->unit = unit;
        orb->done = done;
        orb->cmd = cmd;

        orb->request.next.high = SBP2_ORB_NULL;
        orb->request.next.low = 0x0;

        /* At speed 100 we can do 512 bytes per packet, at speed 200,
         * 1024 bytes per packet etc. The SBP-2 max_payload field
         * specifies the max payload size as 2 ^ (max_payload + 2), so
         * if we set this to max_speed + 7, we get the right value. */
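        /* For example, at S400 max_speed is SCODE_400 == 2, so
         * max_payload becomes 9 and the limit works out to
         * 2 ^ (9 + 2) = 2048 bytes per packet, the S400 maximum. */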
        orb->request.misc =
                command_orb_max_payload(device->node->max_speed + 7) |
                command_orb_speed(device->node->max_speed) |
                command_orb_notify;

        if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                orb->request.misc |=
                        command_orb_direction(SBP2_DIRECTION_FROM_MEDIA);
        else if (cmd->sc_data_direction == DMA_TO_DEVICE)
                orb->request.misc |=
                        command_orb_direction(SBP2_DIRECTION_TO_MEDIA);

        if (cmd->use_sg) {
                sbp2_command_orb_map_scatterlist(orb);
        } else if (cmd->request_bufflen > SBP2_MAX_SG_ELEMENT_LENGTH) {
                /* FIXME: Need to split this into a sg list... but
                 * could we get the scsi or blk layer to do that by
                 * reporting our max supported block size? */
                fw_error("command > 64k\n");
                dma_unmap_single(device->card->device, orb->base.request_bus,
                                 sizeof orb->request, DMA_TO_DEVICE);
                kfree(orb);
                cmd->result = DID_ERROR << 16;
                done(cmd);
                return 0;
        } else if (cmd->request_bufflen > 0) {
                sbp2_command_orb_map_buffer(orb);
        }

        fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request);

        memset(orb->request.command_block,
               0, sizeof orb->request.command_block);
        memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));

        orb->base.callback = complete_command_orb;

        sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
                      sd->command_block_agent_address + SBP2_ORB_POINTER);

        return 0;
}

static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{
        struct fw_unit *unit = (struct fw_unit *)sdev->host->hostdata[0];
        struct sbp2_device *sd = unit->device.driver_data;

        if (sdev->type == TYPE_DISK &&
            sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
                sdev->skip_ms_page_8 = 1;
        if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
                fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
                sdev->fix_capacity = 1;
        }

        return 0;
}

/*
 * Called by the scsi stack when something has really gone wrong.
 * Usually called when a command has timed out for some reason.
 */
static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
        struct fw_unit *unit = (struct fw_unit *)cmd->device->host->hostdata[0];

        fw_notify("sbp2_scsi_abort\n");

        sbp2_cancel_orbs(unit);

        return SUCCESS;
}

static struct scsi_host_template scsi_driver_template = {
        .module = THIS_MODULE,
        .name = "SBP-2 IEEE-1394",
        .proc_name = (char *)sbp2_driver_name,
        .queuecommand = sbp2_scsi_queuecommand,
        .slave_configure = sbp2_scsi_slave_configure,
        .eh_abort_handler = sbp2_scsi_abort,
        .this_id = -1,
        .sg_tablesize = SG_ALL,
        .use_clustering = ENABLE_CLUSTERING,
        .cmd_per_lun = 1,
        .can_queue = 1,
};
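
/* Allocate a Scsi_Host with room for one unsigned long of private data
 * and stash the fw_unit pointer in hostdata[0], so the SCSI callbacks
 * above can find their way back to the IEEE1394 unit. */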
static int add_scsi_devices(struct fw_unit *unit)
{
        struct sbp2_device *sd = unit->device.driver_data;
        int retval, lun;

        sd->scsi_host = scsi_host_alloc(&scsi_driver_template,
                                        sizeof(unsigned long));
        if (sd->scsi_host == NULL) {
                fw_error("failed to register scsi host\n");
                return -1;
        }

        sd->scsi_host->hostdata[0] = (unsigned long)unit;
        retval = scsi_add_host(sd->scsi_host, &unit->device);
        if (retval < 0) {
                fw_error("failed to add scsi host\n");
                scsi_host_put(sd->scsi_host);
                return retval;
        }

        /* FIXME: Loop over luns here. */
        lun = 0;
        retval = scsi_add_device(sd->scsi_host, 0, 0, lun);
        if (retval < 0) {
                fw_error("failed to add scsi device\n");
                scsi_remove_host(sd->scsi_host);
                scsi_host_put(sd->scsi_host);
                return retval;
        }

        return 0;
}

static void remove_scsi_devices(struct fw_unit *unit)
{
        struct sbp2_device *sd = unit->device.driver_data;

        scsi_remove_host(sd->scsi_host);
        scsi_host_put(sd->scsi_host);
}

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

static int __init sbp2_init(void)
{
        return driver_register(&sbp2_driver.driver);
}

static void __exit sbp2_cleanup(void)
{
        driver_unregister(&sbp2_driver.driver);
}

module_init(sbp2_init);
module_exit(sbp2_cleanup);