core-card.c

/*
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

#include "core.h"
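/*
 * A config ROM block starts with a header quadlet whose bits 16..23 hold the
 * block length in quadlets; the computed CRC-ITU-T is OR'ed into the low 16
 * bits of that header.  Returns the block length so callers can step to the
 * next block.
 */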
int fw_compute_block_crc(__be32 *block)
{
        int length;
        u16 crc;

        length = (be32_to_cpu(block[0]) >> 16) & 0xff;
        crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
        *block |= cpu_to_be32(crc);

        return length;
}

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
static int descriptor_count;

static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;

#define BIB_CRC(v)		((v) <<  0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)

#define BIB_BUS_NAME		0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)	((v) <<  0)
#define BIB_GENERATION(v)	((v) <<  4)
#define BIB_MAX_ROM(v)		((v) <<  8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IRMC		((1) << 31)
#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
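/*
 * generate_config_rom() fills the first five quadlets with the ROM header
 * and bus info block (bus name "1394", link speed, generation, max_rec,
 * capability bits, and the 64-bit GUID), followed by the root directory and
 * any descriptor blocks registered via fw_core_add_descriptor().
 */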
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
        struct fw_descriptor *desc;
        int i, j, k, length;

        /*
         * Initialize the contents of the config rom buffer.  On the OHCI
         * controller, block reads of the config rom access host memory,
         * but quadlet reads access the hardware bus info block registers.
         * That's just crack, but it means we should make sure the contents
         * of the bus info block in host memory match the version stored in
         * the OHCI registers.
         */

        config_rom[0] = cpu_to_be32(
                BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
        config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
        config_rom[2] = cpu_to_be32(
                BIB_LINK_SPEED(card->link_speed) |
                BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
                BIB_MAX_ROM(2) |
                BIB_MAX_RECEIVE(card->max_receive) |
                BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
        config_rom[3] = cpu_to_be32(card->guid >> 32);
        config_rom[4] = cpu_to_be32(card->guid);

        /* Generate root directory. */
        config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
        i = 7;
        j = 7 + descriptor_count;

        /* Generate root directory entries for descriptors. */
        list_for_each_entry (desc, &descriptor_list, link) {
                if (desc->immediate > 0)
                        config_rom[i++] = cpu_to_be32(desc->immediate);
                config_rom[i] = cpu_to_be32(desc->key | (j - i));
                i++;
                j += desc->length;
        }

        /* Update root directory length. */
        config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

        /* End of root directory, now copy in descriptors. */
        list_for_each_entry (desc, &descriptor_list, link) {
                for (k = 0; k < desc->length; k++)
                        config_rom[i + k] = cpu_to_be32(desc->data[k]);
                i += desc->length;
        }

        /*
         * Calculate CRCs for all blocks in the config rom.  This
         * assumes that CRC length and info length are identical for
         * the bus info block, which is always the case for this
         * implementation.
         */
        for (i = 0; i < j; i += length + 1)
                length = fw_compute_block_crc(config_rom + i);

        WARN_ON(j != config_rom_length);
}
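/*
 * Regenerate the config ROM and push it to every registered card.  The
 * shared tmp_config_rom buffer and the card/descriptor lists are protected
 * by card_mutex, so callers must hold it.
 */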
static void update_config_roms(void)
{
        struct fw_card *card;

        list_for_each_entry (card, &card_list, link) {
                generate_config_rom(card, tmp_config_rom);
                card->driver->set_config_rom(card, tmp_config_rom,
                                             config_rom_length);
        }
}

static size_t required_space(struct fw_descriptor *desc)
{
        /* descriptor + entry into root dir + optional immediate entry */
        return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}
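/*
 * Add a descriptor block to the root directory of every card's config ROM.
 * Fails with -EBUSY if the descriptor would not fit into the 256-quadlet
 * ROM, and with -EINVAL if the descriptor's internal block lengths are
 * inconsistent.
 */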
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
        size_t i;
        int ret;

        /*
         * Check that the descriptor is valid; the lengths of all blocks
         * in the descriptor have to add up to exactly the length of the
         * descriptor itself.
         */
        i = 0;
        while (i < desc->length)
                i += (desc->data[i] >> 16) + 1;

        if (i != desc->length)
                return -EINVAL;

        mutex_lock(&card_mutex);

        if (config_rom_length + required_space(desc) > 256) {
                ret = -EBUSY;
        } else {
                list_add_tail(&desc->link, &descriptor_list);
                config_rom_length += required_space(desc);
                descriptor_count++;
                if (desc->immediate > 0)
                        descriptor_count++;
                update_config_roms();
                ret = 0;
        }

        mutex_unlock(&card_mutex);

        return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);

void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
        mutex_lock(&card_mutex);

        list_del(&desc->link);
        config_rom_length -= required_space(desc);
        descriptor_count--;
        if (desc->immediate > 0)
                descriptor_count--;
        update_config_roms();

        mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
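/*
 * Kick off a bus reset through the local PHY: a long reset is requested via
 * PHY register 1, a short (arbitrated) reset via register 5.
 */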
static int reset_bus(struct fw_card *card, bool short_reset)
{
        int reg = short_reset ? 5 : 1;
        int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

        return card->driver->update_phy_reg(card, reg, 0, bit);
}

void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
        /* We don't try hard to sort out requests of long vs. short resets. */
        card->br_short = short_reset;

        /* Use an arbitrary short delay to combine multiple reset requests. */
        fw_card_get(card);
        if (!schedule_delayed_work(&card->br_work,
                                   delayed ? DIV_ROUND_UP(HZ, 100) : 0))
                fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
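/*
 * Deferred bus-reset worker: rate-limits resets to one per two seconds,
 * sends a PHY configuration packet (no node designated as root, current gap
 * count retained), and then performs the reset requested via
 * fw_schedule_bus_reset().
 */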
static void br_work(struct work_struct *work)
{
        struct fw_card *card = container_of(work, struct fw_card, br_work.work);

        /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
        if (card->reset_jiffies != 0 &&
            time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
                if (!schedule_delayed_work(&card->br_work, 2 * HZ))
                        fw_card_put(card);
                return;
        }

        fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
                           FW_PHY_CONFIG_CURRENT_GAP_COUNT);
        reset_bus(card, card->br_short);
        fw_card_put(card);
}
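/*
 * As IRM, claim isochronous channel 31 for the broadcast channel, then let
 * all child devices know that the channel is available.
 */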
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
        int channel, bandwidth = 0;

        if (!card->broadcast_channel_allocated) {
                fw_iso_resource_manage(card, generation, 1ULL << 31,
                                       &channel, &bandwidth, true,
                                       card->bm_transaction_data);
                if (channel != 31) {
                        fw_notify("failed to allocate broadcast channel\n");
                        return;
                }
                card->broadcast_channel_allocated = true;
        }

        device_for_each_child(card->device, (void *)(long)generation,
                              fw_device_set_broadcast_channel);
}
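/*
 * Gap counts from IEEE 1394a table E-1, indexed by the maximum number of
 * hops between any two nodes on the bus.
 */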
static const char gap_count_table[] = {
        63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};

void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
        fw_card_get(card);
        if (!schedule_delayed_work(&card->bm_work, delay))
                fw_card_put(card);
}
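/*
 * Bus management worker: tries to become bus manager by a compare-swap on
 * the IRM's BUS_MANAGER_ID register, then, as BM, makes sure a cycle master
 * capable node is root, optimizes the gap count, enables cycle starts on the
 * cycle master, and allocates the broadcast channel if we are also IRM.
 */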
static void bm_work(struct work_struct *work)
{
        struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
        struct fw_device *root_device;
        struct fw_node *root_node;
        int root_id, new_root_id, irm_id, bm_id, local_id;
        int gap_count, generation, grace, rcode;
        bool do_reset = false;
        bool root_device_is_running;
        bool root_device_is_cmc;

        spin_lock_irq(&card->lock);

        if (card->local_node == NULL) {
                spin_unlock_irq(&card->lock);
                goto out_put_card;
        }

        generation = card->generation;

        root_node = card->root_node;
        fw_node_get(root_node);
        root_device = root_node->data;
        root_device_is_running = root_device &&
                        atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
        root_device_is_cmc = root_device && root_device->cmc;

        root_id  = root_node->node_id;
        irm_id   = card->irm_node->node_id;
        local_id = card->local_node->node_id;

        grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

        if ((is_next_generation(generation, card->bm_generation) &&
             !card->bm_abdicate) ||
            (card->bm_generation != generation && grace)) {
                /*
                 * The first step is to figure out who is IRM and then try
                 * to become bus manager.  If the IRM is not well defined
                 * (e.g. does not have an active link layer or does not
                 * respond to our lock request), we will have to do a little
                 * vigilante bus management.  In that case, we do a goto into
                 * the gap count logic so that when we do the reset, we still
                 * optimize the gap count.  That could well save a reset in
                 * the next generation.
                 */

                if (!card->irm_node->link_on) {
                        new_root_id = local_id;
                        fw_notify("IRM has link off, making local node (%02x) root.\n",
                                  new_root_id);
                        goto pick_me;
                }

                card->bm_transaction_data[0] = cpu_to_be32(0x3f);
                card->bm_transaction_data[1] = cpu_to_be32(local_id);

                spin_unlock_irq(&card->lock);

                rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
                                card->bm_transaction_data, 8);

                if (rcode == RCODE_GENERATION)
                        /* Another bus reset, BM work has been rescheduled. */
                        goto out;

                bm_id = be32_to_cpu(card->bm_transaction_data[0]);

                spin_lock_irq(&card->lock);
                if (rcode == RCODE_COMPLETE && generation == card->generation)
                        card->bm_node_id =
                            bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
                spin_unlock_irq(&card->lock);

                if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
                        /* Somebody else is BM.  Only act as IRM. */
                        if (local_id == irm_id)
                                allocate_broadcast_channel(card, generation);

                        goto out;
                }

                if (rcode == RCODE_SEND_ERROR) {
                        /*
                         * We have been unable to send the lock request due to
                         * some local problem.  Let's try again later and hope
                         * that the problem has gone away by then.
                         */
                        fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                        goto out;
                }

                spin_lock_irq(&card->lock);

                if (rcode != RCODE_COMPLETE) {
                        /*
                         * The lock request failed, maybe the IRM
                         * isn't really IRM capable after all.  Let's
                         * do a bus reset and pick the local node as
                         * root, and thus, IRM.
                         */
                        new_root_id = local_id;
                        fw_notify("BM lock failed, making local node (%02x) root.\n",
                                  new_root_id);
                        goto pick_me;
                }
        } else if (card->bm_generation != generation) {
                /*
                 * We weren't BM in the last generation, and the last
                 * bus reset was less than 125ms ago.  Reschedule this job.
                 */
                spin_unlock_irq(&card->lock);
                fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                goto out;
        }

        /*
         * We're bus manager for this generation, so next step is to
         * make sure we have an active cycle master and do gap count
         * optimization.
         */
        card->bm_generation = generation;

        if (root_device == NULL) {
                /*
                 * Either link_on is false, or we failed to read the
                 * config rom.  In either case, pick another root.
                 */
                new_root_id = local_id;
        } else if (!root_device_is_running) {
                /*
                 * If we haven't probed this device yet, bail out now
                 * and try again once that's done.
                 */
                spin_unlock_irq(&card->lock);
                goto out;
        } else if (root_device_is_cmc) {
                /*
                 * We will send out a force root packet for this
                 * node as part of the gap count optimization.
                 */
                new_root_id = root_id;
        } else {
                /*
                 * Current root has an active link layer and we
                 * successfully read the config rom, but it's not
                 * cycle master capable.
                 */
                new_root_id = local_id;
        }

 pick_me:
        /*
         * Pick a gap count from 1394a table E-1.  The table doesn't cover
         * the typically much larger 1394b beta repeater delays though.
         */
        if (!card->beta_repeaters_present &&
            root_node->max_hops < ARRAY_SIZE(gap_count_table))
                gap_count = gap_count_table[root_node->max_hops];
        else
                gap_count = 63;

        /*
         * Finally, figure out if we should do a reset or not.  If we have
         * done less than 5 resets with the same physical topology and we
         * have either a new root or a new gap count setting, let's do it.
         */
        if (card->bm_retries++ < 5 &&
            (card->gap_count != gap_count || new_root_id != root_id))
                do_reset = true;

        spin_unlock_irq(&card->lock);

        if (do_reset) {
                fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
                          card->index, new_root_id, gap_count);
                fw_send_phy_config(card, new_root_id, generation, gap_count);
                reset_bus(card, true);
                /* Will allocate broadcast channel after the reset. */
                goto out;
        }

        if (root_device_is_cmc) {
                /*
                 * Make sure that the cycle master sends cycle start packets.
                 */
                card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
                rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
                                root_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_STATE_SET,
                                card->bm_transaction_data, 4);
                if (rcode == RCODE_GENERATION)
                        goto out;
        }

        if (local_id == irm_id)
                allocate_broadcast_channel(card, generation);

 out:
        fw_node_put(root_node);
 out_put_card:
        fw_card_put(card);
}
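/*
 * Set a card's fields to sane defaults before the controller driver enables
 * it: a unique index, an 800-cycle (100 ms) split transaction timeout, the
 * initial broadcast channel register value, and the bus reset / bus
 * management work items.
 */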
void fw_card_initialize(struct fw_card *card,
                        const struct fw_card_driver *driver,
                        struct device *device)
{
        static atomic_t index = ATOMIC_INIT(-1);

        card->index = atomic_inc_return(&index);
        card->driver = driver;
        card->device = device;
        card->current_tlabel = 0;
        card->tlabel_mask = 0;
        card->split_timeout_hi = 0;
        card->split_timeout_lo = 800 << 19;
        card->split_timeout_cycles = 800;
        card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
        card->color = 0;
        card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

        kref_init(&card->kref);
        init_completion(&card->done);
        INIT_LIST_HEAD(&card->transaction_list);
        INIT_LIST_HEAD(&card->phy_receiver_list);
        spin_lock_init(&card->lock);

        card->local_node = NULL;

        INIT_DELAYED_WORK(&card->br_work, br_work);
        INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
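/*
 * Register an initialized card: generate its config ROM, ask the controller
 * driver to enable the hardware with it, and, on success, put the card on
 * the global card_list so later descriptor updates reach it.
 */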
int fw_card_add(struct fw_card *card,
                u32 max_receive, u32 link_speed, u64 guid)
{
        int ret;

        card->max_receive = max_receive;
        card->link_speed = link_speed;
        card->guid = guid;

        mutex_lock(&card_mutex);

        generate_config_rom(card, tmp_config_rom);
        ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
        if (ret == 0)
                list_add_tail(&card->link, &card_list);

        mutex_unlock(&card_mutex);

        return ret;
}
EXPORT_SYMBOL(fw_card_add);

/*
 * The next few functions implement a dummy driver that is used once a card
 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 * as all IO to the card will be handled (and failed) by the dummy driver
 * instead of calling into the module.  Only functions for iso context
 * shutdown still need to be provided by the card driver.
 *
 * .read/write_csr() should never be called anymore after the dummy driver
 * was bound since they are only used within request handler context.
 * .set_config_rom() is never called since the card is taken out of card_list
 * before switching to the dummy driver.
 */

static int dummy_read_phy_reg(struct fw_card *card, int address)
{
        return -ENODEV;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
                                int clear_bits, int set_bits)
{
        return -ENODEV;
}

static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
        packet->callback(packet, card, RCODE_CANCELLED);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
        packet->callback(packet, card, RCODE_CANCELLED);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
                                 int node_id, int generation)
{
        return -ENODEV;
}

static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
                                int type, int channel, size_t header_size)
{
        return ERR_PTR(-ENODEV);
}

static int dummy_start_iso(struct fw_iso_context *ctx,
                           s32 cycle, u32 sync, u32 tags)
{
        return -ENODEV;
}

static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
        return -ENODEV;
}

static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
                           struct fw_iso_buffer *buffer, unsigned long payload)
{
        return -ENODEV;
}

static const struct fw_card_driver dummy_driver_template = {
        .read_phy_reg         = dummy_read_phy_reg,
        .update_phy_reg       = dummy_update_phy_reg,
        .send_request         = dummy_send_request,
        .send_response        = dummy_send_response,
        .cancel_packet        = dummy_cancel_packet,
        .enable_phys_dma      = dummy_enable_phys_dma,
        .allocate_iso_context = dummy_allocate_iso_context,
        .start_iso            = dummy_start_iso,
        .set_iso_channels     = dummy_set_iso_channels,
        .queue_iso            = dummy_queue_iso,
};
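/*
 * Called when the last reference to the card is dropped; wakes up
 * fw_core_remove_card(), which is waiting on card->done.
 */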
void fw_card_release(struct kref *kref)
{
        struct fw_card *card = container_of(kref, struct fw_card, kref);

        complete(&card->done);
}
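/*
 * Tear a card down: take the local PHY off the bus (clear the link-active
 * and contender bits and force a bus reset), unlink the card from card_list,
 * switch to the dummy driver, destroy the node tree, and finally wait until
 * all remaining users have dropped their references.
 */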
void fw_core_remove_card(struct fw_card *card)
{
        struct fw_card_driver dummy_driver = dummy_driver_template;

        card->driver->update_phy_reg(card, 4,
                                     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
        fw_schedule_bus_reset(card, false, true);

        mutex_lock(&card_mutex);
        list_del_init(&card->link);
        mutex_unlock(&card_mutex);

        /* Switch off most of the card driver interface. */
        dummy_driver.free_iso_context = card->driver->free_iso_context;
        dummy_driver.stop_iso = card->driver->stop_iso;
        card->driver = &dummy_driver;

        fw_destroy_nodes(card);

        /* Wait for all users, especially device workqueue jobs, to finish. */
        fw_card_put(card);
        wait_for_completion(&card->done);

        WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);