fw-card.c

/*
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
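
/*
 * Note on the block layout assumed below: each config ROM block begins
 * with a header quadlet; the function reads the block length in quadlets
 * from bits 16-23 (the crc_length field in the case of the bus info
 * block) and stores the CRC-16 over the following quadlets in the low
 * 16 bits.  It returns the length so callers can step from one block to
 * the next.
 */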
int fw_compute_block_crc(u32 *block)
{
	__be32 be32_block[256];
	int i, length;

	length = (*block >> 16) & 0xff;
	for (i = 0; i < length; i++)
		be32_block[i] = cpu_to_be32(block[i + 1]);
	*block |= crc_itu_t(0, (u8 *) be32_block, length * 4);

	return length;
}
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
static int descriptor_count;

#define BIB_CRC(v)		((v) <<  0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)

#define BIB_LINK_SPEED(v)	((v) <<  0)
#define BIB_GENERATION(v)	((v) <<  4)
#define BIB_MAX_ROM(v)		((v) <<  8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IMC			((1) << 31)
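
/*
 * Illustration of how the BIB_* macros combine (the values below are
 * examples only, not taken from a real card): for an S400 link
 * (link_speed == 2) with max_receive == 10 and generation g,
 * generate_config_rom() builds the bus_options quadlet config_rom[2] as
 *
 *	BIB_LINK_SPEED(2) | BIB_GENERATION(g) | BIB_MAX_ROM(2) |
 *	BIB_MAX_RECEIVE(10) | BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC
 *
 * i.e. each field is shifted into its place and the single-bit
 * capability flags are OR'd on top.
 */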
static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
{
	struct fw_descriptor *desc;
	static u32 config_rom[256];
	int i, j, length;

	/*
	 * Initialize contents of config rom buffer.  On the OHCI
	 * controller, block reads to the config rom access the host
	 * memory, but quadlet reads access the hardware bus info block
	 * registers.  That's just crack, but it means we should make
	 * sure the contents of the bus info block in host memory match
	 * the version stored in the OHCI registers.
	 */

	memset(config_rom, 0, sizeof(config_rom));
	config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
	config_rom[1] = 0x31333934;

	config_rom[2] =
		BIB_LINK_SPEED(card->link_speed) |
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
	config_rom[3] = card->guid >> 32;
	config_rom[4] = card->guid;

	/* Generate root directory. */
	i = 5;
	config_rom[i++] = 0;
	config_rom[i++] = 0x0c0083c0; /* node capabilities */
	j = i + descriptor_count;

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = desc->immediate;
		config_rom[i] = desc->key | (j - i);
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = (i - 5 - 1) << 16;

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		memcpy(&config_rom[i], desc->data, desc->length * 4);
		i += desc->length;
	}

	/*
	 * Calculate CRCs for all blocks in the config rom.  This
	 * assumes that CRC length and info length are identical for
	 * the bus info block, which is always the case for this
	 * implementation.
	 */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	*config_rom_length = j;

	return config_rom;
}
static void update_config_roms(void)
{
	struct fw_card *card;
	u32 *config_rom;
	size_t length;

	list_for_each_entry (card, &card_list, link) {
		config_rom = generate_config_rom(card, &length);
		card->driver->set_config_rom(card, config_rom, length);
	}
}
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
	size_t i;

	/*
	 * Check that the descriptor is valid; the lengths of all blocks
	 * in the descriptor have to add up to exactly the length of the
	 * descriptor.
	 */
	i = 0;
	while (i < desc->length)
		i += (desc->data[i] >> 16) + 1;

	if (i != desc->length)
		return -EINVAL;

	mutex_lock(&card_mutex);

	list_add_tail(&desc->link, &descriptor_list);
	descriptor_count++;
	if (desc->immediate > 0)
		descriptor_count++;
	update_config_roms();

	mutex_unlock(&card_mutex);

	return 0;
}
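
/*
 * Usage sketch for fw_core_add_descriptor() (a hypothetical caller, not
 * code from this file; the key and data values are made up): a unit
 * driver that wants one extra leaf in the root directory could register
 *
 *	static u32 my_leaf_data[] = {
 *		0x00020000,	// block header: 2 quadlets follow, CRC filled in later
 *		0x12345678,
 *		0x9abcdef0,
 *	};
 *	static struct fw_descriptor my_leaf_desc = {
 *		.length = ARRAY_SIZE(my_leaf_data),
 *		.key    = 0x81000000,	// example directory key, upper byte only
 *		.data   = my_leaf_data,
 *	};
 *
 *	err = fw_core_add_descriptor(&my_leaf_desc);
 *
 * The validation loop above walks the embedded block headers, so .length
 * must equal the sum over all blocks of (header length field + 1).
 */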
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
	mutex_lock(&card_mutex);

	list_del(&desc->link);
	descriptor_count--;
	if (desc->immediate > 0)
		descriptor_count--;
	update_config_roms();

	mutex_unlock(&card_mutex);
}
/* ------------------------------------------------------------------ */
/* Code to handle the 1394a broadcast channel */

#define THIRTY_TWO_CHANNELS	(0xFFFFFFFFU)
#define IRM_RETRIES		2

/*
 * The ABI is set by device_for_each_child(), even though we have no use
 * for data, nor do we have a meaningful return value.
 */
int fw_irm_set_broadcast_channel_register(struct device *dev, void *data)
{
	struct fw_device *d;
	int rcode;
	int node_id;
	int max_speed;
	int retries;
	int generation;
	__be32 regval;
	struct fw_card *card;

	d = fw_device(dev);
	/* FIXME: do we need locking here? */
	generation = d->generation;
	smp_rmb();	/* Ensure generation is at least as old as node_id. */
	node_id = d->node_id;
	max_speed = d->max_speed;
	retries = IRM_RETRIES;
	card = d->card;

tryagain_r:
	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				   node_id, generation, max_speed,
				   CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				   &regval, 4);
	switch (rcode) {
	case RCODE_BUSY:
		if (retries--)
			goto tryagain_r;
		fw_notify("node %x read broadcast channel busy\n",
			  node_id);
		return 0;

	default:
		fw_notify("node %x read broadcast channel failed %x\n",
			  node_id, rcode);
		return 0;

	case RCODE_COMPLETE:
		/*
		 * Paranoid reporting of nonstandard broadcast channel
		 * contents goes here.
		 */
		if (regval != cpu_to_be32(BROADCAST_CHANNEL_INITIAL))
			return 0;
		break;
	}

	retries = IRM_RETRIES;
	regval = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
			     BROADCAST_CHANNEL_VALID);
tryagain_w:
	rcode = fw_run_transaction(card,
				   TCODE_WRITE_QUADLET_REQUEST, node_id,
				   generation, max_speed,
				   CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				   &regval, 4);
	switch (rcode) {
	case RCODE_BUSY:
		if (retries--)
			goto tryagain_w;
		fw_notify("node %x write broadcast channel busy\n",
			  node_id);
		return 0;

	default:
		fw_notify("node %x write broadcast channel failed %x\n",
			  node_id, rcode);
		return 0;

	case RCODE_COMPLETE:
		return 0;
	}
	return 0;
}
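
/*
 * irm_allocate_broadcast() claims channel 31 (the broadcast channel) on
 * behalf of the IRM: lock_data[0] holds the value we expect to find in
 * CHANNELS_AVAILABLE_HI (all channels free) and lock_data[1] the value
 * we want to store, i.e. the same mask with its least significant bit
 * cleared to mark channel 31 as allocated.  If the compare-swap returns
 * a different current value, we retry using that value as the new
 * expectation.
 */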
static void
irm_allocate_broadcast(struct fw_device *irm_dev, struct device *locald)
{
	u32 generation;
	u32 node_id;
	u32 max_speed;
	u32 retries;
	__be32 old_data;
	__be32 lock_data[2];
	int rcode;

	/*
	 * The device we are updating is the IRM, so we must do
	 * some extra work.
	 */
	retries = IRM_RETRIES;
	generation = irm_dev->generation;
	/* FIXME: do we need locking here? */
	smp_rmb();
	node_id = irm_dev->node_id;
	max_speed = irm_dev->max_speed;

	lock_data[0] = cpu_to_be32(THIRTY_TWO_CHANNELS);
	lock_data[1] = cpu_to_be32(THIRTY_TWO_CHANNELS & ~1);
tryagain:
	old_data = lock_data[0];
	rcode = fw_run_transaction(irm_dev->card, TCODE_LOCK_COMPARE_SWAP,
				   node_id, generation, max_speed,
				   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
				   &lock_data[0], 8);
	switch (rcode) {
	case RCODE_BUSY:
		if (retries--)
			goto tryagain;
		/* fallthrough */

	default:
		fw_error("node %x: allocate broadcast channel failed (%x)\n",
			 node_id, rcode);
		return;

	case RCODE_COMPLETE:
		if (lock_data[0] == old_data)
			break;

		if (retries--) {
			lock_data[1] = cpu_to_be32(be32_to_cpu(lock_data[0]) & ~1);
			goto tryagain;
		}
		fw_error("node %x: allocate broadcast channel failed: too many"
			 " retries\n", node_id);
		return;
	}
	irm_dev->card->is_irm = true;

	device_for_each_child(locald, NULL, fw_irm_set_broadcast_channel_register);
}
/* ------------------------------------------------------------------ */
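
/*
 * A worked example for table E-1 below: the table is indexed by the
 * bus's maximum hop count (root_node->max_hops), so a topology whose
 * most distant nodes are 5 hops apart gets gap count 13, while a
 * single-node bus (0 hops) keeps the power-reset default of 63.
 */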
static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
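
/*
 * fw_schedule_bm_work() takes a card reference for the queued work item;
 * if schedule_delayed_work() reports that the work was already pending,
 * the extra reference is dropped again right away.  The matching put for
 * a successfully queued item is the fw_card_put() at the end of
 * fw_card_bm_work().
 */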
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
	int scheduled;

	fw_card_get(card);
	scheduled = schedule_delayed_work(&card->work, delay);
	if (!scheduled)
		fw_card_put(card);
}
static void fw_card_bm_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, work.work);
	struct fw_device *root_device, *irm_device, *local_device;
	struct fw_node *root_node, *local_node, *irm_node;
	unsigned long flags;
	int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	__be32 lock_data[2];

	spin_lock_irqsave(&card->lock, flags);
	card->is_irm = false;
	local_node = card->local_node;
	root_node = card->root_node;
	irm_node = card->irm_node;

	if (local_node == NULL) {
		spin_unlock_irqrestore(&card->lock, flags);
		goto out_put_card;
	}
	fw_node_get(local_node);
	fw_node_get(root_node);
	fw_node_get(irm_node);

	generation = card->generation;
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;
	root_id = root_node->node_id;
	irm_device = irm_node->data;
	local_device = local_node->data;
	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if (is_next_generation(generation, card->bm_generation) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * The first step is to figure out who is IRM and
		 * then try to become bus manager.  If the IRM is not
		 * well defined (e.g. does not have an active link
		 * layer or does not respond to our lock request), we
		 * will have to do a little vigilante bus management.
		 * In that case, we do a goto into the gap count logic
		 * so that when we do the reset, we still optimize the
		 * gap count.  That could well save a reset in the
		 * next generation.
		 */

		irm_id = irm_node->node_id;
		if (!irm_node->link_on) {
			new_root_id = local_node->node_id;
			fw_notify("IRM has link off, making local node (%02x) root.\n",
				  new_root_id);
			goto pick_me;
		}

		lock_data[0] = cpu_to_be32(0x3f);
		lock_data[1] = cpu_to_be32(local_node->node_id);

		spin_unlock_irqrestore(&card->lock, flags);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				lock_data, sizeof(lock_data));

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		if (rcode == RCODE_COMPLETE &&
		    lock_data[0] != cpu_to_be32(0x3f)) {
			/* Somebody else is BM, let them do the work. */
			if (irm_id == local_node->node_id) {
				/* But we are IRM, so do irm-y things. */
				irm_allocate_broadcast(irm_device,
						       card->device);
			}
			goto out;
		}

		spin_lock_irqsave(&card->lock, flags);

		if (rcode != RCODE_COMPLETE) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_node->node_id;
			fw_notify("BM lock failed, making local node (%02x) root.\n",
				  new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125ms ago.  Reschedule this job.
		 */
		spin_unlock_irqrestore(&card->lock, flags);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so the next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_node->node_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irqrestore(&card->lock, flags);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * FIXME: I suppose we should set the cmstr bit in the
		 * STATE_CLEAR register of this node, as described in
		 * 1394-1995, 8.4.2.6.  Also, send out a force root
		 * packet for this node.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * The current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_node->node_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */
	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irqrestore(&card->lock, flags);

	if (do_reset) {
		fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
			  card->index, new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		fw_core_initiate_bus_reset(card, 1);
	} else if (irm_node->node_id == local_node->node_id) {
		/*
		 * We are IRM, so do irm-y things.  There's no reason to
		 * do this if we're doing a reset; we'll be back anyway.
		 */
		irm_allocate_broadcast(irm_device, card->device);
	}
 out:
	fw_node_put(root_node);
	fw_node_put(local_node);
	fw_node_put(irm_node);
 out_put_card:
	fw_card_put(card);
}
static void flush_timer_callback(unsigned long data)
{
	struct fw_card *card = (struct fw_card *)data;

	fw_flush_transactions(card);
}
void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	spin_lock_init(&card->lock);
	setup_timer(&card->flush_timer,
		    flush_timer_callback, (unsigned long)card);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid)
{
	u32 *config_rom;
	size_t length;
	int ret;

	card->max_receive = max_receive;
	card->link_speed = link_speed;
	card->guid = guid;

	mutex_lock(&card_mutex);
	config_rom = generate_config_rom(card, &length);
	list_add_tail(&card->link, &card_list);
	mutex_unlock(&card_mutex);

	ret = card->driver->enable(card, config_rom, length);
	if (ret < 0) {
		mutex_lock(&card_mutex);
		list_del(&card->link);
		mutex_unlock(&card_mutex);
	}

	return ret;
}
EXPORT_SYMBOL(fw_card_add);
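
/*
 * Usage sketch (a hypothetical controller driver; the ohci names below
 * only illustrate how a card driver such as fw-ohci is expected to use
 * this API): the driver embeds a struct fw_card in its per-device state,
 * initializes it with its fw_card_driver ops during probe, and adds it
 * once the hardware is ready:
 *
 *	fw_card_initialize(&ohci->card, &my_card_driver, &pdev->dev);
 *	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
 *	if (err < 0)
 *		goto fail_card;
 *
 * fw_card_add() builds the initial config ROM under card_mutex and hands
 * it to the driver's enable() hook; on failure the card is taken back
 * off card_list.
 */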
/*
 * The next few functions implement a dummy driver that is used once a
 * card driver shuts down an fw_card.  This allows the driver to
 * cleanly unload, as all IO to the card will be handled by the dummy
 * driver instead of calling into the (possibly) unloaded module.  The
 * dummy driver just fails all IO.
 */
static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	BUG();
	return -1;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}

static int dummy_set_config_rom(struct fw_card *card,
				u32 *config_rom, size_t length)
{
	/*
	 * We take the card out of card_list before setting the dummy
	 * driver, so this should never get called.
	 */
	BUG();
	return -1;
}
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, -ENODEV);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, -ENODEV);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}

static struct fw_card_driver dummy_driver = {
	.enable          = dummy_enable,
	.update_phy_reg  = dummy_update_phy_reg,
	.set_config_rom  = dummy_set_config_rom,
	.send_request    = dummy_send_request,
	.cancel_packet   = dummy_cancel_packet,
	.send_response   = dummy_send_response,
	.enable_phys_dma = dummy_enable_phys_dma,
};
void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
void fw_core_remove_card(struct fw_card *card)
{
	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_core_initiate_bus_reset(card, 1);

	mutex_lock(&card_mutex);
	list_del_init(&card->link);
	mutex_unlock(&card_mutex);

	/* Set up the dummy driver. */
	card->driver = &dummy_driver;

	fw_destroy_nodes(card);

	/* Wait for all users, especially device workqueue jobs, to finish. */
	fw_card_put(card);
	wait_for_completion(&card->done);

	WARN_ON(!list_empty(&card->transaction_list));
	del_timer_sync(&card->flush_timer);
}
EXPORT_SYMBOL(fw_core_remove_card);
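
/*
 * A long bus reset is requested through the IBR bit in PHY register 1;
 * 1394a PHYs additionally provide a short (arbitrated) bus reset via the
 * ISBR bit in register 5, which is what short_reset selects below.
 */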
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
	int reg = short_reset ? 5 : 1;
	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

	return card->driver->update_phy_reg(card, reg, 0, bit);
}
EXPORT_SYMBOL(fw_core_initiate_bus_reset);