/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include "core.h"

#define HEADER_PRI(pri)                 ((pri) << 0)
#define HEADER_TCODE(tcode)             ((tcode) << 4)
#define HEADER_RETRY(retry)             ((retry) << 8)
#define HEADER_TLABEL(tlabel)           ((tlabel) << 10)
#define HEADER_DESTINATION(destination) ((destination) << 16)
#define HEADER_SOURCE(source)           ((source) << 16)
#define HEADER_RCODE(rcode)             ((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)      ((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)    ((tcode) << 0)

#define HEADER_GET_TCODE(q)             (((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)            (((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)             (((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)       (((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)            (((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)       (((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)       (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)    (((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
        (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))

#define PHY_PACKET_CONFIG       0x0
#define PHY_PACKET_LINK_ON      0x1
#define PHY_PACKET_SELF_ID      0x2

#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)     ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)              ((id) << 30)
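
/*
 * For orientation (summary derived from the macros above, not normative):
 * for an asynchronous request, header[0] is composed as
 *
 *      bits 16-31: destination node ID   (HEADER_DESTINATION)
 *      bits 10-15: transaction label     (HEADER_TLABEL)
 *      bits  8-9:  retry code            (HEADER_RETRY)
 *      bits  4-7:  transaction code      (HEADER_TCODE)
 *      bits  0-3:  priority              (HEADER_PRI)
 */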

static int close_transaction(struct fw_transaction *transaction,
                             struct fw_card *card, int rcode)
{
        struct fw_transaction *t;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(t, &card->transaction_list, link) {
                if (t == transaction) {
                        list_del(&t->link);
                        card->tlabel_mask &= ~(1ULL << t->tlabel);
                        break;
                }
        }
        spin_unlock_irqrestore(&card->lock, flags);

        if (&t->link != &card->transaction_list) {
                t->callback(card, rcode, NULL, 0, t->callback_data);
                return 0;
        }

        return -ENOENT;
}

/*
 * Only valid for transactions that are potentially pending (ie have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
                          struct fw_transaction *transaction)
{
        /*
         * Cancel the packet transmission if it's still queued.  That
         * will call the packet transmission callback which cancels
         * the transaction.
         */
        if (card->driver->cancel_packet(card, &transaction->packet) == 0)
                return 0;

        /*
         * If the request packet has already been sent, we need to see
         * if the transaction is still pending and remove it in that case.
         */
        return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);

static void transmit_complete_callback(struct fw_packet *packet,
                                       struct fw_card *card, int status)
{
        struct fw_transaction *t =
                container_of(packet, struct fw_transaction, packet);

        switch (status) {
        case ACK_COMPLETE:
                close_transaction(t, card, RCODE_COMPLETE);
                break;
        case ACK_PENDING:
                t->timestamp = packet->timestamp;
                break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:
                close_transaction(t, card, RCODE_BUSY);
                break;
        case ACK_DATA_ERROR:
                close_transaction(t, card, RCODE_DATA_ERROR);
                break;
        case ACK_TYPE_ERROR:
                close_transaction(t, card, RCODE_TYPE_ERROR);
                break;
        default:
                /*
                 * In this case the ack is really a juju specific
                 * rcode, so just forward that to the callback.
                 */
                close_transaction(t, card, status);
                break;
        }
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
                            int destination_id, int source_id, int generation,
                            int speed, unsigned long long offset,
                            void *payload, size_t length)
{
        int ext_tcode;

        if (tcode == TCODE_STREAM_DATA) {
                packet->header[0] =
                        HEADER_DATA_LENGTH(length) |
                        destination_id |
                        HEADER_TCODE(TCODE_STREAM_DATA);
                packet->header_length = 4;
                packet->payload = payload;
                packet->payload_length = length;

                goto common;
        }

        if (tcode > 0x10) {
                ext_tcode = tcode & ~0x10;
                tcode = TCODE_LOCK_REQUEST;
        } else
                ext_tcode = 0;

        packet->header[0] =
                HEADER_RETRY(RETRY_X) |
                HEADER_TLABEL(tlabel) |
                HEADER_TCODE(tcode) |
                HEADER_DESTINATION(destination_id);
        packet->header[1] =
                HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
        packet->header[2] = offset;

        switch (tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
                packet->header[3] = *(u32 *)payload;
                packet->header_length = 16;
                packet->payload_length = 0;
                break;

        case TCODE_LOCK_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                packet->header[3] =
                        HEADER_DATA_LENGTH(length) |
                        HEADER_EXTENDED_TCODE(ext_tcode);
                packet->header_length = 16;
                packet->payload = payload;
                packet->payload_length = length;
                break;

        case TCODE_READ_QUADLET_REQUEST:
                packet->header_length = 12;
                packet->payload_length = 0;
                break;

        case TCODE_READ_BLOCK_REQUEST:
                packet->header[3] =
                        HEADER_DATA_LENGTH(length) |
                        HEADER_EXTENDED_TCODE(ext_tcode);
                packet->header_length = 16;
                packet->payload_length = 0;
                break;

        default:
                WARN(1, KERN_ERR "wrong tcode %d", tcode);
        }
 common:
        packet->speed = speed;
        packet->generation = generation;
        packet->ack = 0;
        packet->payload_mapped = false;
}

/**
 * This function provides low-level access to the IEEE1394 transaction
 * logic.  Most C programs would use either fw_read(), fw_write() or
 * fw_lock() instead - those functions are convenience wrappers for
 * this function.  The fw_send_request() function is primarily
 * provided as a flexible, one-stop entry point for language bindings
 * and protocol bindings.
 *
 * FIXME: Document this function further, in particular the possible
 * values for rcode in the callback.  In short, we map ACK_COMPLETE to
 * RCODE_COMPLETE, internal errors set errno and set rcode to
 * RCODE_SEND_ERROR (which is out of range for standard ieee1394
 * rcodes).  All other rcodes are forwarded unchanged.  For all
 * errors, payload is NULL, length is 0.
 *
 * Do not expect the callback to be called before the function
 * returns, though this does happen in some cases (ACK_COMPLETE and
 * errors).
 *
 * The payload is only used for write requests and must not be freed
 * until the callback has been called.
 *
 * @param card the card from which to send the request
 * @param tcode the tcode for this transaction.  Do not use
 *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
 *   etc. to specify tcode and ext_tcode.
 * @param destination_id the destination node ID (bus ID and PHY ID concatenated)
 * @param generation the generation for which destination_id is valid
 * @param speed the speed to use for sending the request
 * @param offset the 48 bit offset on the destination node
 * @param payload the data payload for the request subaction
 * @param length the length in bytes of the data to read
 * @param callback function to be called when the transaction is completed
 * @param callback_data pointer to arbitrary data, which will be
 *   passed to the callback
 *
 * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
                     int destination_id, int generation, int speed,
                     unsigned long long offset, void *payload, size_t length,
                     fw_transaction_callback_t callback, void *callback_data)
{
        unsigned long flags;
        int tlabel;

        /*
         * Bump the flush timer up 100ms first of all so we
         * don't race with a flush timer callback.
         */
        mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));

        /*
         * Allocate tlabel from the bitmap and put the transaction on
         * the list while holding the card spinlock.
         */
        spin_lock_irqsave(&card->lock, flags);

        tlabel = card->current_tlabel;
        if (card->tlabel_mask & (1ULL << tlabel)) {
                spin_unlock_irqrestore(&card->lock, flags);
                callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
                return;
        }

        card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
        card->tlabel_mask |= (1ULL << tlabel);

        t->node_id = destination_id;
        t->tlabel = tlabel;
        t->callback = callback;
        t->callback_data = callback_data;

        fw_fill_request(&t->packet, tcode, t->tlabel,
                        destination_id, card->node_id, generation,
                        speed, offset, payload, length);
        t->packet.callback = transmit_complete_callback;

        list_add_tail(&t->link, &card->transaction_list);

        spin_unlock_irqrestore(&card->lock, flags);

        card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
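
/*
 * Illustrative sketch only (not part of the original file; my_complete(),
 * my_t, my_quadlet and MY_UNIT_REG are made-up names): an asynchronous
 * quadlet write to a protocol-specific register of a struct fw_device,
 * assuming the caller keeps my_t and the payload alive until the
 * completion callback has run:
 *
 *      static void my_complete(struct fw_card *card, int rcode,
 *                              void *payload, size_t length, void *data)
 *      {
 *              if (rcode != RCODE_COMPLETE)
 *                      fw_notify("write failed, rcode %x\n", rcode);
 *      }
 *
 *      static struct fw_transaction my_t;
 *      static __be32 my_quadlet;
 *
 *      my_quadlet = cpu_to_be32(0x12345678);
 *      fw_send_request(device->card, &my_t, TCODE_WRITE_QUADLET_REQUEST,
 *                      device->node_id, device->generation, device->max_speed,
 *                      MY_UNIT_REG, &my_quadlet, 4, my_complete, NULL);
 */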

struct transaction_callback_data {
        struct completion done;
        void *payload;
        int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct transaction_callback_data *d = data;

        if (rcode == RCODE_COMPLETE)
                memcpy(d->payload, payload, length);
        d->rcode = rcode;
        complete(&d->done);
}

/**
 * fw_run_transaction - send request and sleep until transaction is completed
 *
 * Returns the RCODE.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
                       int generation, int speed, unsigned long long offset,
                       void *payload, size_t length)
{
        struct transaction_callback_data d;
        struct fw_transaction t;

        init_completion(&d.done);
        d.payload = payload;
        fw_send_request(card, &t, tcode, destination_id, generation, speed,
                        offset, payload, length, transaction_callback, &d);
        wait_for_completion(&d.done);

        return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
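
/*
 * Illustrative sketch only (not part of the original file): a synchronous
 * quadlet read of the first Config ROM quadlet of a struct fw_device.
 * fw_run_transaction() sleeps until the transaction completes and returns
 * the rcode; on RCODE_COMPLETE the data has been copied into the buffer:
 *
 *      __be32 rom_header;
 *      int rcode;
 *
 *      rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *                                 device->node_id, device->generation,
 *                                 device->max_speed,
 *                                 CSR_REGISTER_BASE + CSR_CONFIG_ROM,
 *                                 &rom_header, 4);
 *      if (rcode != RCODE_COMPLETE)
 *              fw_notify("read failed, rcode %x\n", rcode);
 */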

static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
        .header_length  = 8,
        .payload_length = 0,
        .speed          = SCODE_100,
        .callback       = transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
                        int node_id, int generation, int gap_count)
{
        long timeout = DIV_ROUND_UP(HZ, 10);
        u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
                   PHY_CONFIG_ROOT_ID(node_id) |
                   PHY_CONFIG_GAP_COUNT(gap_count);

        mutex_lock(&phy_config_mutex);

        phy_config_packet.header[0] = data;
        phy_config_packet.header[1] = ~data;
        phy_config_packet.generation = generation;
        INIT_COMPLETION(phy_config_done);

        card->driver->send_request(card, &phy_config_packet);
        wait_for_completion_timeout(&phy_config_done, timeout);

        mutex_unlock(&phy_config_mutex);
}

void fw_flush_transactions(struct fw_card *card)
{
        struct fw_transaction *t, *next;
        struct list_head list;
        unsigned long flags;

        INIT_LIST_HEAD(&list);
        spin_lock_irqsave(&card->lock, flags);
        list_splice_init(&card->transaction_list, &list);
        card->tlabel_mask = 0;
        spin_unlock_irqrestore(&card->lock, flags);

        list_for_each_entry_safe(t, next, &list, link) {
                card->driver->cancel_packet(card, &t->packet);

                /*
                 * At this point cancel_packet will never call the
                 * transaction callback, since we just took all the
                 * transactions out of the list.  So do it here.
                 */
                t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
        }
}

static struct fw_address_handler *lookup_overlapping_address_handler(
        struct list_head *list, unsigned long long offset, size_t length)
{
        struct fw_address_handler *handler;

        list_for_each_entry(handler, list, link) {
                if (handler->offset < offset + length &&
                    offset < handler->offset + handler->length)
                        return handler;
        }

        return NULL;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
        struct list_head *list, unsigned long long offset, size_t length)
{
        struct fw_address_handler *handler;

        list_for_each_entry(handler, list, link) {
                if (handler->offset <= offset &&
                    offset + length <= handler->offset + handler->length)
                        return handler;
        }

        return NULL;
}

static DEFINE_SPINLOCK(address_handler_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
        { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

#if 0
const struct fw_address_region fw_low_memory_region =
        { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
const struct fw_address_region fw_private_region =
        { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
        { .start = CSR_REGISTER_BASE,
          .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
        { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif  /*  0  */

/**
 * fw_core_add_address_handler - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * Return value:  0 on success, non-zero otherwise.
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
                                const struct fw_address_region *region)
{
        struct fw_address_handler *other;
        unsigned long flags;
        int ret = -EBUSY;

        if (region->start & 0xffff000000000003ULL ||
            region->end   & 0xffff000000000003ULL ||
            region->start >= region->end ||
            handler->length & 3 ||
            handler->length == 0)
                return -EINVAL;

        spin_lock_irqsave(&address_handler_lock, flags);

        handler->offset = region->start;
        while (handler->offset + handler->length <= region->end) {
                other =
                    lookup_overlapping_address_handler(&address_handler_list,
                                                       handler->offset,
                                                       handler->length);
                if (other != NULL) {
                        handler->offset += other->length;
                } else {
                        list_add_tail(&handler->link, &address_handler_list);
                        ret = 0;
                        break;
                }
        }

        spin_unlock_irqrestore(&address_handler_lock, flags);

        return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
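
/*
 * Illustrative sketch only (not part of the original file; the handler and
 * callback names are made up): registering 0x100 bytes somewhere in
 * fw_high_memory_region.  The core picks the actual start address and
 * stores it in my_handler.offset; the callback must answer every request
 * with fw_send_response():
 *
 *      static void my_address_callback(struct fw_card *card,
 *                                      struct fw_request *request,
 *                                      int tcode, int destination, int source,
 *                                      int generation, int speed,
 *                                      unsigned long long offset,
 *                                      void *payload, size_t length,
 *                                      void *callback_data)
 *      {
 *              fw_send_response(card, request, RCODE_COMPLETE);
 *      }
 *
 *      static struct fw_address_handler my_handler = {
 *              .length           = 0x100,
 *              .address_callback = my_address_callback,
 *      };
 *
 *      if (fw_core_add_address_handler(&my_handler,
 *                                      &fw_high_memory_region) < 0)
 *              fw_error("failed to register address handler\n");
 */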

/**
 * fw_core_remove_address_handler - unregister an address handler
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
        unsigned long flags;

        spin_lock_irqsave(&address_handler_lock, flags);
        list_del(&handler->link);
        spin_unlock_irqrestore(&address_handler_lock, flags);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
        struct fw_packet response;
        u32 request_header[4];
        int ack;
        u32 length;
        u32 data[0];
};

static void free_response_callback(struct fw_packet *packet,
                                   struct fw_card *card, int status)
{
        struct fw_request *request;

        request = container_of(packet, struct fw_request, response);
        kfree(request);
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
                      int rcode, void *payload, size_t length)
{
        int tcode, tlabel, extended_tcode, source, destination;

        tcode          = HEADER_GET_TCODE(request_header[0]);
        tlabel         = HEADER_GET_TLABEL(request_header[0]);
        /* The response goes back to the requester, hence the swap. */
        source         = HEADER_GET_DESTINATION(request_header[0]);
        destination    = HEADER_GET_SOURCE(request_header[1]);
        extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

        response->header[0] =
                HEADER_RETRY(RETRY_1) |
                HEADER_TLABEL(tlabel) |
                HEADER_DESTINATION(destination);
        response->header[1] =
                HEADER_SOURCE(source) |
                HEADER_RCODE(rcode);
        response->header[2] = 0;

        switch (tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
                response->header_length = 12;
                response->payload_length = 0;
                break;

        case TCODE_READ_QUADLET_REQUEST:
                response->header[0] |=
                        HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
                if (payload != NULL)
                        response->header[3] = *(u32 *)payload;
                else
                        response->header[3] = 0;
                response->header_length = 16;
                response->payload_length = 0;
                break;

        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_REQUEST:
                response->header[0] |= HEADER_TCODE(tcode + 2);
                response->header[3] =
                        HEADER_DATA_LENGTH(length) |
                        HEADER_EXTENDED_TCODE(extended_tcode);
                response->header_length = 16;
                response->payload = payload;
                response->payload_length = length;
                break;

        default:
                WARN(1, KERN_ERR "wrong tcode %d", tcode);
        }

        response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static struct fw_request *allocate_request(struct fw_packet *p)
{
        struct fw_request *request;
        u32 *data, length;
        int request_tcode, t;

        request_tcode = HEADER_GET_TCODE(p->header[0]);
        switch (request_tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
                data = &p->header[3];
                length = 4;
                break;

        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_LOCK_REQUEST:
                data = p->payload;
                length = HEADER_GET_DATA_LENGTH(p->header[3]);
                break;

        case TCODE_READ_QUADLET_REQUEST:
                data = NULL;
                length = 4;
                break;

        case TCODE_READ_BLOCK_REQUEST:
                data = NULL;
                length = HEADER_GET_DATA_LENGTH(p->header[3]);
                break;

        default:
                fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
                         p->header[0], p->header[1], p->header[2]);
                return NULL;
        }

        request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
        if (request == NULL)
                return NULL;

        /*
         * Compute the response timestamp: the low 13 bits of the request
         * timestamp hold the 8 kHz cycle count, the upper bits count
         * seconds.  Add 4000 cycles (half a second) and carry into the
         * seconds field if the cycle count wraps past 8000.
         */
        t = (p->timestamp & 0x1fff) + 4000;
        if (t >= 8000)
                t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
        else
                t = (p->timestamp & ~0x1fff) + t;

        request->response.speed = p->speed;
        request->response.timestamp = t;
        request->response.generation = p->generation;
        request->response.ack = 0;
        request->response.callback = free_response_callback;
        request->ack = p->ack;
        request->length = length;
        if (data)
                memcpy(request->data, data, length);

        memcpy(request->request_header, p->header, sizeof(p->header));

        return request;
}

void fw_send_response(struct fw_card *card,
                      struct fw_request *request, int rcode)
{
        /* unified transaction or broadcast transaction: don't respond */
        if (request->ack != ACK_PENDING ||
            HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
                kfree(request);
                return;
        }

        if (rcode == RCODE_COMPLETE)
                fw_fill_response(&request->response, request->request_header,
                                 rcode, request->data, request->length);
        else
                fw_fill_response(&request->response, request->request_header,
                                 rcode, NULL, 0);

        card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
        struct fw_address_handler *handler;
        struct fw_request *request;
        unsigned long long offset;
        unsigned long flags;
        int tcode, destination, source;

        if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
                return;

        request = allocate_request(p);
        if (request == NULL) {
                /* FIXME: send statically allocated busy packet. */
                return;
        }

        offset =
                ((unsigned long long)
                 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
        tcode       = HEADER_GET_TCODE(p->header[0]);
        destination = HEADER_GET_DESTINATION(p->header[0]);
        source      = HEADER_GET_SOURCE(p->header[1]);

        spin_lock_irqsave(&address_handler_lock, flags);
        handler = lookup_enclosing_address_handler(&address_handler_list,
                                                   offset, request->length);
        spin_unlock_irqrestore(&address_handler_lock, flags);

        /*
         * FIXME: lookup the fw_node corresponding to the sender of
         * this request and pass that to the address handler instead
         * of the node ID.  We may also want to move the address
         * allocations to fw_node so we only do this callback if the
         * upper layers registered it for this node.
         */

        if (handler == NULL)
                fw_send_response(card, request, RCODE_ADDRESS_ERROR);
        else
                handler->address_callback(card, request,
                                          tcode, destination, source,
                                          p->generation, p->speed, offset,
                                          request->data, request->length,
                                          handler->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
        struct fw_transaction *t;
        unsigned long flags;
        u32 *data;
        size_t data_length;
        int tcode, tlabel, destination, source, rcode;

        tcode       = HEADER_GET_TCODE(p->header[0]);
        tlabel      = HEADER_GET_TLABEL(p->header[0]);
        destination = HEADER_GET_DESTINATION(p->header[0]);
        source      = HEADER_GET_SOURCE(p->header[1]);
        rcode       = HEADER_GET_RCODE(p->header[1]);

        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(t, &card->transaction_list, link) {
                if (t->node_id == source && t->tlabel == tlabel) {
                        list_del(&t->link);
                        card->tlabel_mask &= ~(1ULL << t->tlabel);
                        break;
                }
        }
        spin_unlock_irqrestore(&card->lock, flags);

        if (&t->link == &card->transaction_list) {
                fw_notify("Unsolicited response (source %x, tlabel %x)\n",
                          source, tlabel);
                return;
        }

        /*
         * FIXME: sanity check packet, is the length correct, do the
         * tcodes and addresses match.
         */

        switch (tcode) {
        case TCODE_READ_QUADLET_RESPONSE:
                data = (u32 *) &p->header[3];
                data_length = 4;
                break;

        case TCODE_WRITE_RESPONSE:
                data = NULL;
                data_length = 0;
                break;

        case TCODE_READ_BLOCK_RESPONSE:
        case TCODE_LOCK_RESPONSE:
                data = p->payload;
                data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
                break;

        default:
                /* Should never happen, this is just to shut up gcc. */
                data = NULL;
                data_length = 0;
                break;
        }

        /*
         * The response handler may be executed while the request handler
         * is still pending.  Cancel the request handler.
         */
        card->driver->cancel_packet(card, &t->packet);

        t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);

static const struct fw_address_region topology_map_region =
        { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
          .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
                int tcode, int destination, int source, int generation,
                int speed, unsigned long long offset,
                void *payload, size_t length, void *callback_data)
{
        int start;

        if (!TCODE_IS_READ_REQUEST(tcode)) {
                fw_send_response(card, request, RCODE_TYPE_ERROR);
                return;
        }

        if ((offset & 3) > 0 || (length & 3) > 0) {
                fw_send_response(card, request, RCODE_ADDRESS_ERROR);
                return;
        }

        start = (offset - topology_map_region.start) / 4;
        memcpy(payload, &card->topology_map[start], length);

        fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
        .length           = 0x400,
        .address_callback = handle_topology_map,
};

static const struct fw_address_region registers_region =
        { .start = CSR_REGISTER_BASE,
          .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void handle_registers(struct fw_card *card, struct fw_request *request,
                int tcode, int destination, int source, int generation,
                int speed, unsigned long long offset,
                void *payload, size_t length, void *callback_data)
{
        int reg = offset & ~CSR_REGISTER_BASE;
        unsigned long long bus_time;
        __be32 *data = payload;
        int rcode = RCODE_COMPLETE;

        switch (reg) {
        case CSR_CYCLE_TIME:
        case CSR_BUS_TIME:
                if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
                        rcode = RCODE_TYPE_ERROR;
                        break;
                }

                bus_time = card->driver->get_bus_time(card);
                if (reg == CSR_CYCLE_TIME)
                        *data = cpu_to_be32(bus_time);
                else
                        *data = cpu_to_be32(bus_time >> 25);
                break;

        case CSR_BROADCAST_CHANNEL:
                if (tcode == TCODE_READ_QUADLET_REQUEST)
                        *data = cpu_to_be32(card->broadcast_channel);
                else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
                        card->broadcast_channel =
                            (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
                            BROADCAST_CHANNEL_INITIAL;
                else
                        rcode = RCODE_TYPE_ERROR;
                break;

        case CSR_BUS_MANAGER_ID:
        case CSR_BANDWIDTH_AVAILABLE:
        case CSR_CHANNELS_AVAILABLE_HI:
        case CSR_CHANNELS_AVAILABLE_LO:
                /*
                 * FIXME: these are handled by the OHCI hardware and
                 * the stack never sees these requests.  If we add
                 * support for a new type of controller that doesn't
                 * handle this in hardware we need to deal with these
                 * transactions.
                 */
                BUG();
                break;

        case CSR_BUSY_TIMEOUT:
                /* FIXME: Implement this. */
        default:
                rcode = RCODE_ADDRESS_ERROR;
                break;
        }

        fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
        .length           = 0x400,
        .address_callback = handle_registers,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
        /* textual descriptor leaf () */
        0x00060000,
        0x00000000,
        0x00000000,
        0x4c696e75,     /* L i n u */
        0x78204669,     /* x   F i */
        0x72657769,     /* r e w i */
        0x72650000,     /* r e     */
};

static const u32 model_textual_descriptor[] = {
        /* model descriptor leaf () */
        0x00030000,
        0x00000000,
        0x00000000,
        0x4a756a75,     /* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
        .length    = ARRAY_SIZE(vendor_textual_descriptor),
        .immediate = 0x03d00d1e,
        .key       = 0x81000000,
        .data      = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
        .length    = ARRAY_SIZE(model_textual_descriptor),
        .immediate = 0x17000001,
        .key       = 0x81000000,
        .data      = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
        int ret;

        ret = bus_register(&fw_bus_type);
        if (ret < 0)
                return ret;

        fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
        if (fw_cdev_major < 0) {
                bus_unregister(&fw_bus_type);
                return fw_cdev_major;
        }

        fw_core_add_address_handler(&topology_map, &topology_map_region);
        fw_core_add_address_handler(&registers, &registers_region);
        fw_core_add_descriptor(&vendor_id_descriptor);
        fw_core_add_descriptor(&model_id_descriptor);

        return 0;
}

static void __exit fw_core_cleanup(void)
{
        unregister_chrdev(fw_cdev_major, "firewire");
        bus_unregister(&fw_bus_type);
        idr_destroy(&fw_device_idr);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);