/*
 * firewire.h - core interfaces of the FireWire (IEEE 1394) driver stack
 */
#ifndef _LINUX_FIREWIRE_H
#define _LINUX_FIREWIRE_H

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
/* Logging helpers that prefix each message with the module name. */
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
  19. static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
  20. {
  21. u32 *dst = _dst;
  22. __be32 *src = _src;
  23. int i;
  24. for (i = 0; i < size / 4; i++)
  25. dst[i] = be32_to_cpu(src[i]);
  26. }
/*
 * CPU-order to big-endian copy.  A 32-bit byte swap is its own inverse,
 * so this is exactly the same operation as fw_memcpy_from_be32(); the
 * separate name only documents the direction at the call site.
 */
static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
	fw_memcpy_from_be32(_dst, _src, size);
}
/* Base of the IEEE 1212 control and status register (CSR) address space */
#define CSR_REGISTER_BASE		0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR			0x0
#define CSR_STATE_SET			0x4
#define CSR_NODE_IDS			0x8
#define CSR_RESET_START			0xc
#define CSR_SPLIT_TIMEOUT_HI		0x18
#define CSR_SPLIT_TIMEOUT_LO		0x1c
#define CSR_CYCLE_TIME			0x200
#define CSR_BUS_TIME			0x204
#define CSR_BUSY_TIMEOUT		0x210
#define CSR_BUS_MANAGER_ID		0x21c
#define CSR_BANDWIDTH_AVAILABLE		0x220
#define CSR_CHANNELS_AVAILABLE		0x224
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_BROADCAST_CHANNEL		0x234
#define CSR_CONFIG_ROM			0x400
#define CSR_CONFIG_ROM_END		0x800
#define CSR_FCP_COMMAND			0xB00
#define CSR_FCP_RESPONSE		0xD00
#define CSR_FCP_END			0xF00
#define CSR_TOPOLOGY_MAP		0x1000
#define CSR_TOPOLOGY_MAP_END		0x1400
#define CSR_SPEED_MAP			0x2000
#define CSR_SPEED_MAP_END		0x3000

/* Config ROM entry types (upper two bits of the key byte) */
#define CSR_OFFSET			0x40
#define CSR_LEAF			0x80
#define CSR_DIRECTORY			0xc0

/* Config ROM directory entry keys */
#define CSR_DESCRIPTOR			0x01
#define CSR_VENDOR			0x03
#define CSR_HARDWARE_VERSION		0x04
#define CSR_NODE_CAPABILITIES		0x0c
#define CSR_UNIT			0x11
#define CSR_SPECIFIER_ID		0x12
#define CSR_VERSION			0x13
#define CSR_DEPENDENT_INFO		0x14
#define CSR_MODEL			0x17
#define CSR_INSTANCE			0x18
#define CSR_DIRECTORY_ID		0x20
/* Iterator over the (key, value) entries of a config ROM directory. */
struct fw_csr_iterator {
	u32 *p;		/* next entry to visit -- TODO confirm */
	u32 *end;	/* end of the directory -- TODO confirm */
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
extern struct bus_type fw_bus_type;

/* Opaque types defined by fw-core internals. */
struct fw_card_driver;
struct fw_node;
/* One instance per FireWire host controller (card). */
struct fw_card {
	const struct fw_card_driver *driver;
	struct device *device;
	struct kref kref;
	struct completion done;

	int node_id;
	int generation;
	int current_tlabel;	/* transaction label allocation state */
	u64 tlabel_mask;	/* one bit per label; presumably marks labels in use */
	struct list_head transaction_list;
	struct timer_list flush_timer;
	unsigned long reset_jiffies;

	unsigned long long guid;
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;	/* isochronous resource manager -- TODO confirm */
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;
	struct list_head link;

	/* Work struct for BM duties. */
	struct delayed_work work;
	int bm_retries;
	int bm_generation;

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];

	/* Only non-NULL if firewire-ipv4 is active on this card. */
	void *netdev;
	/*
	 * The nodes get probed before the card, so we need a place to store
	 * them independent of card->netdev
	 */
	struct list_head ipv4_nodes;
};
  121. static inline struct fw_card *fw_card_get(struct fw_card *card)
  122. {
  123. kref_get(&card->kref);
  124. return card;
  125. }
void fw_card_release(struct kref *kref);	/* kref release function for struct fw_card */
  127. static inline void fw_card_put(struct fw_card *card)
  128. {
  129. kref_put(&card->kref, fw_card_release);
  130. }
/*
 * Backing storage for one sysfs attribute group.  The sizes suggest
 * groups[] holds the group plus a NULL terminator and attrs[] holds up
 * to 11 attributes plus a NULL terminator -- TODO confirm against users.
 */
struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[12];
};
/* Lifetime states of a fw_device; stored atomically in fw_device.state. */
enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};
/*
 * Note, fw_device.generation always has to be read before fw_device.node_id.
 * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
 * to an outdated node_id if the generation was updated in the meantime due
 * to a bus reset.
 *
 * Likewise, fw-core will take care to update .node_id before .generation so
 * that whenever fw_device.generation is current with respect to the actual
 * bus generation, fw_device.node_id is guaranteed to be current too.
 *
 * The same applies to fw_device.card->node_id vs. fw_device.generation.
 *
 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
 * was called on the last fw_unit.  Alternatively, they may be accessed while
 * holding fw_device_rwsem.
 */
/* One instance per node on the bus; parent of any fw_unit devices. */
struct fw_device {
	atomic_t state;			/* enum fw_device_state */
	struct fw_node *node;
	int node_id;			/* read .generation first; see note above */
	int generation;
	unsigned max_speed;
	struct fw_card *card;
	struct device device;

	struct mutex client_list_mutex;
	struct list_head client_list;

	/* may be accessed under fw_device_rwsem; see note above */
	u32 *config_rom;
	size_t config_rom_length;
	int config_rom_retries;

	unsigned is_local:1;
	unsigned max_rec:4;
	unsigned cmc:1;
	unsigned irmc:1;
	unsigned bc_implemented:2;

	struct delayed_work work;
	struct fw_attribute_group attribute_group;
};
/* Convert the embedded struct device back to its containing fw_device. */
static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}
  184. static inline int fw_device_is_shutdown(struct fw_device *device)
  185. {
  186. return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
  187. }
  188. static inline struct fw_device *fw_device_get(struct fw_device *device)
  189. {
  190. get_device(&device->device);
  191. return device;
  192. }
  193. static inline void fw_device_put(struct fw_device *device)
  194. {
  195. put_device(&device->device);
  196. }
int fw_device_enable_phys_dma(struct fw_device *device);	/* enable physical DMA for this node */
/*
 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
 */
struct fw_unit {
	struct device device;
	u32 *directory;		/* unit directory; see lifetime note above */
	struct fw_attribute_group attribute_group;
};
/* Convert the embedded struct device back to its containing fw_unit. */
static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}
  210. static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
  211. {
  212. get_device(&unit->device);
  213. return unit;
  214. }
  215. static inline void fw_unit_put(struct fw_unit *unit)
  216. {
  217. put_device(&unit->device);
  218. }
  219. static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
  220. {
  221. return fw_device(unit->device.parent);
  222. }
struct ieee1394_device_id;

/* A driver for fw_unit devices, matched via id_table. */
struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	const struct ieee1394_device_id *id_table;
};
struct fw_packet;
struct fw_request;

/* Completion callback for an outgoing packet; see fw_packet.callback. */
typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);

/* Completion callback for a transaction; see struct fw_transaction. */
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					  void *data, size_t length,
					  void *callback_data);

/*
 * Important note: The callback must guarantee that either fw_send_response()
 * or kfree() is called on the @request.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation, int speed,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);
/* An asynchronous packet, either queued for transmission or received. */
struct fw_packet {
	int speed;
	int generation;
	u32 header[4];
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has
	 * completed; for successful transmission, the status code is
	 * the ack received from the destination, otherwise it's a
	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};
/* An outstanding request/response pair; see fw_send_request(). */
struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;
	int timestamp;
	struct list_head link;

	struct fw_packet packet;

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};
/* A registered handler for a range of the 1394 address space. */
struct fw_address_handler {
	u64 offset;		/* start address; filled in by fw_core_add_address_handler() -- TODO confirm */
	size_t length;
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};
/* An address range; presumably [start, end) -- TODO confirm inclusivity. */
struct fw_address_region {
	u64 start;
	u64 end;
};
extern const struct fw_address_region fw_high_memory_region;

/* Asynchronous transaction API. */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
/* Synchronous wrapper; blocks until the transaction completes. -- TODO confirm */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
  309. static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
  310. {
  311. return tag << 14 | channel << 8 | sy;
  312. }
/* An extra config ROM block to export; see fw_core_add_descriptor(). */
struct fw_descriptor {
	struct list_head link;
	size_t length;
	u32 immediate;
	u32 key;
	const u32 *data;
};
int fw_core_add_descriptor(struct fw_descriptor *desc);
void fw_core_remove_descriptor(struct fw_descriptor *desc);

/*
 * The iso packet format allows for an immediate header/payload part
 * stored in 'header' immediately after the packet info plus an
 * indirect payload part that is pointed to by the 'payload' field.
 * Applications can use one or the other or both to implement simple
 * low-bandwidth streaming (e.g. audio) or more advanced
 * scatter-gather streaming (e.g. assembling video frames automatically).
 */
  330. struct fw_iso_packet {
  331. u16 payload_length; /* Length of indirect payload. */
  332. u32 interrupt:1; /* Generate interrupt on this packet */
  333. u32 skip:1; /* Set to not send packet at all. */
  334. u32 tag:2;
  335. u32 sy:4;
  336. u32 header_length:8; /* Length of immediate header. */
  337. u32 header[0];
  338. };
/* Isochronous context types */
#define FW_ISO_CONTEXT_TRANSMIT		0
#define FW_ISO_CONTEXT_RECEIVE		1

/* Tag match bits; presumably for fw_iso_context_start()'s 'tags' argument */
#define FW_ISO_CONTEXT_MATCH_TAG0	 1
#define FW_ISO_CONTEXT_MATCH_TAG1	 2
#define FW_ISO_CONTEXT_MATCH_TAG2	 4
#define FW_ISO_CONTEXT_MATCH_TAG3	 8
#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15

/*
 * An iso buffer is just a set of pages mapped for DMA in the
 * specified direction.  Since the pages are to be used for DMA, they
 * are not mapped into the kernel virtual address space.  We store the
 * DMA address in the page private.  The helper function
 * fw_iso_buffer_map() will map the pages into a given vma.
 */
/* DMA-mapped page set for iso I/O; see the comment above for the layout. */
struct fw_iso_buffer {
	enum dma_data_direction direction;
	struct page **pages;
	int page_count;
};
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);

struct fw_iso_context;

/* Per-packet callback of an iso context; see fw_iso_context_create(). */
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
				  u32 cycle, size_t header_length,
				  void *header, void *data);
/* An isochronous transmit or receive context on one card. */
struct fw_iso_context {
	struct fw_card *card;
	int type;		/* FW_ISO_CONTEXT_TRANSMIT or FW_ISO_CONTEXT_RECEIVE */
	int channel;
	int speed;
	size_t header_size;
	fw_iso_callback_t callback;
	void *callback_data;
};
/* Isochronous context lifecycle: create, queue packets, start/stop, destroy. */
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data);
int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);
int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
void fw_iso_context_destroy(struct fw_iso_context *ctx);

#endif /* _LINUX_FIREWIRE_H */