/* -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <asm/uaccess.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"

/*
 * TODO:
 *
 * - bus resets send a new packet with the new generation and node id
 */

/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the containing struct. */

struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct fw_cdev_event_response response;
};

struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

/* Per-open-file state for one /dev/fw* client. */
struct client {
        struct fw_device *device;
        spinlock_t lock;
        struct list_head handler_list;
        struct list_head request_list;
        u32 request_serial;
        struct list_head event_list;
        struct semaphore event_list_sem;
        wait_queue_head_t wait;
        struct fw_iso_context *iso_context;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}

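/*
 * Illustrative note (a sketch, not driver code): user pointers cross the
 * ioctl ABI as __u64 so that one struct layout serves both 32- and
 * 64-bit userlands.  A hypothetical userspace caller performs the
 * inverse of u64_to_uptr() above, e.g.:
 *
 *      struct fw_cdev_send_request request;
 *      char payload[4];
 *
 *      request.data = (__u64)(unsigned long)payload;
 */
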
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = container_of(inode->i_cdev, struct fw_device, cdev);

        client = kzalloc(sizeof *client, GFP_KERNEL);
        if (client == NULL)
                return -ENOMEM;

        client->device = fw_device_get(device);
        INIT_LIST_HEAD(&client->event_list);
        sema_init(&client->event_list_sem, 0);
        INIT_LIST_HEAD(&client->handler_list);
        INIT_LIST_HEAD(&client->request_list);
        spin_lock_init(&client->lock);
        init_waitqueue_head(&client->wait);

        file->private_data = client;

        return 0;
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);

        list_add_tail(&event->link, &client->event_list);

        up(&client->event_list_sem);
        wake_up_interruptible(&client->wait);

        spin_unlock_irqrestore(&client->lock, flags);
}

static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, retval = -EFAULT;

        if (down_interruptible(&client->event_list_sem) < 0)
                return -EINTR;

        spin_lock_irqsave(&client->lock, flags);

        event = container_of(client->event_list.next, struct event, link);
        list_del(&event->link);

        spin_unlock_irqrestore(&client->lock, flags);

        /* A NULL buffer simply discards the event; fw_device_op_release()
         * uses this to drain the event list. */
        if (buffer == NULL)
                goto out;

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size))
                        goto out;
                total += size;
        }
        retval = total;

 out:
        kfree(event);

        return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
                  char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

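/*
 * Hypothetical userspace sketch (assumes a /dev/fw0 node and the event
 * layout from fw-device-cdev.h): events queued by queue_event() above
 * are consumed with a plain blocking read().
 *
 *      char buffer[256];
 *      int fd = open("/dev/fw0", O_RDWR);
 *      ssize_t len = read(fd, buffer, sizeof buffer);
 *      struct fw_cdev_event_response *e = (void *)buffer;
 *
 *      if (len >= 0 && e->type == FW_CDEV_EVENT_RESPONSE)
 *              handle_response(e);
 *
 * handle_response() is a placeholder for application code.
 */
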
static int ioctl_config_rom(struct client *client, void __user *arg)
{
        struct fw_cdev_get_config_rom rom;

        rom.length = client->device->config_rom_length;
        memcpy(rom.data, client->device->config_rom, rom.length * 4);
        /* Copy out only the header plus the valid quadlets of rom.data. */
        if (copy_to_user(arg, &rom,
                         (char *)&rom.data[rom.length] - (char *)&rom))
                return -EFAULT;

        return 0;
}

static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;

        if (length < response->response.length)
                response->response.length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(response->response.data, payload,
                       response->response.length);

        response->response.type = FW_CDEV_EVENT_RESPONSE;
        response->response.rcode = rcode;
        queue_event(client, &response->event,
                    &response->response, sizeof response->response,
                    response->response.data, response->response.length);
}

static ssize_t ioctl_send_request(struct client *client, void __user *arg)
{
        struct fw_device *device = client->device;
        struct fw_cdev_send_request request;
        struct response *response;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        /* What is the biggest size we'll accept, really? */
        if (request.length > 4096)
                return -EINVAL;

        response = kmalloc(sizeof *response + request.length, GFP_KERNEL);
        if (response == NULL)
                return -ENOMEM;

        response->client = client;
        response->response.length = request.length;
        response->response.closure = request.closure;

        if (request.data &&
            copy_from_user(response->response.data,
                           u64_to_uptr(request.data), request.length)) {
                kfree(response);
                return -EFAULT;
        }

        fw_send_request(device->card, &response->transaction,
                        request.tcode,
                        device->node->node_id,
                        device->card->generation,
                        device->node->max_speed,
                        request.offset,
                        response->response.data, request.length,
                        complete_transaction, response);

        if (request.data)
                return sizeof request + request.length;
        else
                return sizeof request;
}

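/*
 * Hypothetical userspace sketch of a quadlet read through this ioctl.
 * The field names match the uses above; TCODE_READ_QUADLET_REQUEST and
 * the CSR config ROM offset come from IEEE 1394 and are assumptions of
 * this example, not definitions made by this file.
 *
 *      struct fw_cdev_send_request request;
 *
 *      request.tcode   = TCODE_READ_QUADLET_REQUEST;
 *      request.length  = 4;
 *      request.offset  = 0xfffff0000400ULL;
 *      request.closure = 0;
 *      request.data    = 0;
 *      ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &request);
 *
 * The resulting FW_CDEV_EVENT_RESPONSE event, carrying the rcode and
 * payload, is then picked up with read() as sketched earlier.
 */
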
struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
        struct list_head link;
};

struct request {
        struct fw_request *request;
        void *data;
        size_t length;
        u32 serial;
        struct list_head link;
};

struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};

static void
handle_request(struct fw_card *card, struct fw_request *r,
               int tcode, int destination, int source,
               int generation, int speed,
               unsigned long long offset,
               void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        unsigned long flags;
        struct client *client = handler->client;

        request = kmalloc(sizeof *request, GFP_ATOMIC);
        e = kmalloc(sizeof *e, GFP_ATOMIC);
        if (request == NULL || e == NULL) {
                kfree(request);
                kfree(e);
                fw_send_response(card, r, RCODE_CONFLICT_ERROR);
                return;
        }

        request->request = r;
        request->data = payload;
        request->length = length;

        spin_lock_irqsave(&client->lock, flags);
        request->serial = client->request_serial++;
        list_add_tail(&request->link, &client->request_list);
        spin_unlock_irqrestore(&client->lock, flags);

        e->request.type = FW_CDEV_EVENT_REQUEST;
        e->request.tcode = tcode;
        e->request.offset = offset;
        e->request.length = length;
        e->request.serial = request->serial;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof e->request, payload, length);
}

static int ioctl_allocate(struct client *client, void __user *arg)
{
        struct fw_cdev_allocate request;
        struct address_handler *handler;
        unsigned long flags;
        struct fw_address_region region;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        handler = kmalloc(sizeof *handler, GFP_KERNEL);
        if (handler == NULL)
                return -ENOMEM;

        region.start = request.offset;
        region.end = request.offset + request.length;
        handler->handler.length = request.length;
        handler->handler.address_callback = handle_request;
        handler->handler.callback_data = handler;
        handler->closure = request.closure;
        handler->client = client;

        if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
                kfree(handler);
                return -EBUSY;
        }

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&handler->link, &client->handler_list);
        spin_unlock_irqrestore(&client->lock, flags);

        return 0;
}

static int ioctl_send_response(struct client *client, void __user *arg)
{
        struct fw_cdev_send_response request;
        struct request *r;
        unsigned long flags;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        spin_lock_irqsave(&client->lock, flags);
        list_for_each_entry(r, &client->request_list, link) {
                if (r->serial == request.serial) {
                        list_del(&r->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&client->lock, flags);

        /* If the loop ran to completion, r points at the list head and
         * no request with the given serial was found. */
        if (&r->link == &client->request_list)
                return -EINVAL;

        if (request.length < r->length)
                r->length = request.length;
        if (copy_from_user(r->data, u64_to_uptr(request.data), r->length))
                return -EFAULT;

        fw_send_response(client->device->card, r->request, request.rcode);

        kfree(r);

        return 0;
}

static void
iso_callback(struct fw_iso_context *context, int status, u32 cycle, void *data)
{
        struct client *client = data;
        struct iso_interrupt *interrupt;

        interrupt = kzalloc(sizeof *interrupt, GFP_ATOMIC);
        if (interrupt == NULL)
                return;

        interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
        interrupt->interrupt.closure = 0;
        interrupt->interrupt.cycle = cycle;
        queue_event(client, &interrupt->event,
                    &interrupt->interrupt, sizeof interrupt->interrupt,
                    NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void __user *arg)
{
        struct fw_cdev_create_iso_context request;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        if (request.type > FW_ISO_CONTEXT_RECEIVE)
                return -EINVAL;

        client->iso_context = fw_iso_context_create(client->device->card,
                                                    request.type,
                                                    request.header_size,
                                                    iso_callback, client);
        if (IS_ERR(client->iso_context))
                return PTR_ERR(client->iso_context);

        return 0;
}

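/*
 * Hypothetical userspace sketch (field names taken from the uses above):
 * create a receive context whose per-packet headers are one quadlet,
 * matching the header_size multiple enforced in ioctl_queue_iso() below.
 *
 *      struct fw_cdev_create_iso_context request;
 *
 *      request.type        = FW_ISO_CONTEXT_RECEIVE;
 *      request.header_size = 4;
 *      ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &request);
 */
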
static int ioctl_queue_iso(struct client *client, void __user *arg)
{
        struct fw_cdev_queue_iso request;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        unsigned long payload, payload_end, header_length;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (client->iso_context == NULL)
                return -EINVAL;
        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        /* If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request.data pointer is ignored. */

        payload = (unsigned long)request.data - client->vm_start;
        payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
        if (request.data == 0 || client->buffer.pages == NULL ||
            payload >= payload_end) {
                payload = 0;
                payload_end = 0;
        }

        if (!access_ok(VERIFY_READ, request.packets, request.size))
                return -EFAULT;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets);
        end = (void __user *)p + request.size;
        count = 0;
        while (p < end) {
                if (__copy_from_user(&u.packet, p, sizeof *p))
                        return -EFAULT;

                if (client->iso_context->type == FW_ISO_CONTEXT_TRANSMIT) {
                        header_length = u.packet.header_length;
                } else {
                        /* We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size. */
                        if (u.packet.header_length %
                            client->iso_context->header_size != 0)
                                return -EINVAL;
                        header_length = 0;
                }

                /* Reject headers that would overflow u.header. */
                if (header_length > sizeof u.header)
                        return -EINVAL;

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user(u.packet.header,
                                     p->header, header_length))
                        return -EFAULT;
                if (u.packet.skip &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > payload_end)
                        return -EINVAL;

                if (fw_iso_context_queue(client->iso_context,
                                         &u.packet, &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        request.size    -= uptr_to_u64(p) - request.packets;
        request.packets  = uptr_to_u64(p);
        request.data     = client->vm_start + payload;

        if (copy_to_user(arg, &request, sizeof request))
                return -EFAULT;

        return count;
}

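/*
 * Illustrative sketch of the packet list parsed above: request.packets
 * points at a packed array of variable-size entries, each a struct
 * fw_cdev_iso_packet followed (for transmit contexts) by header_length
 * bytes of inline header.  A hypothetical single-packet transmit
 * submission, with mapped_buffer being the mmap()'ed iso buffer (see
 * fw_device_op_mmap() below):
 *
 *      struct {
 *              struct fw_cdev_iso_packet packet;
 *              u32 header[1];
 *      } p;
 *      struct fw_cdev_queue_iso request;
 *
 *      p.packet.payload_length = 1024;
 *      p.packet.header_length  = sizeof p.header;
 *      p.packet.skip           = 0;
 *      p.header[0]             = 0;
 *
 *      request.packets = (__u64)(unsigned long)&p;
 *      request.size    = sizeof p;
 *      request.data    = (__u64)(unsigned long)mapped_buffer;
 *      ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &request);
 */
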
static int ioctl_start_iso(struct client *client, void __user *arg)
{
        struct fw_cdev_start_iso request;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        return fw_iso_context_start(client->iso_context, request.channel,
                                    request.speed, request.cycle);
}

static int ioctl_stop_iso(struct client *client, void __user *arg)
{
        return fw_iso_context_stop(client->iso_context);
}

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
        switch (cmd) {
        case FW_CDEV_IOC_GET_CONFIG_ROM:
                return ioctl_config_rom(client, arg);
        case FW_CDEV_IOC_SEND_REQUEST:
                return ioctl_send_request(client, arg);
        case FW_CDEV_IOC_ALLOCATE:
                return ioctl_allocate(client, arg);
        case FW_CDEV_IOC_SEND_RESPONSE:
                return ioctl_send_response(client, arg);
        case FW_CDEV_IOC_CREATE_ISO_CONTEXT:
                return ioctl_create_iso_context(client, arg);
        case FW_CDEV_IOC_QUEUE_ISO:
                return ioctl_queue_iso(client, arg);
        case FW_CDEV_IOC_START_ISO:
                return ioctl_start_iso(client, arg);
        case FW_CDEV_IOC_STOP_ISO:
                return ioctl_stop_iso(client, arg);
        default:
                return -EINVAL;
        }
}

static long
fw_device_op_ioctl(struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, retval;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        retval = fw_iso_buffer_init(&client->buffer, client->device->card,
                                    page_count, direction);
        if (retval < 0)
                return retval;

        retval = fw_iso_buffer_map(&client->buffer, vma);
        if (retval < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return retval;
}

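/*
 * Hypothetical userspace sketch: the iso payload buffer is obtained by
 * mmap()'ing the character device.  MAP_SHARED is mandatory (VM_SHARED
 * is checked above) and the length must be a multiple of the page size:
 *
 *      size_t size = 16 * 4096;
 *      void *mapped_buffer = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, fd, 0);
 */
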
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct address_handler *h, *next;
        struct request *r, *next_r;

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        list_for_each_entry_safe(h, next, &client->handler_list, link) {
                fw_core_remove_address_handler(&h->handler);
                kfree(h);
        }

        list_for_each_entry_safe(r, next_r, &client->request_list, link) {
                fw_send_response(client->device->card, r->request,
                                 RCODE_CONFLICT_ERROR);
                kfree(r);
        }

        /* TODO: wait for all transactions to finish so
         * complete_transaction doesn't try to queue up responses
         * after we free client. */
        while (!list_empty(&client->event_list))
                dequeue_event(client, NULL, 0);

        fw_device_put(client->device);
        kfree(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;

        poll_wait(file, &client->wait, pt);

        if (!list_empty(&client->event_list))
                return POLLIN | POLLRDNORM;
        else
                return 0;
}

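/*
 * Hypothetical userspace sketch: poll() (or select()) can be used to
 * wait for the next event instead of blocking in read():
 *
 *      struct pollfd fds = { .fd = fd, .events = POLLIN };
 *
 *      if (poll(&fds, 1, -1) > 0 && (fds.revents & POLLIN))
 *              len = read(fd, buffer, sizeof buffer);
 */
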
const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll           = fw_device_op_poll,
        .release        = fw_device_op_release,
        .mmap           = fw_device_op_mmap,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};