/* drivers/firewire/fw-cdev.c */
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
  20. #include <linux/module.h>
  21. #include <linux/kernel.h>
  22. #include <linux/wait.h>
  23. #include <linux/errno.h>
  24. #include <linux/device.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/poll.h>
  27. #include <linux/preempt.h>
  28. #include <linux/time.h>
  29. #include <linux/delay.h>
  30. #include <linux/mm.h>
  31. #include <linux/idr.h>
  32. #include <linux/compat.h>
  33. #include <linux/firewire-cdev.h>
  34. #include <asm/system.h>
  35. #include <asm/uaccess.h>
  36. #include "fw-transaction.h"
  37. #include "fw-topology.h"
  38. #include "fw-device.h"
struct client;

/*
 * Per-client bookkeeping for a kernel-side object (pending transaction,
 * address handler, config-ROM descriptor, or inbound request) that must
 * be cleaned up when the client releases its handle or closes the file.
 */
struct client_resource {
        struct list_head link;  /* chained on client->resource_list */
        void (*release)(struct client *client, struct client_resource *r);
        u32 handle;             /* identifier handed back to user space */
};
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */
struct event {
        /* Up to two data chunks that get copied to user space on read(). */
        struct { void *data; size_t size; } v[2];
        struct list_head link;  /* chained on client->event_list */
};
/* Event carrying an FW_CDEV_EVENT_BUS_RESET to user space. */
struct bus_reset {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

/* Outbound transaction state plus its eventual completion event. */
struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct client_resource resource;        /* allows cancellation */
        struct fw_cdev_event_response response;
};

/* Event carrying an FW_CDEV_EVENT_ISO_INTERRUPT to user space. */
struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};
/* Per-open-file state of a /dev/fw* character device. */
struct client {
        u32 version;                    /* ABI version from FW_CDEV_IOC_GET_INFO */
        struct fw_device *device;
        spinlock_t lock;                /* protects resource_list and event_list */
        u32 resource_handle;            /* next resource handle to hand out */
        struct list_head resource_list;
        struct list_head event_list;
        wait_queue_head_t wait;         /* readers block here for events */
        u64 bus_reset_closure;          /* echoed in bus reset events */
        struct fw_iso_context *iso_context;     /* at most one per client */
        u64 iso_closure;                /* echoed in iso interrupt events */
        struct fw_iso_buffer buffer;    /* the mmap()'ed iso DMA buffer */
        unsigned long vm_start;         /* user virtual address of that mapping */
        struct list_head link;          /* on device->client_list */
};
  83. static inline void __user *
  84. u64_to_uptr(__u64 value)
  85. {
  86. return (void __user *)(unsigned long)value;
  87. }
  88. static inline __u64
  89. uptr_to_u64(void __user *ptr)
  90. {
  91. return (__u64)(unsigned long)ptr;
  92. }
  93. static int fw_device_op_open(struct inode *inode, struct file *file)
  94. {
  95. struct fw_device *device;
  96. struct client *client;
  97. unsigned long flags;
  98. device = fw_device_get_by_devt(inode->i_rdev);
  99. if (device == NULL)
  100. return -ENODEV;
  101. client = kzalloc(sizeof(*client), GFP_KERNEL);
  102. if (client == NULL) {
  103. fw_device_put(device);
  104. return -ENOMEM;
  105. }
  106. client->device = device;
  107. INIT_LIST_HEAD(&client->event_list);
  108. INIT_LIST_HEAD(&client->resource_list);
  109. spin_lock_init(&client->lock);
  110. init_waitqueue_head(&client->wait);
  111. file->private_data = client;
  112. spin_lock_irqsave(&device->card->lock, flags);
  113. list_add_tail(&client->link, &device->client_list);
  114. spin_unlock_irqrestore(&device->card->lock, flags);
  115. return 0;
  116. }
  117. static void queue_event(struct client *client, struct event *event,
  118. void *data0, size_t size0, void *data1, size_t size1)
  119. {
  120. unsigned long flags;
  121. event->v[0].data = data0;
  122. event->v[0].size = size0;
  123. event->v[1].data = data1;
  124. event->v[1].size = size1;
  125. spin_lock_irqsave(&client->lock, flags);
  126. list_add_tail(&event->link, &client->event_list);
  127. spin_unlock_irqrestore(&client->lock, flags);
  128. wake_up_interruptible(&client->wait);
  129. }
/*
 * Block until an event is queued (or the device is shut down), then copy
 * at most @count bytes of the oldest event to user space and free it.
 * Returns the number of bytes copied or a negative errno.
 */
static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, retval;

        retval = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (retval < 0)
                return retval;

        /* Woken by device shutdown with nothing left to deliver. */
        if (list_empty(&client->event_list) &&
            fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irqsave(&client->lock, flags);
        event = container_of(client->event_list.next, struct event, link);
        list_del(&event->link);
        spin_unlock_irqrestore(&client->lock, flags);

        total = 0;
        /* Copy both chunks in order, truncated to the user's buffer. */
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        retval = -EFAULT;
                        goto out;
                }
                total += size;
        }
        retval = total;
 out:
        kfree(event);

        return retval;
}
  163. static ssize_t
  164. fw_device_op_read(struct file *file,
  165. char __user *buffer, size_t count, loff_t *offset)
  166. {
  167. struct client *client = file->private_data;
  168. return dequeue_event(client, buffer, count);
  169. }
/*
 * Fill in a bus reset event from the current bus topology.  Called both
 * on demand (ioctl_get_info) and when a reset event is queued.
 */
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                     struct client *client)
{
        struct fw_card *card = client->device->card;

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        smp_rmb(); /* node_id must not be older than generation */
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;
}
  185. static void
  186. for_each_client(struct fw_device *device,
  187. void (*callback)(struct client *client))
  188. {
  189. struct fw_card *card = device->card;
  190. struct client *c;
  191. unsigned long flags;
  192. spin_lock_irqsave(&card->lock, flags);
  193. list_for_each_entry(c, &device->client_list, link)
  194. callback(c);
  195. spin_unlock_irqrestore(&card->lock, flags);
  196. }
  197. static void
  198. queue_bus_reset_event(struct client *client)
  199. {
  200. struct bus_reset *bus_reset;
  201. bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
  202. if (bus_reset == NULL) {
  203. fw_notify("Out of memory when allocating bus reset event\n");
  204. return;
  205. }
  206. fill_bus_reset_event(&bus_reset->reset, client);
  207. queue_event(client, &bus_reset->event,
  208. &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
  209. }
/* Called by the core after a bus reset: send every client a reset event. */
void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

/*
 * Called by the core when the device goes away: wake blocked readers so
 * they observe fw_device_is_shutdown() and return -ENODEV.
 */
void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}
/*
 * FW_CDEV_IOC_GET_INFO: ABI version handshake plus optional copies of
 * the device's config ROM and of a current bus reset event.
 */
static int ioctl_get_info(struct client *client, void *buffer)
{
        struct fw_cdev_get_info *get_info = buffer;
        struct fw_cdev_event_bus_reset bus_reset;

        /* Remember the client's ABI version, report ours back. */
        client->version = get_info->version;
        get_info->version = FW_CDEV_VERSION;

        if (get_info->rom != 0) {
                void __user *uptr = u64_to_uptr(get_info->rom);
                size_t want = get_info->rom_length;
                size_t have = client->device->config_rom_length * 4;

                /* Copy no more than requested and no more than we have. */
                if (copy_to_user(uptr, client->device->config_rom,
                                 min(want, have)))
                        return -EFAULT;
        }
        /* Always report the full ROM size (in bytes, not quadlets). */
        get_info->rom_length = client->device->config_rom_length * 4;

        client->bus_reset_closure = get_info->bus_reset_closure;
        if (get_info->bus_reset != 0) {
                void __user *uptr = u64_to_uptr(get_info->bus_reset);

                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }
        get_info->card = client->device->card->index;

        return 0;
}
  247. static void
  248. add_client_resource(struct client *client, struct client_resource *resource)
  249. {
  250. unsigned long flags;
  251. spin_lock_irqsave(&client->lock, flags);
  252. list_add_tail(&resource->link, &client->resource_list);
  253. resource->handle = client->resource_handle++;
  254. spin_unlock_irqrestore(&client->lock, flags);
  255. }
/*
 * Look up the resource with @handle and unlink it.  If @resource is
 * non-NULL the caller takes ownership; otherwise the resource's release
 * hook is invoked.  Returns -EINVAL if no such handle exists.
 */
static int
release_client_resource(struct client *client, u32 handle,
                        struct client_resource **resource)
{
        struct client_resource *r;
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_for_each_entry(r, &client->resource_list, link) {
                if (r->handle == handle) {
                        list_del(&r->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&client->lock, flags);

        /*
         * If the loop ran to completion (no break), &r->link equals the
         * list head itself, which detects "handle not found".
         */
        if (&r->link == &client->resource_list)
                return -EINVAL;

        if (resource)
                *resource = r;
        else
                r->release(client, r);

        return 0;
}
  278. static void
  279. release_transaction(struct client *client, struct client_resource *resource)
  280. {
  281. struct response *response =
  282. container_of(resource, struct response, resource);
  283. fw_cancel_transaction(client->device->card, &response->transaction);
  284. }
/*
 * Transaction completion callback: unlink the pending resource, fill in
 * rcode and payload, and queue the FW_CDEV_EVENT_RESPONSE event.
 */
static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;
        unsigned long flags;

        /* Never report more payload than the preallocated buffer holds. */
        if (length < response->response.length)
                response->response.length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(response->response.data, payload,
                       response->response.length);

        spin_lock_irqsave(&client->lock, flags);
        list_del(&response->resource.link);
        spin_unlock_irqrestore(&client->lock, flags);

        response->response.type = FW_CDEV_EVENT_RESPONSE;
        response->response.rcode = rcode;
        queue_event(client, &response->event,
                    &response->response, sizeof(response->response),
                    response->response.data, response->response.length);
}
  306. static int ioctl_send_request(struct client *client, void *buffer)
  307. {
  308. struct fw_device *device = client->device;
  309. struct fw_cdev_send_request *request = buffer;
  310. struct response *response;
  311. /* What is the biggest size we'll accept, really? */
  312. if (request->length > 4096)
  313. return -EINVAL;
  314. response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
  315. if (response == NULL)
  316. return -ENOMEM;
  317. response->client = client;
  318. response->response.length = request->length;
  319. response->response.closure = request->closure;
  320. if (request->data &&
  321. copy_from_user(response->response.data,
  322. u64_to_uptr(request->data), request->length)) {
  323. kfree(response);
  324. return -EFAULT;
  325. }
  326. response->resource.release = release_transaction;
  327. add_client_resource(client, &response->resource);
  328. fw_send_request(device->card, &response->transaction,
  329. request->tcode & 0x1f,
  330. device->node->node_id,
  331. request->generation,
  332. device->max_speed,
  333. request->offset,
  334. response->response.data, request->length,
  335. complete_transaction, response);
  336. if (request->data)
  337. return sizeof(request) + request->length;
  338. else
  339. return sizeof(request);
  340. }
/* A client-claimed 1394 address range registered with the core. */
struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;                  /* user cookie echoed in request events */
        struct client *client;
        struct client_resource resource;
};

/* An inbound request awaiting a response from user space. */
struct request {
        struct fw_request *request;
        void *data;                     /* response payload, filled by user */
        size_t length;
        struct client_resource resource;
};

/* Event carrying an FW_CDEV_EVENT_REQUEST to user space. */
struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};
  357. static void
  358. release_request(struct client *client, struct client_resource *resource)
  359. {
  360. struct request *request =
  361. container_of(resource, struct request, resource);
  362. fw_send_response(client->device->card, request->request,
  363. RCODE_CONFLICT_ERROR);
  364. kfree(request);
  365. }
/*
 * Address handler callback: an inbound request hit a range this client
 * claimed.  Track it as a resource (so an unanswered request is rejected
 * on cleanup) and queue an FW_CDEV_EVENT_REQUEST for user space.
 */
static void
handle_request(struct fw_card *card, struct fw_request *r,
               int tcode, int destination, int source,
               int generation, int speed,
               unsigned long long offset,
               void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        struct client *client = handler->client;

        request = kmalloc(sizeof(*request), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (request == NULL || e == NULL) {
                /* kfree(NULL) is a no-op, so partial allocation is fine. */
                kfree(request);
                kfree(e);
                /* Out of memory: tell the requester to retry later. */
                fw_send_response(card, r, RCODE_CONFLICT_ERROR);
                return;
        }

        request->request = r;
        request->data    = payload;
        request->length  = length;

        request->resource.release = release_request;
        add_client_resource(client, &request->resource);

        e->request.type    = FW_CDEV_EVENT_REQUEST;
        e->request.tcode   = tcode;
        e->request.offset  = offset;
        e->request.length  = length;
        e->request.handle  = request->resource.handle;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof(e->request), payload, length);
}
  399. static void
  400. release_address_handler(struct client *client,
  401. struct client_resource *resource)
  402. {
  403. struct address_handler *handler =
  404. container_of(resource, struct address_handler, resource);
  405. fw_core_remove_address_handler(&handler->handler);
  406. kfree(handler);
  407. }
  408. static int ioctl_allocate(struct client *client, void *buffer)
  409. {
  410. struct fw_cdev_allocate *request = buffer;
  411. struct address_handler *handler;
  412. struct fw_address_region region;
  413. handler = kmalloc(sizeof(*handler), GFP_KERNEL);
  414. if (handler == NULL)
  415. return -ENOMEM;
  416. region.start = request->offset;
  417. region.end = request->offset + request->length;
  418. handler->handler.length = request->length;
  419. handler->handler.address_callback = handle_request;
  420. handler->handler.callback_data = handler;
  421. handler->closure = request->closure;
  422. handler->client = client;
  423. if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
  424. kfree(handler);
  425. return -EBUSY;
  426. }
  427. handler->resource.release = release_address_handler;
  428. add_client_resource(client, &handler->resource);
  429. request->handle = handler->resource.handle;
  430. return 0;
  431. }
  432. static int ioctl_deallocate(struct client *client, void *buffer)
  433. {
  434. struct fw_cdev_deallocate *request = buffer;
  435. return release_client_resource(client, request->handle, NULL);
  436. }
  437. static int ioctl_send_response(struct client *client, void *buffer)
  438. {
  439. struct fw_cdev_send_response *request = buffer;
  440. struct client_resource *resource;
  441. struct request *r;
  442. if (release_client_resource(client, request->handle, &resource) < 0)
  443. return -EINVAL;
  444. r = container_of(resource, struct request, resource);
  445. if (request->length < r->length)
  446. r->length = request->length;
  447. if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
  448. return -EFAULT;
  449. fw_send_response(client->device->card, r->request, request->rcode);
  450. kfree(r);
  451. return 0;
  452. }
  453. static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
  454. {
  455. struct fw_cdev_initiate_bus_reset *request = buffer;
  456. int short_reset;
  457. short_reset = (request->type == FW_CDEV_SHORT_RESET);
  458. return fw_core_initiate_bus_reset(client->device->card, short_reset);
  459. }
/* A client-added config ROM descriptor; data[] holds its quadlets. */
struct descriptor {
        struct fw_descriptor d;
        struct client_resource resource;
        u32 data[0];    /* trailing array, allocated alongside the struct */
};
  465. static void release_descriptor(struct client *client,
  466. struct client_resource *resource)
  467. {
  468. struct descriptor *descriptor =
  469. container_of(resource, struct descriptor, resource);
  470. fw_core_remove_descriptor(&descriptor->d);
  471. kfree(descriptor);
  472. }
  473. static int ioctl_add_descriptor(struct client *client, void *buffer)
  474. {
  475. struct fw_cdev_add_descriptor *request = buffer;
  476. struct descriptor *descriptor;
  477. int retval;
  478. if (request->length > 256)
  479. return -EINVAL;
  480. descriptor =
  481. kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
  482. if (descriptor == NULL)
  483. return -ENOMEM;
  484. if (copy_from_user(descriptor->data,
  485. u64_to_uptr(request->data), request->length * 4)) {
  486. kfree(descriptor);
  487. return -EFAULT;
  488. }
  489. descriptor->d.length = request->length;
  490. descriptor->d.immediate = request->immediate;
  491. descriptor->d.key = request->key;
  492. descriptor->d.data = descriptor->data;
  493. retval = fw_core_add_descriptor(&descriptor->d);
  494. if (retval < 0) {
  495. kfree(descriptor);
  496. return retval;
  497. }
  498. descriptor->resource.release = release_descriptor;
  499. add_client_resource(client, &descriptor->resource);
  500. request->handle = descriptor->resource.handle;
  501. return 0;
  502. }
  503. static int ioctl_remove_descriptor(struct client *client, void *buffer)
  504. {
  505. struct fw_cdev_remove_descriptor *request = buffer;
  506. return release_client_resource(client, request->handle, NULL);
  507. }
  508. static void
  509. iso_callback(struct fw_iso_context *context, u32 cycle,
  510. size_t header_length, void *header, void *data)
  511. {
  512. struct client *client = data;
  513. struct iso_interrupt *irq;
  514. irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
  515. if (irq == NULL)
  516. return;
  517. irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
  518. irq->interrupt.closure = client->iso_closure;
  519. irq->interrupt.cycle = cycle;
  520. irq->interrupt.header_length = header_length;
  521. memcpy(irq->interrupt.header, header, header_length);
  522. queue_event(client, &irq->event, &irq->interrupt,
  523. sizeof(irq->interrupt) + header_length, NULL, 0);
  524. }
  525. static int ioctl_create_iso_context(struct client *client, void *buffer)
  526. {
  527. struct fw_cdev_create_iso_context *request = buffer;
  528. struct fw_iso_context *context;
  529. /* We only support one context at this time. */
  530. if (client->iso_context != NULL)
  531. return -EBUSY;
  532. if (request->channel > 63)
  533. return -EINVAL;
  534. switch (request->type) {
  535. case FW_ISO_CONTEXT_RECEIVE:
  536. if (request->header_size < 4 || (request->header_size & 3))
  537. return -EINVAL;
  538. break;
  539. case FW_ISO_CONTEXT_TRANSMIT:
  540. if (request->speed > SCODE_3200)
  541. return -EINVAL;
  542. break;
  543. default:
  544. return -EINVAL;
  545. }
  546. context = fw_iso_context_create(client->device->card,
  547. request->type,
  548. request->channel,
  549. request->speed,
  550. request->header_size,
  551. iso_callback, client);
  552. if (IS_ERR(context))
  553. return PTR_ERR(context);
  554. client->iso_closure = request->closure;
  555. client->iso_context = context;
  556. /* We only support one context at this time. */
  557. request->handle = 0;
  558. return 0;
  559. }
  560. /* Macros for decoding the iso packet control header. */
  561. #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
  562. #define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
  563. #define GET_SKIP(v) (((v) >> 17) & 0x01)
  564. #define GET_TAG(v) (((v) >> 18) & 0x02)
  565. #define GET_SY(v) (((v) >> 20) & 0x04)
  566. #define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
/*
 * FW_CDEV_IOC_QUEUE_ISO: walk an array of packed fw_cdev_iso_packet
 * descriptors in user memory and queue each packet on the iso context.
 * Returns the number of packets queued and rewrites the request struct
 * so user space can resubmit the unprocessed remainder.
 */
static int ioctl_queue_iso(struct client *client, void *buffer)
{
        struct fw_cdev_queue_iso *request = buffer;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, header_length;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || request->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request->data pointer is ignored.
         */
        payload = (unsigned long)request->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (request->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
        if (!access_ok(VERIFY_READ, p, request->size))
                return -EFAULT;

        end = (void __user *)p + request->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                /* Unpack the control word into a struct fw_iso_packet. */
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
                        header_length = u.packet.header_length;
                } else {
                        /*
                         * We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size.
                         */
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
                        } else if (u.packet.header_length % ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
                }

                /* Descriptors are variable-length: the next one starts
                 * right after this packet's inline header quadlets. */
                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, header_length))
                        return -EFAULT;
                /* A skip packet on transmit must carry no data at all. */
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                /* Payload must lie entirely within the mmap()'ed buffer. */
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        /* Report back how far we got, for partial resubmission. */
        request->size -= uptr_to_u64(p) - request->packets;
        request->packets = uptr_to_u64(p);
        request->data = client->vm_start + payload;

        return count;
}
  650. static int ioctl_start_iso(struct client *client, void *buffer)
  651. {
  652. struct fw_cdev_start_iso *request = buffer;
  653. if (client->iso_context == NULL || request->handle != 0)
  654. return -EINVAL;
  655. if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
  656. if (request->tags == 0 || request->tags > 15)
  657. return -EINVAL;
  658. if (request->sync > 15)
  659. return -EINVAL;
  660. }
  661. return fw_iso_context_start(client->iso_context, request->cycle,
  662. request->sync, request->tags);
  663. }
  664. static int ioctl_stop_iso(struct client *client, void *buffer)
  665. {
  666. struct fw_cdev_stop_iso *request = buffer;
  667. if (client->iso_context == NULL || request->handle != 0)
  668. return -EINVAL;
  669. return fw_iso_context_stop(client->iso_context);
  670. }
/*
 * FW_CDEV_IOC_GET_CYCLE_TIMER: sample the card's bus time and the system
 * clock as close together as possible -- IRQs and preemption are
 * disabled so the pair is taken back-to-back on this CPU.
 */
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
        struct fw_cdev_get_cycle_timer *request = buffer;
        struct fw_card *card = client->device->card;
        unsigned long long bus_time;
        struct timeval tv;
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);

        bus_time = card->driver->get_bus_time(card);
        do_gettimeofday(&tv);

        local_irq_restore(flags);
        preempt_enable();

        request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
        request->cycle_timer = bus_time & 0xffffffff;   /* low 32 bits only */
        return 0;
}
/*
 * Dispatch table: the index of each handler must equal the _IOC_NR() of
 * the corresponding FW_CDEV_IOC_* command -- do not reorder.
 */
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_get_info,
        ioctl_send_request,
        ioctl_allocate,
        ioctl_deallocate,
        ioctl_send_response,
        ioctl_initiate_bus_reset,
        ioctl_add_descriptor,
        ioctl_remove_descriptor,
        ioctl_create_iso_context,
        ioctl_queue_iso,
        ioctl_start_iso,
        ioctl_stop_iso,
        ioctl_get_cycle_timer,
};
/*
 * Decode an ioctl: the command number indexes ioctl_handlers[], and the
 * argument struct is bounced through a stack buffer (copied in for
 * _IOC_WRITE commands, copied back out for _IOC_READ ones).
 */
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
        char buffer[256];       /* holds the largest fw_cdev_* ioctl struct */
        int retval;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
                return -EINVAL;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
        if (retval < 0)
                return retval;

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        return 0;
}
  726. static long
  727. fw_device_op_ioctl(struct file *file,
  728. unsigned int cmd, unsigned long arg)
  729. {
  730. struct client *client = file->private_data;
  731. return dispatch_ioctl(client, cmd, (void __user *) arg);
  732. }
#ifdef CONFIG_COMPAT
/*
 * Compat ioctl for 32-bit processes on a 64-bit kernel: only the
 * argument pointer needs conversion (via compat_ptr).
 */
static long
fw_device_op_compat_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif
  742. static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
  743. {
  744. struct client *client = file->private_data;
  745. enum dma_data_direction direction;
  746. unsigned long size;
  747. int page_count, retval;
  748. /* FIXME: We could support multiple buffers, but we don't. */
  749. if (client->buffer.pages != NULL)
  750. return -EBUSY;
  751. if (!(vma->vm_flags & VM_SHARED))
  752. return -EINVAL;
  753. if (vma->vm_start & ~PAGE_MASK)
  754. return -EINVAL;
  755. client->vm_start = vma->vm_start;
  756. size = vma->vm_end - vma->vm_start;
  757. page_count = size >> PAGE_SHIFT;
  758. if (size & ~PAGE_MASK)
  759. return -EINVAL;
  760. if (vma->vm_flags & VM_WRITE)
  761. direction = DMA_TO_DEVICE;
  762. else
  763. direction = DMA_FROM_DEVICE;
  764. retval = fw_iso_buffer_init(&client->buffer, client->device->card,
  765. page_count, direction);
  766. if (retval < 0)
  767. return retval;
  768. retval = fw_iso_buffer_map(&client->buffer, vma);
  769. if (retval < 0)
  770. fw_iso_buffer_destroy(&client->buffer, client->device->card);
  771. return retval;
  772. }
/*
 * release() handler: tear down everything the client left behind --
 * iso buffer and context, unreleased resources, undelivered events --
 * then unregister the client from the device.
 */
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *e, *next_e;
        struct client_resource *r, *next_r;
        unsigned long flags;

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        /* Each release hook unlinks and/or frees its own resource. */
        list_for_each_entry_safe(r, next_r, &client->resource_list, link)
                r->release(client, r);

        /*
         * FIXME: We should wait for the async tasklets to stop
         * running before freeing the memory.
         */
        list_for_each_entry_safe(e, next_e, &client->event_list, link)
                kfree(e);

        spin_lock_irqsave(&client->device->card->lock, flags);
        list_del(&client->link);
        spin_unlock_irqrestore(&client->device->card->lock, flags);

        fw_device_put(client->device);
        kfree(client);

        return 0;
}
  798. static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
  799. {
  800. struct client *client = file->private_data;
  801. unsigned int mask = 0;
  802. poll_wait(file, &client->wait, pt);
  803. if (fw_device_is_shutdown(client->device))
  804. mask |= POLLHUP | POLLERR;
  805. if (!list_empty(&client->event_list))
  806. mask |= POLLIN | POLLRDNORM;
  807. return mask;
  808. }
/* File operations for /dev/fw* character devices (no write, no llseek). */
const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll           = fw_device_op_poll,
        .release        = fw_device_op_release,
        .mmap           = fw_device_op_mmap,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};