relay.c

/*
 * Public API and common code for kernel->userspace relay file support.
 *
 * See Documentation/filesystems/relayfs.txt for an overview of relayfs.
 *
 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
 *
 * Moved to kernel/relay.c by Paul Mundt, 2006.
 *
 * This file is released under the GPL.
 */
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/relay.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * close() vm_op implementation for relay file mapping.
 */
static void relay_file_mmap_close(struct vm_area_struct *vma)
{
	struct rchan_buf *buf = vma->vm_private_data;
	buf->chan->cb->buf_unmapped(buf, vma->vm_file);
}

/*
 * nopage() vm_op implementation for relay file mapping.
 */
static struct page *relay_buf_nopage(struct vm_area_struct *vma,
				     unsigned long address,
				     int *type)
{
	struct page *page;
	struct rchan_buf *buf = vma->vm_private_data;
	unsigned long offset = address - vma->vm_start;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!buf)
		return NOPAGE_OOM;

	page = vmalloc_to_page(buf->start + offset);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
/*
 * vm_ops for relay file mappings.
 */
static struct vm_operations_struct relay_file_mmap_ops = {
	.nopage = relay_buf_nopage,
	.close = relay_file_mmap_close,
};

/**
 * relay_mmap_buf - mmap channel buffer to process address space
 * @buf: relay channel buffer
 * @vma: vm_area_struct describing memory to be mapped
 *
 * Returns 0 if ok, negative on error
 *
 * Caller should already have grabbed mmap_sem.
 */
int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
	unsigned long length = vma->vm_end - vma->vm_start;
	struct file *filp = vma->vm_file;

	if (!buf)
		return -EBADF;

	if (length != (unsigned long)buf->chan->alloc_size)
		return -EINVAL;

	vma->vm_ops = &relay_file_mmap_ops;
	vma->vm_private_data = buf;
	buf->chan->cb->buf_mapped(buf, filp);

	return 0;
}

/**
 * relay_alloc_buf - allocate a channel buffer
 * @buf: the buffer struct
 * @size: total size of the buffer
 *
 * Returns a pointer to the resulting buffer, NULL if unsuccessful. The
 * passed in size will get page aligned, if it isn't already.
 */
static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
	void *mem;
	unsigned int i, j, n_pages;

	*size = PAGE_ALIGN(*size);
	n_pages = *size >> PAGE_SHIFT;

	buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!buf->page_array)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		buf->page_array[i] = alloc_page(GFP_KERNEL);
		if (unlikely(!buf->page_array[i]))
			goto depopulate;
	}
	mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
	if (!mem)
		goto depopulate;

	memset(mem, 0, *size);
	buf->page_count = n_pages;
	return mem;

depopulate:
	for (j = 0; j < i; j++)
		__free_page(buf->page_array[j]);
	kfree(buf->page_array);
	return NULL;
}
/**
 * relay_create_buf - allocate and initialize a channel buffer
 * @chan: the relay channel
 *
 * Returns channel buffer if successful, NULL otherwise.
 */
struct rchan_buf *relay_create_buf(struct rchan *chan)
{
	struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* one padding count per sub-buffer */
	buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t), GFP_KERNEL);
	if (!buf->padding)
		goto free_buf;

	buf->start = relay_alloc_buf(buf, &chan->alloc_size);
	if (!buf->start)
		goto free_buf;

	buf->chan = chan;
	kref_get(&buf->chan->kref);
	return buf;

free_buf:
	kfree(buf->padding);
	kfree(buf);
	return NULL;
}
/**
 * relay_destroy_channel - free the channel struct
 * @kref: target kernel reference that contains the relay channel
 *
 * Should only be called from kref_put().
 */
void relay_destroy_channel(struct kref *kref)
{
	struct rchan *chan = container_of(kref, struct rchan, kref);
	kfree(chan);
}

/**
 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
 * @buf: the buffer struct
 */
void relay_destroy_buf(struct rchan_buf *buf)
{
	struct rchan *chan = buf->chan;
	unsigned int i;

	if (likely(buf->start)) {
		vunmap(buf->start);
		for (i = 0; i < buf->page_count; i++)
			__free_page(buf->page_array[i]);
		kfree(buf->page_array);
	}
	kfree(buf->padding);
	kfree(buf);
	kref_put(&chan->kref, relay_destroy_channel);
}

/**
 * relay_remove_buf - remove a channel buffer
 * @kref: target kernel reference that contains the relay buffer
 *
 * Removes the file from the filesystem, which also frees the
 * rchan_buf struct and the channel buffer.  Should only be called from
 * kref_put().
 */
void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
	buf->chan->cb->remove_buf_file(buf->dentry);
	relay_destroy_buf(buf);
}
/**
 * relay_buf_empty - boolean, is the channel buffer empty?
 * @buf: channel buffer
 *
 * Returns 1 if the buffer is empty, 0 otherwise.
 */
int relay_buf_empty(struct rchan_buf *buf)
{
	return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(relay_buf_empty);

/**
 * relay_buf_full - boolean, is the channel buffer full?
 * @buf: channel buffer
 *
 * Returns 1 if the buffer is full, 0 otherwise.
 */
int relay_buf_full(struct rchan_buf *buf)
{
	size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
	return (ready >= buf->chan->n_subbufs) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(relay_buf_full);
/*
 * High-level relay kernel API and associated functions.
 */

/*
 * rchan_callback implementations defining default channel behavior.  Used
 * in place of corresponding NULL values in client callback struct.
 */

/*
 * subbuf_start() default callback.  Implements the default no-overwrite
 * behavior: refuse the sub-buffer switch if the buffer is full.
 */
static int subbuf_start_default_callback (struct rchan_buf *buf,
					  void *subbuf,
					  void *prev_subbuf,
					  size_t prev_padding)
{
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * buf_mapped() default callback.  Does nothing.
 */
static void buf_mapped_default_callback(struct rchan_buf *buf,
					struct file *filp)
{
}

/*
 * buf_unmapped() default callback.  Does nothing.
 */
static void buf_unmapped_default_callback(struct rchan_buf *buf,
					  struct file *filp)
{
}

/*
 * create_buf_file() default callback.  Does nothing.
 */
static struct dentry *create_buf_file_default_callback(const char *filename,
							struct dentry *parent,
							int mode,
							struct rchan_buf *buf,
							int *is_global)
{
	return NULL;
}

/*
 * remove_buf_file() default callback.  Does nothing.
 */
static int remove_buf_file_default_callback(struct dentry *dentry)
{
	return -EINVAL;
}

/* relay channel default callbacks */
static struct rchan_callbacks default_channel_callbacks = {
	.subbuf_start = subbuf_start_default_callback,
	.buf_mapped = buf_mapped_default_callback,
	.buf_unmapped = buf_unmapped_default_callback,
	.create_buf_file = create_buf_file_default_callback,
	.remove_buf_file = remove_buf_file_default_callback,
};
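
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the default subbuf_start() above gives no-overwrite behavior.  A client
 * wanting "flight recorder" (overwrite) semantics would supply its own
 * subbuf_start() in its rchan_callbacks that always accepts the switch;
 * the name example_subbuf_start below is an assumption.
 *
 *	static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
 *					void *prev_subbuf, size_t prev_padding)
 *	{
 *		return 1;
 *	}
 */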
/**
 * wakeup_readers - wake up readers waiting on a channel
 * @private: the channel buffer
 *
 * This is the work function used to defer reader waking.  The
 * reason waking is deferred is that calling directly from write
 * causes problems if you're writing from say the scheduler.
 */
static void wakeup_readers(void *private)
{
	struct rchan_buf *buf = private;
	wake_up_interruptible(&buf->read_wait);
}

/**
 * __relay_reset - reset a channel buffer
 * @buf: the channel buffer
 * @init: 1 if this is a first-time initialization
 *
 * See relay_reset for description of effect.
 */
static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
{
	size_t i;

	if (init) {
		init_waitqueue_head(&buf->read_wait);
		kref_init(&buf->kref);
		INIT_WORK(&buf->wake_readers, NULL, NULL);
	} else {
		cancel_delayed_work(&buf->wake_readers);
		flush_scheduled_work();
	}

	buf->subbufs_produced = 0;
	buf->subbufs_consumed = 0;
	buf->bytes_consumed = 0;
	buf->finalized = 0;
	buf->data = buf->start;
	buf->offset = 0;

	for (i = 0; i < buf->chan->n_subbufs; i++)
		buf->padding[i] = 0;

	buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0);
}

/**
 * relay_reset - reset the channel
 * @chan: the channel
 *
 * This has the effect of erasing all data from all channel buffers
 * and restarting the channel in its initial state.  The buffers
 * are not freed, so any mappings are still in effect.
 *
 * NOTE: Care should be taken that the channel isn't actually
 * being used by anything when this call is made.
 */
void relay_reset(struct rchan *chan)
{
	unsigned int i;
	struct rchan_buf *prev = NULL;

	if (!chan)
		return;

	for (i = 0; i < NR_CPUS; i++) {
		if (!chan->buf[i] || chan->buf[i] == prev)
			break;
		__relay_reset(chan->buf[i], 0);
		prev = chan->buf[i];
	}
}
EXPORT_SYMBOL_GPL(relay_reset);
/**
 * relay_open_buf - create a new relay channel buffer
 *
 * Internal - used by relay_open().
 */
static struct rchan_buf *relay_open_buf(struct rchan *chan,
					const char *filename,
					struct dentry *parent,
					int *is_global)
{
	struct rchan_buf *buf;
	struct dentry *dentry;

	if (*is_global)
		return chan->buf[0];

	buf = relay_create_buf(chan);
	if (!buf)
		return NULL;

	/* Create file in fs */
	dentry = chan->cb->create_buf_file(filename, parent, S_IRUSR,
					   buf, is_global);
	if (!dentry) {
		relay_destroy_buf(buf);
		return NULL;
	}

	buf->dentry = dentry;
	__relay_reset(buf, 1);

	return buf;
}

/**
 * relay_close_buf - close a channel buffer
 * @buf: channel buffer
 *
 * Marks the buffer finalized and restores the default callbacks.
 * The channel buffer and channel buffer data structure are then freed
 * automatically when the last reference is given up.
 */
static inline void relay_close_buf(struct rchan_buf *buf)
{
	buf->finalized = 1;
	cancel_delayed_work(&buf->wake_readers);
	flush_scheduled_work();
	kref_put(&buf->kref, relay_remove_buf);
}

static inline void setup_callbacks(struct rchan *chan,
				   struct rchan_callbacks *cb)
{
	if (!cb) {
		chan->cb = &default_channel_callbacks;
		return;
	}

	if (!cb->subbuf_start)
		cb->subbuf_start = subbuf_start_default_callback;
	if (!cb->buf_mapped)
		cb->buf_mapped = buf_mapped_default_callback;
	if (!cb->buf_unmapped)
		cb->buf_unmapped = buf_unmapped_default_callback;
	if (!cb->create_buf_file)
		cb->create_buf_file = create_buf_file_default_callback;
	if (!cb->remove_buf_file)
		cb->remove_buf_file = remove_buf_file_default_callback;
	chan->cb = cb;
}
/**
 * relay_open - create a new relay channel
 * @base_filename: base name of files to create
 * @parent: dentry of parent directory, NULL for root directory
 * @subbuf_size: size of sub-buffers
 * @n_subbufs: number of sub-buffers
 * @cb: client callback functions
 *
 * Returns channel pointer if successful, NULL otherwise.
 *
 * Creates a channel buffer for each cpu using the sizes and
 * attributes specified.  The created channel buffer files
 * will be named base_filename0...base_filenameN-1.  File
 * permissions will be S_IRUSR.
 */
struct rchan *relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 struct rchan_callbacks *cb)
{
	unsigned int i;
	struct rchan *chan;
	char *tmpname;
	int is_global = 0;

	if (!base_filename)
		return NULL;

	if (!(subbuf_size && n_subbufs))
		return NULL;

	chan = kcalloc(1, sizeof(struct rchan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->version = RELAYFS_CHANNEL_VERSION;
	chan->n_subbufs = n_subbufs;
	chan->subbuf_size = subbuf_size;
	chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
	setup_callbacks(chan, cb);
	kref_init(&chan->kref);

	tmpname = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!tmpname)
		goto free_chan;

	for_each_online_cpu(i) {
		sprintf(tmpname, "%s%d", base_filename, i);
		chan->buf[i] = relay_open_buf(chan, tmpname, parent,
					      &is_global);
		if (!chan->buf[i])
			goto free_bufs;

		chan->buf[i]->cpu = i;
	}

	kfree(tmpname);
	return chan;

free_bufs:
	for (i = 0; i < NR_CPUS; i++) {
		if (!chan->buf[i])
			break;
		relay_close_buf(chan->buf[i]);
		if (is_global)
			break;
	}
	kfree(tmpname);

free_chan:
	kref_put(&chan->kref, relay_destroy_channel);
	return NULL;
}
EXPORT_SYMBOL_GPL(relay_open);
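
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a minimal kernel client of relay_open().  Only relay_open(),
 * relay_close() and relay_file_operations come from this file; the debugfs
 * calls, the directory "example_dir" and the example_* names are
 * assumptions chosen for the sketch.
 *
 *	static struct dentry *example_create_buf_file(const char *filename,
 *						      struct dentry *parent,
 *						      int mode,
 *						      struct rchan_buf *buf,
 *						      int *is_global)
 *	{
 *		return debugfs_create_file(filename, mode, parent, buf,
 *					   &relay_file_operations);
 *	}
 *
 *	static int example_remove_buf_file(struct dentry *dentry)
 *	{
 *		debugfs_remove(dentry);
 *		return 0;
 *	}
 *
 *	static struct rchan_callbacks example_callbacks = {
 *		.create_buf_file = example_create_buf_file,
 *		.remove_buf_file = example_remove_buf_file,
 *	};
 *
 * The client's setup path would then create per-cpu files cpu0...cpuN-1
 * under example_dir, and its teardown path would release them:
 *
 *	chan = relay_open("cpu", example_dir, 8192, 4, &example_callbacks);
 *	...
 *	relay_close(chan);
 */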
/**
 * relay_switch_subbuf - switch to a new sub-buffer
 * @buf: channel buffer
 * @length: size of current event
 *
 * Returns either the length passed in or 0 if full.
 *
 * Performs sub-buffer-switch tasks such as invoking callbacks,
 * updating padding counts, waking up readers, etc.
 */
size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
{
	void *old, *new;
	size_t old_subbuf, new_subbuf;

	if (unlikely(length > buf->chan->subbuf_size))
		goto toobig;

	if (buf->offset != buf->chan->subbuf_size + 1) {
		buf->prev_padding = buf->chan->subbuf_size - buf->offset;
		old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
		buf->padding[old_subbuf] = buf->prev_padding;
		buf->subbufs_produced++;
		buf->dentry->d_inode->i_size += buf->chan->subbuf_size -
			buf->padding[old_subbuf];
		smp_mb();
		if (waitqueue_active(&buf->read_wait)) {
			PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
			schedule_delayed_work(&buf->wake_readers, 1);
		}
	}

	old = buf->data;
	new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
	new = buf->start + new_subbuf * buf->chan->subbuf_size;
	buf->offset = 0;
	if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
		buf->offset = buf->chan->subbuf_size + 1;
		return 0;
	}
	buf->data = new;
	buf->padding[new_subbuf] = 0;

	if (unlikely(length + buf->offset > buf->chan->subbuf_size))
		goto toobig;

	return length;

toobig:
	buf->chan->last_toobig = length;
	return 0;
}
EXPORT_SYMBOL_GPL(relay_switch_subbuf);
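
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * clients normally do not call relay_switch_subbuf() directly; the
 * relay_write()/relay_reserve() helpers declared in <linux/relay.h> invoke
 * it when the event being logged does not fit in the current sub-buffer.
 * The names event_data and event_size below are assumptions standing in
 * for client data:
 *
 *	relay_write(chan, event_data, event_size);
 */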
/**
 * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
 * @chan: the channel
 * @cpu: the cpu associated with the channel buffer to update
 * @subbufs_consumed: number of sub-buffers to add to current buf's count
 *
 * Adds to the channel buffer's consumed sub-buffer count.
 * subbufs_consumed should be the number of sub-buffers newly consumed,
 * not the total consumed.
 *
 * NOTE: kernel clients don't need to call this function if the channel
 * mode is 'overwrite'.
 */
void relay_subbufs_consumed(struct rchan *chan,
			    unsigned int cpu,
			    size_t subbufs_consumed)
{
	struct rchan_buf *buf;

	if (!chan)
		return;

	if (cpu >= NR_CPUS || !chan->buf[cpu])
		return;

	buf = chan->buf[cpu];
	buf->subbufs_consumed += subbufs_consumed;
	if (buf->subbufs_consumed > buf->subbufs_produced)
		buf->subbufs_consumed = buf->subbufs_produced;
}
EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
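
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a client in no-overwrite mode whose userspace reader consumes sub-buffers
 * via mmap() reports consumption itself, e.g. from a client-defined control
 * hook; the function name below is an assumption:
 *
 *	static void example_ack_subbufs(struct rchan *chan, unsigned int cpu,
 *					size_t n)
 *	{
 *		relay_subbufs_consumed(chan, cpu, n);
 *	}
 *
 * Clients whose readers use relay_file_operations' read() don't need this;
 * relay_file_read_consume() below does the accounting for them.
 */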
/**
 * relay_close - close the channel
 * @chan: the channel
 *
 * Closes all channel buffers and frees the channel.
 */
void relay_close(struct rchan *chan)
{
	unsigned int i;
	struct rchan_buf *prev = NULL;

	if (!chan)
		return;

	for (i = 0; i < NR_CPUS; i++) {
		if (!chan->buf[i] || chan->buf[i] == prev)
			break;
		relay_close_buf(chan->buf[i]);
		prev = chan->buf[i];
	}

	if (chan->last_toobig)
		printk(KERN_WARNING "relay: one or more items not logged "
		       "[item size (%Zd) > sub-buffer size (%Zd)]\n",
		       chan->last_toobig, chan->subbuf_size);

	kref_put(&chan->kref, relay_destroy_channel);
}
EXPORT_SYMBOL_GPL(relay_close);
/**
 * relay_flush - flush the channel
 * @chan: the channel
 *
 * Flushes all channel buffers, i.e. forces a sub-buffer switch on each.
 */
void relay_flush(struct rchan *chan)
{
	unsigned int i;
	struct rchan_buf *prev = NULL;

	if (!chan)
		return;

	for (i = 0; i < NR_CPUS; i++) {
		if (!chan->buf[i] || chan->buf[i] == prev)
			break;
		relay_switch_subbuf(chan->buf[i], 0);
		prev = chan->buf[i];
	}
}
EXPORT_SYMBOL_GPL(relay_flush);
/**
 * relay_file_open - open file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Increments the channel buffer refcount.
 */
static int relay_file_open(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = inode->u.generic_ip;
	kref_get(&buf->kref);
	filp->private_data = buf;

	return 0;
}

/**
 * relay_file_mmap - mmap file op for relay files
 * @filp: the file
 * @vma: the vma describing what to map
 *
 * Calls upon relay_mmap_buf to map the file into user space.
 */
static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct rchan_buf *buf = filp->private_data;
	return relay_mmap_buf(buf, vma);
}
/**
 * relay_file_poll - poll file op for relay files
 * @filp: the file
 * @wait: poll table
 *
 * Poll implementation.
 */
static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;
	struct rchan_buf *buf = filp->private_data;

	if (buf->finalized)
		return POLLERR;

	if (filp->f_mode & FMODE_READ) {
		poll_wait(filp, &buf->read_wait, wait);
		if (!relay_buf_empty(buf))
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}
/**
 * relay_file_release - release file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Decrements the channel refcount, as the filesystem is
 * no longer using it.
 */
static int relay_file_release(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = filp->private_data;
	kref_put(&buf->kref, relay_remove_buf);

	return 0;
}

/**
 * relay_file_read_consume - update the consumed count for the buffer
 */
static void relay_file_read_consume(struct rchan_buf *buf,
				    size_t read_pos,
				    size_t bytes_consumed)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t read_subbuf;

	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}

	buf->bytes_consumed += bytes_consumed;
	read_subbuf = read_pos / buf->chan->subbuf_size;
	if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
		if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
		    (buf->offset == subbuf_size))
			return;
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}
}
/**
 * relay_file_read_avail - boolean, are there unconsumed bytes available?
 */
static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t produced = buf->subbufs_produced;
	size_t consumed = buf->subbufs_consumed;

	relay_file_read_consume(buf, read_pos, 0);

	if (unlikely(buf->offset > subbuf_size)) {
		if (produced == consumed)
			return 0;
		return 1;
	}

	if (unlikely(produced - consumed >= n_subbufs)) {
		consumed = (produced / n_subbufs) * n_subbufs;
		buf->subbufs_consumed = consumed;
	}

	produced = (produced % n_subbufs) * subbuf_size + buf->offset;
	consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;

	if (consumed > produced)
		produced += n_subbufs * subbuf_size;

	if (consumed == produced)
		return 0;

	return 1;
}

/**
 * relay_file_read_subbuf_avail - return bytes available in sub-buffer
 */
static size_t relay_file_read_subbuf_avail(size_t read_pos,
					   struct rchan_buf *buf)
{
	size_t padding, avail = 0;
	size_t read_subbuf, read_offset, write_subbuf, write_offset;
	size_t subbuf_size = buf->chan->subbuf_size;

	write_subbuf = (buf->data - buf->start) / subbuf_size;
	write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
	read_subbuf = read_pos / subbuf_size;
	read_offset = read_pos % subbuf_size;
	padding = buf->padding[read_subbuf];

	if (read_subbuf == write_subbuf) {
		if (read_offset + padding < write_offset)
			avail = write_offset - (read_offset + padding);
	} else
		avail = (subbuf_size - padding) - read_offset;

	return avail;
}

/**
 * relay_file_read_start_pos - find the first available byte to read
 *
 * If the read_pos is in the middle of padding, return the
 * position of the first actually available byte, otherwise
 * return the original value.
 */
static size_t relay_file_read_start_pos(size_t read_pos,
					struct rchan_buf *buf)
{
	size_t read_subbuf, padding, padding_start, padding_end;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;

	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	padding_start = (read_subbuf + 1) * subbuf_size - padding;
	padding_end = (read_subbuf + 1) * subbuf_size;
	if (read_pos >= padding_start && read_pos < padding_end) {
		read_subbuf = (read_subbuf + 1) % n_subbufs;
		read_pos = read_subbuf * subbuf_size;
	}

	return read_pos;
}
/**
 * relay_file_read_end_pos - return the new read position
 */
static size_t relay_file_read_end_pos(struct rchan_buf *buf,
				      size_t read_pos,
				      size_t count)
{
	size_t read_subbuf, padding, end_pos;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;

	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	if (read_pos % subbuf_size + count + padding == subbuf_size)
		end_pos = (read_subbuf + 1) * subbuf_size;
	else
		end_pos = read_pos + count;
	if (end_pos >= subbuf_size * n_subbufs)
		end_pos = 0;

	return end_pos;
}

/**
 * subbuf_read_actor - read up to one subbuf's worth of data
 */
static int subbuf_read_actor(size_t read_start,
			     struct rchan_buf *buf,
			     size_t avail,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	void *from;
	int ret = 0;

	from = buf->start + read_start;
	ret = avail;
	if (copy_to_user(desc->arg.data, from, avail)) {
		desc->error = -EFAULT;
		ret = 0;
	}
	desc->arg.data += ret;
	desc->written += ret;
	desc->count -= ret;

	return ret;
}

/**
 * subbuf_send_actor - send up to one subbuf's worth of data
 */
static int subbuf_send_actor(size_t read_start,
			     struct rchan_buf *buf,
			     size_t avail,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	unsigned long pidx, poff;
	unsigned int subbuf_pages;
	int ret = 0;

	subbuf_pages = buf->chan->alloc_size >> PAGE_SHIFT;
	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
	poff = read_start & ~PAGE_MASK;
	while (avail) {
		struct page *p = buf->page_array[pidx];
		unsigned int len;

		len = PAGE_SIZE - poff;
		if (len > avail)
			len = avail;

		len = actor(desc, p, poff, len);
		if (desc->error)
			break;

		avail -= len;
		ret += len;
		poff = 0;
		pidx = (pidx + 1) % subbuf_pages;
	}

	return ret;
}

typedef int (*subbuf_actor_t) (size_t read_start,
			       struct rchan_buf *buf,
			       size_t avail,
			       read_descriptor_t *desc,
			       read_actor_t actor);
/**
 * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
 */
static inline ssize_t relay_file_read_subbufs(struct file *filp,
					      loff_t *ppos,
					      size_t count,
					      subbuf_actor_t subbuf_actor,
					      read_actor_t actor,
					      void *target)
{
	struct rchan_buf *buf = filp->private_data;
	size_t read_start, avail;
	read_descriptor_t desc;
	int ret;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	mutex_lock(&filp->f_dentry->d_inode->i_mutex);
	do {
		if (!relay_file_read_avail(buf, *ppos))
			break;

		read_start = relay_file_read_start_pos(*ppos, buf);
		avail = relay_file_read_subbuf_avail(read_start, buf);
		if (!avail)
			break;

		avail = min(desc.count, avail);
		ret = subbuf_actor(read_start, buf, avail, &desc, actor);
		if (desc.error < 0)
			break;

		if (ret) {
			relay_file_read_consume(buf, read_start, ret);
			*ppos = relay_file_read_end_pos(buf, read_start, ret);
		}
	} while (desc.count && ret);
	mutex_unlock(&filp->f_dentry->d_inode->i_mutex);

	return desc.written;
}
static ssize_t relay_file_read(struct file *filp,
			       char __user *buffer,
			       size_t count,
			       loff_t *ppos)
{
	return relay_file_read_subbufs(filp, ppos, count, subbuf_read_actor,
				       NULL, buffer);
}

static ssize_t relay_file_sendfile(struct file *filp,
				   loff_t *ppos,
				   size_t count,
				   read_actor_t actor,
				   void *target)
{
	return relay_file_read_subbufs(filp, ppos, count, subbuf_send_actor,
				       actor, target);
}

struct file_operations relay_file_operations = {
	.open		= relay_file_open,
	.poll		= relay_file_poll,
	.mmap		= relay_file_mmap,
	.read		= relay_file_read,
	.llseek		= no_llseek,
	.release	= relay_file_release,
	.sendfile	= relay_file_sendfile,
};
EXPORT_SYMBOL_GPL(relay_file_operations);
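
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * from userspace, each per-cpu file created through create_buf_file() can
 * be drained with an ordinary read() loop (or mapped with mmap() and
 * consumed in place).  The path and process_events() below are assumptions;
 * they depend on where and how the client created its files:
 *
 *	int fd = open("/sys/kernel/debug/example/cpu0", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		process_events(buf, n);
 */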