mem.c

/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4
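/*
 * Number of bytes from 'start' to the end of the page containing it,
 * capped at 'size'.
 */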
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
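/*
 * With CONFIG_STRICT_DEVMEM, every page in a requested range must pass the
 * architecture's devmem_is_allowed() check before /dev/mem will touch it;
 * without it, any range is permitted.
 */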
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
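/*
 * Weak default: architectures may override this to veto, or adjust the page
 * protection for, a user mapping of a physical range.
 */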
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or through a
	 * file pointer that was marked O_DSYNC, will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
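/*
 * mmap() handler for /dev/mem: validate the physical range and its
 * permissions, pick a suitable page protection, then map it with
 * remap_pfn_range().
 */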
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif
#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
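/*
 * /dev/null: reads return EOF, writes (and spliced writes) succeed while
 * discarding the data.
 */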
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
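/*
 * /dev/zero read: zero the user buffer one page-sized chunk at a time,
 * checking for pending signals and rescheduling between chunks.
 */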
static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}
static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	size_t written = 0;
	unsigned long i;
	ssize_t ret;

	for (i = 0; i < nr_segs; i++) {
		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
				&pos);
		if (ret < 0)
			break;
		written += ret;
	}

	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file_inode(file)->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if (IS_ERR_VALUE((unsigned long long)offset)) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file_inode(file)->i_mutex);
	return ret;
}
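/*
 * Opening /dev/port (and, via the open_mem/open_kmem aliases below,
 * /dev/mem and /dev/kmem) requires CAP_SYS_RAWIO.
 */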
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define aio_write_zero	aio_write_null
#define open_mem	open_port
#define open_kmem	open_mem
static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.aio_read	= aio_read_null,
	.aio_write	= aio_write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.aio_read	= aio_read_zero,
	.aio_write	= aio_write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
};
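/*
 * Open dispatcher for the mem character major: look up the devlist[] entry
 * for this minor, install its file_operations and backing_dev_info, and
 * chain into the device-specific open routine if there is one.
 */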
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is /dev/mem or /dev/kmem ? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}
static const struct file_operations memory_fops = {
	.open		= memory_open,
	.llseek		= noop_llseek,
};
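/* Report the device-node mode from devlist[] to devtmpfs/udev. */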
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}
static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);