/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
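
/*
 * Editorial example (not part of the original driver): with 4 KiB pages,
 * size_inside_page(0x1f40, 0x2000) returns 0xc0, i.e. only the 192 bytes
 * left before the next page boundary, while size_inside_page(0x1000, 0x80)
 * returns 0x80 because the request already fits inside one page. The
 * read/write loops below rely on this to handle at most one page per
 * iteration.
 */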

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;

                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                remaining = copy_to_user(buf, ptr, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
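
/*
 * Editorial usage sketch (not part of the original driver): from user space,
 * a privileged process reads physical memory by treating the file offset as
 * a physical address, e.g.
 *
 *     int fd = open("/dev/mem", O_RDONLY);
 *     unsigned char buf[256];
 *     pread(fd, buf, sizeof(buf), 0x000f0000);   // physical address as offset
 *
 * Whether this succeeds depends on valid_phys_addr_range() and, with
 * CONFIG_STRICT_DEVMEM, on devmem_is_allowed() for every page in the range.
 */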

static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
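
/*
 * Editorial usage sketch (not part of the original driver): user space maps
 * a physical region by passing the physical address as the file offset, e.g.
 *
 *     int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *     void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       fd, 0xfed00000);   // hypothetical device address
 *
 * The byte offset becomes vm_pgoff in the mmap core, and
 * phys_mem_access_prot() above decides whether the mapping is cached.
 */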

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}

static inline ssize_t
do_write_kmem(unsigned long p, const char __user *buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr((char *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                if (wrote + virtr)
                                        break;
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        sz = vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i),tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c,i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
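
/*
 * Editorial usage sketch (not part of the original driver): /dev/port exposes
 * the legacy x86 I/O port space, one byte per offset. Reading the CMOS/RTC
 * seconds register, for example, could look like
 *
 *     int fd = open("/dev/port", O_RDWR);
 *     unsigned char idx = 0x00, sec;
 *     pwrite(fd, &idx, 1, 0x70);    // select RTC register 0
 *     pread(fd, &sec, 1, 0x71);     // read its value
 *
 * open_port() below restricts this to processes with CAP_SYS_RAWIO.
 */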

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = __clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}
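
/*
 * Editorial note (not part of the original driver): a MAP_PRIVATE mapping of
 * /dev/zero behaves like anonymous memory (pages are demand-zeroed on first
 * touch), while MAP_SHARED is backed by the shmem object set up above, e.g.
 *
 *     int fd = open("/dev/zero", O_RDWR);
 *     char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE, fd, 0);   // 1 MiB of zero-filled memory
 */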

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .name           = "char/mem",
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read           = read_oldmem,
        .open           = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write = kmsg_write,
};
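
/*
 * Editorial usage sketch (not part of the original driver): writing to
 * /dev/kmsg injects a message into the kernel log, so from a root shell
 *
 *     echo "hello from userspace" > /dev/kmsg
 *
 * shows up in dmesg. In this version the buffer is passed straight to
 * printk() with no log-level parsing.
 */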

static const struct memdev {
        const char *name;
        mode_t mode;
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
} devlist[] = {
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
         [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, NULL },
#endif
         [5] = { "zero", 0666, &zero_fops, &zero_bdi },
         [7] = { "full", 0666, &full_fops, NULL },
         [8] = { "random", 0666, &random_fops, NULL },
         [9] = { "urandom", 0666, &urandom_fops, NULL },
        [11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
        [12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
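
/*
 * Editorial note (not part of the original driver): the array index doubles
 * as the minor number under character major 1, so the table above yields the
 * conventional nodes /dev/mem (1,1), /dev/kmem (1,2), /dev/null (1,3),
 * /dev/port (1,4), /dev/zero (1,5), /dev/full (1,7), /dev/random (1,8),
 * /dev/urandom (1,9), /dev/kmsg (1,11) and /dev/oldmem (1,12).
 */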

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        if (dev->dev_info)
                filp->f_mapping->backing_dev_info = dev->dev_info;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
};

static char *mem_devnode(struct device *dev, mode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;
                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return 0;
}

fs_initcall(chr_dev_init);