/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through
         * a file pointer that was marked O_SYNC will be done non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
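
/*
 * User-space sketch (not from this file, assumed context): on the
 * generic #else path above, O_SYNC is what requests an uncached view
 * of device memory, e.g.
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *
 * after which mmap()s of this fd are set up with pgprot_noncached()
 * via phys_mem_access_prot() below.
 */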

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 * (-p & (PAGE_SIZE - 1) is the distance from p to the
                 * next page boundary)
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                if (copy_to_user(buf, ptr, sz)) {
                        unxlate_dev_mem_ptr(p, ptr);
                        return -EFAULT;
                }

                unxlate_dev_mem_ptr(p, ptr);

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
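
/*
 * User-space sketch (not from this file, assumed context): because
 * f_pos is the physical address, reading a known physical range is a
 * plain pread(), e.g. dumping 256 bytes from a physical address:
 *
 *      unsigned char data[256];
 *      int fd = open("/dev/mem", O_RDONLY);
 *      ssize_t n = pread(fd, data, sizeof(data), phys_addr);
 *
 * The read fails with -EFAULT past the end of RAM and with -EPERM on
 * pages that CONFIG_NONPROMISC_DEVMEM disallows.
 */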

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        unxlate_dev_mem_ptr(p, ptr);
                        if (written)
                                break;
                        return -EFAULT;
                }

                unxlate_dev_mem_ptr(p, ptr);

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
        /* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
        /* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
        map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
                   vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
        unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
                     vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
        .open  = mmap_mem_open,
        .close = mmap_mem_close
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
                return -EAGAIN;
        }
        return 0;
}
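
/*
 * User-space sketch (not from this file, assumed context): vm_pgoff is
 * a page frame number, so the mmap() offset must be the page-aligned
 * physical address of the region, e.g. one page of a device aperture:
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, phys_addr & ~(off_t)4095);
 *
 * remap_pfn_range() then installs the pfn-for-pfn mapping with the
 * protections chosen by phys_mem_access_prot() above.
 */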

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}
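
/*
 * User-space sketch (not from this file, assumed context): /dev/kmem
 * offsets are kernel virtual addresses, so a classic use is seeking to
 * an address taken from /proc/kallsyms (or System.map) and reading the
 * object found there:
 *
 *      int fd = open("/dev/kmem", O_RDONLY);
 *      pread(fd, &value, sizeof(value), symbol_addr);
 *
 * Addresses below high_memory are copied directly; vmalloc addresses
 * go through the bounce page and vread() above.
 */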

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;
                written = do_write_kmem((void *)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
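
/*
 * User-space sketch (not from this file, assumed context): the file
 * offset of /dev/port is the x86 I/O port number, one byte per port,
 * so reading the CMOS seconds register could look like:
 *
 *      int fd = open("/dev/port", O_RDWR);
 *      char idx = 0x00, sec;
 *      pwrite(fd, &idx, 1, 0x70);      (index port)
 *      pread(fd, &sec, 1, 0x71);       (data port)
 *
 * open_port() below restricts this to CAP_SYS_RAWIO.
 */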

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}
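
/*
 * User-space sketch (not from this file, assumed context): before
 * MAP_ANONYMOUS was widespread, mapping /dev/zero was the portable way
 * to get zero-filled anonymous memory:
 *
 *      int fd = open("/dev/zero", O_RDWR);
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE, fd, 0);
 *
 * Private mappings simply get demand-zero pages; MAP_SHARED instead
 * takes the shmem_zero_setup() path above (the Feb 2000 addition noted
 * in the header comment).
 */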

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write = kmsg_write,
};
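
/*
 * User-space sketch (not from this file, assumed context): writes to
 * /dev/kmsg are handed to printk(), so injecting a marker into the
 * kernel log is just:
 *
 *      int fd = open("/dev/kmsg", O_WRONLY);
 *      write(fd, "test marker\n", 12);
 *
 * The return value is clamped to the byte count written, since
 * printk() may report a longer length once it adds a prefix.
 */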

static int memory_open(struct inode *inode, struct file *filp)
{
        switch (iminor(inode)) {
        case 1:
                filp->f_op = &mem_fops;
                filp->f_mapping->backing_dev_info =
                        &directly_mappable_cdev_bdi;
                break;
#ifdef CONFIG_DEVKMEM
        case 2:
                filp->f_op = &kmem_fops;
                filp->f_mapping->backing_dev_info =
                        &directly_mappable_cdev_bdi;
                break;
#endif
        case 3:
                filp->f_op = &null_fops;
                break;
#ifdef CONFIG_DEVPORT
        case 4:
                filp->f_op = &port_fops;
                break;
#endif
        case 5:
                filp->f_mapping->backing_dev_info = &zero_bdi;
                filp->f_op = &zero_fops;
                break;
        case 7:
                filp->f_op = &full_fops;
                break;
        case 8:
                filp->f_op = &random_fops;
                break;
        case 9:
                filp->f_op = &urandom_fops;
                break;
        case 11:
                filp->f_op = &kmsg_fops;
                break;
#ifdef CONFIG_CRASH_DUMP
        case 12:
                filp->f_op = &oldmem_fops;
                break;
#endif
        default:
                return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static const struct file_operations memory_fops = {
        .open   = memory_open, /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations    *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#ifdef CONFIG_DEVKMEM
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
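
/*
 * Sketch (assumed context): the minors above live under the fixed
 * MEM_MAJOR (1), so when no udev/devtmpfs is around the matching
 * device nodes can be created by hand, e.g.
 *
 *      mknod("/dev/mem",  S_IFCHR | 0640, makedev(1, 1));
 *      mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *      mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 *
 * The modes mirror the devlist[] entries above.
 */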

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int i;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                device_create(mem_class, NULL,
                              MKDEV(MEM_MAJOR, devlist[i].minor),
                              devlist[i].name);

        return 0;
}

fs_initcall(chr_dev_init);