/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/*
	 * Same behaviour as i386: PAT is always set to cached and the MTRRs
	 * control the caching behaviour.
	 * Hopefully a full PAT implementation will fix that soon.
	 */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
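
/*
 * Illustrative userspace usage (not part of the driver): the file
 * position is the physical address, so a privileged process could read
 * a page of physical memory roughly as below. The 0xA0000 address is
 * only an example (legacy VGA memory on a PC).
 *
 *	int fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *	char page[4096];
 *	lseek(fd, 0xA0000, SEEK_SET);	// f_pos = physical address
 *	read(fd, page, sizeof(page));	// serviced by read_mem() above
 */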

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
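
/*
 * Illustrative userspace usage (not part of the driver): mmap() of
 * /dev/mem maps physical pages directly; the page-aligned file offset
 * becomes vm_pgoff and selects the physical page. The address is only
 * an example.
 *
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED,
 *		       fd, 0xA0000);	// fd refers to /dev/mem
 */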

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
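
/*
 * Note: read_oldmem() backs /dev/oldmem (minor 12 below). After a
 * crash, the kdump capture kernel reads the crashed kernel's memory
 * image through it, one page at a time via copy_oldmem_page(),
 * stopping at saved_max_pfn.
 */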

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
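
/*
 * Illustrative userspace usage (not part of the driver): unlike
 * /dev/mem, /dev/kmem is addressed by *kernel virtual* addresses, so a
 * debugging tool could read a kernel variable at an address taken from
 * System.map. The address below is hypothetical.
 *
 *	lseek(fd, 0xc03f0000, SEEK_SET);	// fd refers to /dev/kmem
 *	read(fd, &val, sizeof(long));		// serviced by read_kmem()
 */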

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
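
/*
 * Illustrative userspace usage (not part of the driver): /dev/port
 * maps f_pos to an x86 I/O port number and does one inb()/outb() per
 * byte. For example, reading the PC system control port 0x61:
 *
 *	char c;
 *	lseek(fd, 0x61, SEEK_SET);	// fd refers to /dev/port
 *	read(fd, &c, 1);		// performs inb(0x61)
 */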

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
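
/*
 * Note: splice_write_null() lets splice()/sendfile() to /dev/null
 * discard data without copying it anywhere: pipe_to_null simply
 * reports each pipe buffer's length as "written".
 */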

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr = (unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
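
/*
 * Illustrative userspace usage (not part of the driver): a private
 * mmap() of /dev/zero is a classic way to obtain zero-filled anonymous
 * memory; mmap_zero() above maps in zero pages, and shared mappings go
 * through shmem_zero_setup().
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);	// fd refers to /dev/zero
 */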
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_dentry->d_inode->i_mutex);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}
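
/*
 * Illustrative userspace usage (not part of the driver): writing to
 * /dev/kmsg (minor 11 below) injects a message into the kernel log
 * via printk():
 *
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *	write(fd, "hello from userspace\n", 21);
 */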

static const struct file_operations kmsg_fops = {
	.write		= kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
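
/*
 * Illustrative (not part of the driver): the device nodes matching
 * this table live under major MEM_MAJOR (1) with the minors listed
 * above, and could be created from C with mknod(2), e.g.:
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *	mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 */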

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		class_device_create(mem_class, NULL,
				    MKDEV(MEM_MAJOR, devlist[i].minor),
				    NULL, devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);