/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
                && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruptions.
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /*
         * Same behaviour as i386: PAT is always set to cached and the MTRRs
         * control the caching behaviour.
         * Hopefully a full PAT implementation will fix that soon.
         */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top the kernel knows about or through
         * a file pointer that was marked O_SYNC will be done non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
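
/*
 * A nonzero return from uncached_access() means the physical address should
 * be mapped with caching disabled; mmap_mem() below applies this (via
 * pgprot_noncached) on architectures that define pgprot_noncached but do not
 * provide phys_mem_access_prot().
 */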

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (addr >= end_mem)
                return 0;
        if (*count > end_mem - addr)
                *count = end_mem - addr;
        return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);
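
                /*
                 * Note: -p & (PAGE_SIZE - 1) is the number of bytes from p to
                 * the next page boundary.  For example, with PAGE_SIZE 4096
                 * and p = 0x1234, -p & 0xfff = 0xdcc (3532 bytes, reaching
                 * 0x2000); when p is already page aligned it evaluates to 0
                 * and a full page is used instead.
                 */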

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        ssize_t ret;

                        ret = written + (sz - copied);
                        if (ret)
                                return ret;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        vma->vm_page_prot = phys_mem_access_prot(file, offset,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
#elif defined(pgprot_noncached)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        int uncached;

        uncached = uncached_access(file, offset);
        if (uncached)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            vma->vm_end - vma->vm_start,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
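
/*
 * Illustrative user-space sketch (not part of this file): a typical caller
 * opens /dev/mem and maps a page-aligned physical range; the byte offset,
 * expressed in pages, is what arrives here as vma->vm_pgoff.
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *      void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      fd, phys_addr & ~(off_t)(getpagesize() - 1));
 */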

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
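/*
 * /dev/oldmem (minor 12) is only built when CONFIG_CRASH_DUMP is enabled:
 * after a crash, the kdump capture kernel can read the previous kernel's
 * memory page by page through copy_oldmem_page().  Reads past saved_max_pfn
 * simply end early.
 */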
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf;     /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif

                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}
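
/*
 * In read_kmem() above, addresses below high_memory are read directly through
 * the kernel mapping (xlate_dev_kmem_ptr); anything above is treated as
 * vmalloc space and copied via vread() into a bounce page first, since vread()
 * holds vmlist_lock and therefore must not fault on a user buffer.
 */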

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        ssize_t ret;

                        ret = written + (sz - copied);
                        if (ret)
                                return ret;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char *kbuf;     /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;
                written = do_write_kmem((void *)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        ssize_t ret;

                                        free_page((unsigned long)kbuf);
                                        ret = wrote + virtr + (len - written);
                                        return ret ? ret : -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
#endif
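
/*
 * For /dev/port the file offset is the I/O port number, so each byte read or
 * written becomes an inb()/outb() on that port.  Illustrative shell usage
 * (example port only; port reads can have hardware side effects):
 *
 *      dd if=/dev/port bs=1 skip=$((0x378)) count=1 | xxd
 */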

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user *buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}
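
/*
 * The trick above: for a private, writable VMA the existing pages are zapped
 * and the range is backed by copy-on-write zero-page mappings (PAGE_COPY)
 * instead of actually copying zeroes, so a large read from /dev/zero costs
 * page-table updates rather than memory traffic.  Shared and hugetlb mappings
 * fall back to plain clear_user().
 */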

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        down(&file->f_dentry->d_inode->i_sem);
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        up(&file->f_dentry->d_inode->i_sem);
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
};

static struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
};

static struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
        .read           = read_oldmem,
        .open           = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        int ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
        }
        kfree(tmp);
        return ret;
}
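
/*
 * Writes to /dev/kmsg are simply handed to printk(), so the message ends up
 * in the kernel log ring buffer.  Illustrative usage from a root shell:
 *
 *      echo "hello from userspace" > /dev/kmsg
 *      dmesg | tail -1
 */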

static struct file_operations kmsg_fops = {
        .write          = kmsg_write,
};

static int memory_open(struct inode *inode, struct file *filp)
{
        switch (iminor(inode)) {
        case 1:
                filp->f_op = &mem_fops;
                break;
        case 2:
                filp->f_op = &kmem_fops;
                break;
        case 3:
                filp->f_op = &null_fops;
                break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
        case 4:
                filp->f_op = &port_fops;
                break;
#endif
        case 5:
                filp->f_mapping->backing_dev_info = &zero_bdi;
                filp->f_op = &zero_fops;
                break;
        case 7:
                filp->f_op = &full_fops;
                break;
        case 8:
                filp->f_op = &random_fops;
                break;
        case 9:
                filp->f_op = &urandom_fops;
                break;
        case 11:
                filp->f_op = &kmsg_fops;
                break;
#ifdef CONFIG_CRASH_DUMP
        case 12:
                filp->f_op = &oldmem_fops;
                break;
#endif
        default:
                return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static struct file_operations memory_fops = {
        .open           = memory_open, /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        struct file_operations  *fops;
} devlist[] = { /* list of minor devices */
        {1,  "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2,  "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3,  "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
        {4,  "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5,  "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7,  "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8,  "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9,  "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
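
/*
 * The minor numbers in devlist[] mirror the switch in memory_open() above and
 * the traditional character devices on major 1 (MEM_MAJOR); minors 6 and 10
 * are not handled by this driver.
 */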

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++) {
                class_device_create(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor),
                                    NULL, devlist[i].name);
                devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
                              S_IFCHR | devlist[i].mode, devlist[i].name);
        }

        return 0;
}

fs_initcall(chr_dev_init);