/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
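
/*
 * Default no-op: architectures that need to undo a mapping set up by
 * xlate_dev_mem_ptr() override this weak definition.
 */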
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
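
/*
 * This function writes to the *physical* memory. The f_pos points directly
 * to the memory location to be written.
 */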
static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
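
/*
 * Map the requested range of physical memory (vm_pgoff holds the page frame
 * number) into the caller's address space.
 */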
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
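
/*
 * Helper for write_kmem(): copy user data into the low, directly mapped
 * kernel range. 'p' is the kernel virtual address to write to and 'realp'
 * is the same address as an unsigned long, used for the page-zero and
 * alignment checks.
 */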
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;
		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif
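
/*
 * /dev/port gives byte-at-a-time access to the I/O port space: the file
 * offset is the port number, read with inb() and written with outb().
 */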
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
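
/*
 * /dev/null: reads return end-of-file, writes (including spliced writes)
 * succeed and discard the data.
 */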
static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
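
/*
 * Reads from /dev/zero fill the user buffer with zero bytes, at most one
 * page at a time so the loop can reschedule between chunks.
 */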
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}
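
/*
 * Shared mappings of /dev/zero are backed by shmem; private mappings are
 * left to the generic mmap code and behave as anonymous zero-filled memory.
 */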
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
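
/*
 * Writes to /dev/full always fail with ENOSPC; reads return zeros via
 * read_full (aliased to read_zero below).
 */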
static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
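
/*
 * Typical userspace usage (illustrative sketch, not part of the driver):
 * a program seeks to a physical address and then reads, e.g.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, phys_addr, SEEK_SET);		-- handled by memory_lseek()
 *	read(fd, buf, len);			-- handled by read_mem()
 *
 * The open is gated by CAP_SYS_RAWIO (see open_port(), aliased to open_mem
 * below), and each read must pass valid_phys_addr_range() and
 * range_is_allowed().
 */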

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif
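
/*
 * Writes to /dev/kmsg are copied into a temporary kernel buffer and injected
 * into the kernel log via printk().
 */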
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};
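
/*
 * Select the real file_operations for the opened device node based on its
 * minor number, then call that device's own open() if it has one.
 */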
static int memory_open(struct inode * inode, struct file * filp)
{
	int ret = 0;

	lock_kernel();
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
#ifdef CONFIG_DEVKMEM
		case 2:
			filp->f_op = &kmem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
#endif
		case 3:
			filp->f_op = &null_fops;
			break;
#ifdef CONFIG_DEVPORT
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			unlock_kernel();
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		ret = filp->f_op->open(inode,filp);
	unlock_kernel();
	return ret;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#ifdef CONFIG_DEVKMEM
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12,"oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;
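
/*
 * Register the MEM_MAJOR character device and create a device node for each
 * entry in devlist[] under the "mem" class.
 */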
static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor), NULL,
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);