/*
 * linux/mm/filemap.h
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */
#ifndef __FILEMAP_H
#define __FILEMAP_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/config.h>
#include <linux/uaccess.h>
/*
 * Copy up to @bytes from the iovec array starting at (@iov, @base) into
 * @vaddr, returning the number of bytes copied.  Implemented elsewhere
 * (mm/filemap.c); callers below invoke it both under kmap_atomic() and
 * under a sleeping kmap().
 */
size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
					const struct iovec *iov,
					size_t base,
					size_t bytes);
  20. /*
  21. * Copy as much as we can into the page and return the number of bytes which
  22. * were sucessfully copied. If a fault is encountered then clear the page
  23. * out to (offset+bytes) and return the number of bytes which were copied.
  24. *
  25. * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
  26. * to *NOT* zero any tail of the buffer that it failed to copy. If it does,
  27. * and if the following non-atomic copy succeeds, then there is a small window
  28. * where the target page contains neither the data before the write, nor the
  29. * data after the write (it contains zero). A read at this time will see
  30. * data that is inconsistent with any ordering of the read and the write.
  31. * (This has been detected in practice).
  32. */
  33. static inline size_t
  34. filemap_copy_from_user(struct page *page, unsigned long offset,
  35. const char __user *buf, unsigned bytes)
  36. {
  37. char *kaddr;
  38. int left;
  39. kaddr = kmap_atomic(page, KM_USER0);
  40. left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
  41. kunmap_atomic(kaddr, KM_USER0);
  42. if (left != 0) {
  43. /* Do it the slow way */
  44. kaddr = kmap(page);
  45. left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
  46. kunmap(page);
  47. }
  48. return bytes - left;
  49. }
  50. /*
  51. * This has the same sideeffects and return value as filemap_copy_from_user().
  52. * The difference is that on a fault we need to memset the remainder of the
  53. * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
  54. * single-segment behaviour.
  55. */
  56. static inline size_t
  57. filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
  58. const struct iovec *iov, size_t base, size_t bytes)
  59. {
  60. char *kaddr;
  61. size_t copied;
  62. kaddr = kmap_atomic(page, KM_USER0);
  63. copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
  64. base, bytes);
  65. kunmap_atomic(kaddr, KM_USER0);
  66. if (copied != bytes) {
  67. kaddr = kmap(page);
  68. copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
  69. base, bytes);
  70. if (bytes - copied)
  71. memset(kaddr + offset + copied, 0, bytes - copied);
  72. kunmap(page);
  73. }
  74. return copied;
  75. }
  76. static inline void
  77. filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
  78. {
  79. const struct iovec *iov = *iovp;
  80. size_t base = *basep;
  81. do {
  82. int copy = min(bytes, iov->iov_len - base);
  83. bytes -= copy;
  84. base += copy;
  85. if (iov->iov_len == base) {
  86. iov++;
  87. base = 0;
  88. }
  89. } while (bytes);
  90. *iovp = iov;
  91. *basep = base;
  92. }
#endif /* __FILEMAP_H */