/*
 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2007 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi: inode to be opened
 * @filp: file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we don't run into problems because
 * of existing too large files. It would be better to allow the user to read
 * the beginning of the file but I doubt very much anyone is going to hit this
 * check on a 32-bit architecture, so there is no point in adding the extra
 * complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}
#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni: ntfs inode of the attribute to extend
 * @new_init_size: requested new initialized size in bytes
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes. This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all. This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped. And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it. And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all. For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * Return 0 on success and -errno on error. In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must
 * be held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page. If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode. This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system. This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist. This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}
/**
 * ntfs_fault_in_pages_readable - fault a number of userspace pages into pagetables
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page case
 * elegantly.
 *
 * If you find this difficult to understand, then think of the while loop being
 * the following code, except that we do without the integer variable ret:
 *
 *	do {
 *		ret = __get_user(c, uaddr);
 *		uaddr += PAGE_SIZE;
 *	} while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same page,
 * it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);

	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}
/**
 * ntfs_fault_in_pages_readable_iovec - fault iovec-described user pages into pagetables
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		iov_ofs = 0;
	} while (bytes);
}
/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping: address space mapping from which to obtain page cache pages
 * @index: starting index in @mapping at which to begin obtaining pages
 * @nr_pages: number of page cache pages to obtain
 * @pages: array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 * @lru_pvec: lru-buffering pagevec of caller
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec @lru_pvec.
 *
 * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
 * are obtained at once instead of just one page and that 0 is returned on
 * success and -errno on error.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_lock_page(mapping, index);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache(*cached_page, mapping, index,
					GFP_KERNEL);
			if (unlikely(err)) {
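				/*
				 * -EEXIST means another task inserted a page
				 * at this index between our find_lock_page()
				 * and add_to_page_cache(); go round the loop
				 * again (without advancing @index) to lock
				 * the existing page instead.
				 */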
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			page_cache_get(*cached_page);
			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
				__pagevec_lru_add_file(lru_pvec);
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}
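/*
 * Lock the buffer and take an extra reference so it cannot be freed while the
 * read is in flight; end_buffer_read_sync() unlocks the buffer and drops that
 * reference when the I/O completes.
 */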
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}
/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages: array of destination pages
 * @nr_pages: number of pages in @pages
 * @pos: byte position in file at which the write begins
 * @bytes: number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host). There are
 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };
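	/*
	 * The status bits above record which reversible steps have completed
	 * so that the error paths at the end of this function know exactly
	 * what needs to be undone.
	 */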
	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
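	/*
	 * @cpos is the first cluster touched by the write and @cend is the
	 * first cluster beyond it.
	 */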
	/*
	 * Loop over each page and for each page over each buffer. Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped. If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate. If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate. If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer. Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again. The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer. If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it. On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated. If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read. Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user(page, bh_offset(bh),
								blocksize);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out. If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else. Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page, KM_USER0);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster. If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user(page, bh_offset(bh), blocksize);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT. As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds. We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped. This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if (bh_cend <= cpos || bh_cpos >= cend) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it. If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate. If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate. Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts. Note, we cannot use the _attr_ version because we
		 * have mapped the mft record. That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible. Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record. This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_segment(page, bh_offset(bh) + ofs,
						blocksize);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path. Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well. Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path. Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array. Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path. Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path. Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
	return err;
}
/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied. If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static inline size_t ntfs_copy_from_user(struct page **pages,
		unsigned nr_pages, unsigned ofs, const char __user *buf,
		size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t total = 0;
	unsigned len;
	int left;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
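		/*
		 * Try the copy with the page mapped atomically first; we must
		 * not sleep while it is mapped, so on a fault
		 * __copy_from_user_inatomic() returns the number of bytes it
		 * could not copy and we fall back to a sleeping kmap() below.
		 */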
		addr = kmap_atomic(*pages, KM_USER0);
		left = __copy_from_user_inatomic(addr + ofs, buf, len);
		kunmap_atomic(addr, KM_USER0);
		if (unlikely(left)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			left = __copy_from_user(addr + ofs, buf, len);
			kunmap(*pages);
			if (unlikely(left))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		buf += len;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += len - left;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user(*pages, 0, len);
	}
	goto out;
}
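/*
 * Copy as much as possible from the iovec array into @vaddr without faulting
 * and return the number of bytes actually copied, which may be less than
 * @bytes if a fault is encountered part way through.
 */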
static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf = iov->iov_base + iov_ofs;
		unsigned len;
		size_t left;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			total -= left;
			break;
		}
		if (!bytes)
			break;
		iov++;
		iov_ofs = 0;
	}
	return total;
}
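/*
 * Advance the iovec pointer and the offset within the current iovec by
 * @bytes, stepping on to the next iovec each time the current one is fully
 * consumed.
 */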
static inline void ntfs_set_next_iovec(const struct iovec **iovp,
		size_t *iov_ofsp, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		unsigned len;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}
/*
 * This has the same side-effects and return value as ntfs_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
 * single-segment behaviour.
 *
 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
 * when atomic and when not atomic. This is ok because
 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
 * and it is ok to call this when non-atomic.
 * In fact, the only difference between __copy_from_user_inatomic() and
 * __copy_from_user() is that the latter calls might_sleep() and the former
 * should not zero the tail of the buffer on error. And on many
 * architectures __copy_from_user_inatomic() is just defined to
 * __copy_from_user() so it makes no difference at all on those architectures.
 */
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
		size_t *iov_ofs, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t copied, len, total = 0;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages, KM_USER0);
		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
				*iov, *iov_ofs, len);
		kunmap_atomic(addr, KM_USER0);
		if (unlikely(copied != len)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
					*iov, *iov_ofs, len);
			/*
			 * Zero the rest of the target like __copy_from_user().
			 */
			memset(addr + ofs + copied, 0, len - copied);
			kunmap(*pages);
			if (unlikely(copied != len))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		ntfs_set_next_iovec(iov, iov_ofs, len);
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += copied;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user(*pages, 0, len);
	}
	goto out;
}
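
/* Flush the data cache of every page in @pages. */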
static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence the
	 * decrement never happens so the loop never terminates.
	 */
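	/*
	 * In other words, a sketch of the broken form would be:
	 *
	 *	do {
	 *		flush_dcache_page(pages[--nr_pages]);
	 *	} while (nr_pages > 0);
	 *
	 * On architectures where the macro expands to nothing, the whole
	 * statement, decrement included, disappears with it.
	 */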
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}

/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
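	/*
	 * Set uptodate and dirty every buffer that lies within the write, and
	 * record whether any page is left with buffers that are not uptodate.
	 */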
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}

/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the
 * inode (@pages[0]->mapping->host). There are @nr_pages pages in @pages
 * which are locked but not kmap()ped. The source data has already been
 * copied into @pages. ntfs_prepare_pages_for_non_resident_write() has been
 * called before the data was copied (for non-resident attributes only) and
 * it returned success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write. If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size. In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes. For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr, KM_USER0);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave its
			 * buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate. Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

/**
 * ntfs_file_buffered_write - write data to a file via the page cache
 *
 * Locking: The vfs is holding ->i_mutex on the inode.
 */
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs,
		loff_t pos, loff_t *ppos, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	char __user *buf = NULL;
	s64 end, ll;
	VCN last_vcn;
	LCN lcn;
	unsigned long flags;
	size_t bytes, iov_ofs = 0;	/* Offset in the current iovec. */
	ssize_t status, written;
	unsigned nr_pages;
	int err;
	struct pagevec lru_pvec;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"pos 0x%llx, count 0x%lx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos, (unsigned long)count);
	if (unlikely(!count))
		return 0;
	BUG_ON(NInoMstProtected(ni));
	/*
	 * If the attribute is not an index root and it is encrypted or
	 * compressed, we cannot write to it yet. Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			/*
			 * Reminder for later: Encrypted files are _always_
			 * non-resident so that the content can always be
			 * encrypted.
			 */
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		if (NInoCompressed(ni)) {
			/* Only unnamed $DATA attribute can be compressed. */
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			/*
			 * Reminder for later: If resident, the data is not
			 * actually compressed. Only on the switch to non-
			 * resident does compression kick in. This is in
			 * contrast to encrypted files (see above).
			 */
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not implemented yet. Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		down_write(&vi->i_alloc_sem);
		err = ntfs_truncate(vi);
		up_write(&vi->i_alloc_sem);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/* The first byte after the write. */
	end = pos + count;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/* Extend the allocation without changing the data size. */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				end = ll;
				count = ll - pos;
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error code %i).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type), err);
				end = ll;
				count = ll - pos;
			} else {
				ntfs_error(vol->sb, "Cannot perform write to "
						"inode 0x%lx, attribute type "
						"0x%x, because extending the "
						"allocation failed (error "
						"code %i).", vi->i_ino,
						(unsigned)
						le32_to_cpu(ni->type), err);
				return err;
			}
		}
	}
	pagevec_init(&lru_pvec, 0);
	written = 0;
	/*
	 * If the write starts beyond the initialized size, extend it up to the
	 * beginning of the write and initialize all non-sparse space between
	 * the old initialized size and the new one. This automatically also
	 * increments the vfs inode->i_size to keep it above or equal to the
	 * initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		err = ntfs_attr_extend_initialized(ni, pos);
		if (err < 0) {
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error code %i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			status = err;
			goto err_out;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes.
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
	/* Finally, perform the actual write. */
	last_vcn = -1;
	if (likely(nr_segs == 1))
		buf = iov->iov_base;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_CACHE_SHIFT;
		ofs = pos & ~PAGE_CACHE_MASK;
		bytes = PAGE_CACHE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Get the lcn of the vcn the write is in. If
				 * it is a hole, need to lock down all pages in
				 * the cluster.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					status = -EIO;
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the attribute "
							"is corrupt.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
					break;
				}
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_CACHE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > count)
			bytes = count;
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the same
		 * page(s) as we are writing to, without it/them being marked
		 * up-to-date. Note, at present there is nothing to stop the
		 * pages being swapped out between us bringing them into memory
		 * and doing the actual copying.
		 */
		if (likely(nr_segs == 1))
			ntfs_fault_in_pages_readable(buf, bytes);
		else
			ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page, &lru_pvec);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped. We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				loff_t i_size;

				do {
					unlock_page(pages[--do_pages]);
					page_cache_release(pages[do_pages]);
				} while (do_pages);
				/*
				 * The write preparation may have instantiated
				 * allocated space outside i_size. Trim this
				 * off again. We can ignore any errors in this
				 * case as we will just be wasting a bit of
				 * allocated space, which is not a disaster.
				 */
				i_size = i_size_read(vi);
				if (pos + bytes > i_size)
					vmtruncate(vi, i_size);
				break;
			}
		}
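		/*
		 * If a hole forced us to grab the pages of the whole cluster
		 * above, the first pages in @pages can lie before the write
		 * position, so compute the index within @pages of the page
		 * that contains @pos.
		 */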
		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
		if (likely(nr_segs == 1)) {
			copied = ntfs_copy_from_user(pages + u, do_pages - u,
					ofs, buf, bytes);
			buf += copied;
		} else
			copied = ntfs_copy_from_user_iovec(pages + u,
					do_pages - u, ofs, &iov, &iov_ofs,
					bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = ntfs_commit_pages_after_write(pages, do_pages, pos,
				bytes);
		if (likely(!status)) {
			written += copied;
			count -= copied;
			pos += copied;
			if (unlikely(copied != bytes))
				status = -EFAULT;
		}
		do {
			unlock_page(pages[--do_pages]);
			mark_page_accessed(pages[do_pages]);
			page_cache_release(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status))
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
err_out:
	*ppos = pos;
	if (cached_page)
		page_cache_release(cached_page);
	pagevec_lru_add_file(&lru_pvec);
	ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}

/**
 * ntfs_file_aio_write_nolock - write data to a file (caller holds ->i_mutex)
 */
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos;
	size_t count;		/* after file limit checks */
	ssize_t written, err;

	count = 0;
	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (!count)
		goto out;
	err = file_remove_suid(file);
	if (err)
		goto out;
	file_update_time(file);
	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
			count);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/**
 * ntfs_file_aio_write - write data to an open file
 */
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);
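	/*
	 * If data was written, let generic_write_sync() flush it to disk
	 * where that is required, i.e. for synchronous writes and for inodes
	 * marked for synchronous i/o.
	 */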
	if (ret > 0) {
		int err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

/**
 * ntfs_file_fsync - sync a file to disk
 * @filp:	file to be synced
 * @dentry:	dentry describing the file to sync
 * @datasync:	if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
 * system calls. This function is inspired by fs/buffer.c::file_fsync().
 *
 * If @datasync is false, write the mft record and all associated extent mft
 * records as well as the $DATA attribute and then sync the block device.
 *
 * If @datasync is true and the attribute is non-resident, we skip the writing
 * of the mft record and all associated extent mft records (this might still
 * happen due to the write_inode_now() call).
 *
 * Also, if @datasync is true, we do not wait on the inode to be written out
 * but we always wait on the page cache pages to be written out.
 *
 * Note: In the past @filp could be NULL so we ignore it as we don't need it
 * anyway.
 *
 * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
 * this problem for now.
 */
static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
		int datasync)
{
	struct inode *vi = dentry->d_inode;
	int err, ret = 0;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = __ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below to be
	 * sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
				"%u.", datasync ? "data" : "", vi->i_ino,
				-ret);
	return ret;
}

#endif /* NTFS_RW */

const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,	 /* Seek inside file. */
	.read		= do_sync_read,		 /* Read from file. */
	.aio_read	= generic_file_aio_read, /* Async read from file. */
#ifdef NTFS_RW
	.write		= do_sync_write,	 /* Write to file. */
	.aio_write	= ntfs_file_aio_write,	 /* Async write to file. */
	/*.release	= ,*/			 /* Last file is closed. See
						    fs/ext2/file.c::
						    ext2_release_file() for
						    how to use this to discard
						    preallocated space for
						    write opened files. */
	.fsync		= ntfs_file_fsync,	 /* Sync a file to disk. */
	/*.aio_fsync	= ,*/			 /* Sync all outstanding async
						    i/o operations on a
						    kiocb. */
#endif /* NTFS_RW */
	/*.ioctl	= ,*/			 /* Perform function on the
						    mounted filesystem. */
	.mmap		= generic_file_mmap,	 /* Mmap file. */
	.open		= ntfs_file_open,	 /* Open file. */
	.splice_read	= generic_file_splice_read /* Zero-copy data send with
						      the data source being on
						      the ntfs partition. We do
						      not need to care about the
						      data destination. */
	/*.sendpage	= ,*/			 /* Zero-copy data send with
						      the data destination being
						      on the ntfs partition. We
						      do not need to care about
						      the data source. */
};

const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.truncate	= ntfs_truncate_vfs,
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};