/* zftape-compress.c */
  1. /*
  2. * Copyright (C) 1994-1997 Claus-Justus Heine
  3. This program is free software; you can redistribute it and/or
  4. modify it under the terms of the GNU General Public License as
  5. published by the Free Software Foundation; either version 2, or (at
  6. your option) any later version.
  7. This program is distributed in the hope that it will be useful, but
  8. WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10. General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; see the file COPYING. If not, write to
  13. the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
  14. USA.
  15. *
  16. * This file implements a "generic" interface between the *
  17. * zftape-driver and a compression-algorithm. The *
  18. * compression-algorithm currently used is a LZ77. I use the *
  19. * implementation lzrw3 by Ross N. Williams (Renaissance *
  20. * Software). The compression program itself is in the file
  21. * lzrw3.c * and lzrw3.h. To adopt another compression algorithm
  22. * the functions * zft_compress() and zft_uncompress() must be
  23. * changed * appropriately. See below.
  24. */
  25. #include <linux/errno.h>
  26. #include <linux/mm.h>
  27. #include <linux/module.h>
  28. #include <linux/zftape.h>
  29. #include <asm/uaccess.h>
  30. #include "../zftape/zftape-init.h"
  31. #include "../zftape/zftape-eof.h"
  32. #include "../zftape/zftape-ctl.h"
  33. #include "../zftape/zftape-write.h"
  34. #include "../zftape/zftape-read.h"
  35. #include "../zftape/zftape-rw.h"
  36. #include "../compressor/zftape-compress.h"
  37. #include "../zftape/zftape-vtbl.h"
  38. #include "../compressor/lzrw3.h"
  39. /*
  40. * global variables
  41. */
  42. /* I handle the allocation of this buffer as a special case, because
  43. * it's size varies depending on the tape length inserted.
  44. */
  45. /* local variables
  46. */
  47. static void *zftc_wrk_mem = NULL;
  48. static __u8 *zftc_buf = NULL;
  49. static void *zftc_scratch_buf = NULL;
  50. /* compression statistics
  51. */
  52. static unsigned int zftc_wr_uncompressed = 0;
  53. static unsigned int zftc_wr_compressed = 0;
  54. static unsigned int zftc_rd_uncompressed = 0;
  55. static unsigned int zftc_rd_compressed = 0;
  56. /* forward */
  57. static int zftc_write(int *write_cnt,
  58. __u8 *dst_buf, const int seg_sz,
  59. const __u8 __user *src_buf, const int req_len,
  60. const zft_position *pos, const zft_volinfo *volume);
  61. static int zftc_read(int *read_cnt,
  62. __u8 __user *dst_buf, const int to_do,
  63. const __u8 *src_buf, const int seg_sz,
  64. const zft_position *pos, const zft_volinfo *volume);
  65. static int zftc_seek(unsigned int new_block_pos,
  66. zft_position *pos, const zft_volinfo *volume,
  67. __u8 *buffer);
  68. static void zftc_lock (void);
  69. static void zftc_reset (void);
  70. static void zftc_cleanup(void);
  71. static void zftc_stats (void);
  72. /* compressed segment. This conforms to QIC-80-MC, Revision K.
  73. *
  74. * Rev. K applies to tapes with `fixed length format' which is
  75. * indicated by format code 2,3 and 5. See below for format code 4 and 6
  76. *
  77. * 2 bytes: offset of compression segment structure
  79. * 29k > offset >= 29k-18: data from previous segment ends in this
  79. * segment and no compressed block starts
  80. * in this segment
  81. * offset == 0: data from previous segment occupies entire
  82. * segment and continues in next segment
  83. * n bytes: remainder from previous segment
  84. *
  85. * Rev. K:
  86. * 4 bytes: 4 bytes: files set byte offset
  87. * Post Rev. K and QIC-3020/3020:
  88. * 8 bytes: 8 bytes: files set byte offset
  89. * 2 bytes: byte count N (amount of data following)
  90. * bit 15 is set if data is compressed, bit 15 is not
  91. * set if data is uncompressed
  92. * N bytes: data (as much as specified in the byte count)
  93. * 2 bytes: byte count N_1 of next cluster
  94. * N_1 bytes: data of next cluster
  95. * 2 bytes: byte count N_2 of next cluster
  96. * N_2 bytes: ...
  97. *
  98. * Note that the `N' byte count accounts only for the bytes that are in
  99. * the current segment if the cluster spans to the next segment.
  100. */
  101. typedef struct
  102. {
  103. int cmpr_pos; /* actual position in compression buffer */
  104. int cmpr_sz; /* what is left in the compression buffer
  105. * when copying the compressed data to the
  106. * deblock buffer
  107. */
  108. unsigned int first_block; /* location of header information in
  109. * this segment
  110. */
  111. unsigned int count; /* amount of data of current block
  112. * contained in current segment
  113. */
  114. unsigned int offset; /* offset in current segment */
  115. unsigned int spans:1; /* might continue in next segment */
  116. unsigned int uncmpr; /* 0x8000 if this block contains
  117. * uncompressed data
  118. */
  119. __s64 foffs; /* file set byte offset, same as in
  120. * compression map segment
  121. */
  122. } cmpr_info;
  123. static cmpr_info cseg; /* static data. Must be kept uptodate and shared by
  124. * read, write and seek functions
  125. */
  126. #define DUMP_CMPR_INFO(level, msg, info) \
  127. TRACE(level, msg "\n" \
  128. KERN_INFO "cmpr_pos : %d\n" \
  129. KERN_INFO "cmpr_sz : %d\n" \
  130. KERN_INFO "first_block: %d\n" \
  131. KERN_INFO "count : %d\n" \
  132. KERN_INFO "offset : %d\n" \
  133. KERN_INFO "spans : %d\n" \
  134. KERN_INFO "uncmpr : 0x%04x\n" \
  135. KERN_INFO "foffs : " LL_X, \
  136. (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block, \
  137. (info)->count, (info)->offset, (info)->spans == 1, \
  138. (info)->uncmpr, LL((info)->foffs))
  139. /* dispatch compression segment info, return error code
  140. *
  141. * afterwards, cseg->offset points to start of data of the NEXT
  142. * compressed block, and cseg->count contains the amount of data
  143. * left in the actual compressed block. cseg->spans is set to 1 if
  144. * the block is continued in the following segment. Otherwise it is
  145. * set to 0.
  146. */
  147. static int get_cseg (cmpr_info *cinfo, const __u8 *buff,
  148. const unsigned int seg_sz,
  149. const zft_volinfo *volume)
  150. {
  151. TRACE_FUN(ft_t_flow);
  152. cinfo->first_block = GET2(buff, 0);
  153. if (cinfo->first_block == 0) { /* data spans to next segment */
  154. cinfo->count = seg_sz - sizeof(__u16);
  155. cinfo->offset = seg_sz;
  156. cinfo->spans = 1;
  157. } else { /* cluster definetely ends in this segment */
  158. if (cinfo->first_block > seg_sz) {
  159. /* data corrupted */
  160. TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
  161. KERN_INFO "segment size: %d\n"
  162. KERN_INFO "first block : %d",
  163. seg_sz, cinfo->first_block);
  164. }
  165. cinfo->count = cinfo->first_block - sizeof(__u16);
  166. cinfo->offset = cinfo->first_block;
  167. cinfo->spans = 0;
  168. }
  169. /* now get the offset the first block should have in the
  170. * uncompressed data stream.
  171. *
  172. * For this magic `18' refer to CRF-3 standard or QIC-80MC,
  173. * Rev. K.
  174. */
  175. if ((seg_sz - cinfo->offset) > 18) {
  176. if (volume->qic113) { /* > revision K */
  177. TRACE(ft_t_data_flow, "New QIC-113 compliance");
  178. cinfo->foffs = GET8(buff, cinfo->offset);
  179. cinfo->offset += sizeof(__s64);
  180. } else {
  181. TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
  182. cinfo->foffs = (__s64)GET4(buff, cinfo->offset);
  183. cinfo->offset += sizeof(__u32);
  184. }
  185. }
  186. if (cinfo->foffs > volume->size) {
  187. TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
  188. KERN_INFO "offset in current volume: %d\n"
  189. KERN_INFO "size of current volume : %d",
  190. (int)(cinfo->foffs>>10), (int)(volume->size>>10));
  191. }
  192. if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
  193. TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
  194. KERN_INFO "block size : %d\n"
  195. KERN_INFO "data record: %d",
  196. volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
  197. }
  198. DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
  199. TRACE_EXIT 0;
  200. }
  201. /* This one is called, when a new cluster starts in same segment.
  202. *
  203. * Note: if this is the first cluster in the current segment, we must
  204. * not check whether there are more than 18 bytes available because
  205. * this have already been done in get_cseg() and there may be less
  206. * than 18 bytes available due to header information.
  207. *
  208. */
  209. static void get_next_cluster(cmpr_info *cluster, const __u8 *buff,
  210. const int seg_sz, const int finish)
  211. {
  212. TRACE_FUN(ft_t_flow);
  213. if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
  214. cluster->count = GET2(buff, cluster->offset);
  215. cluster->uncmpr = cluster->count & 0x8000;
  216. cluster->count -= cluster->uncmpr;
  217. cluster->offset += sizeof(__u16);
  218. cluster->foffs = 0;
  219. if ((cluster->offset + cluster->count) < seg_sz) {
  220. cluster->spans = 0;
  221. } else if (cluster->offset + cluster->count == seg_sz) {
  222. cluster->spans = !finish;
  223. } else {
  224. /* either an error or a volume written by an
  225. * old version. If this is a data error, then we'll
  226. * catch it later.
  227. */
  228. TRACE(ft_t_data_flow, "Either error or old volume");
  229. cluster->spans = 1;
  230. cluster->count = seg_sz - cluster->offset;
  231. }
  232. } else {
  233. cluster->count = 0;
  234. cluster->spans = 0;
  235. cluster->foffs = 0;
  236. }
  237. DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
  238. TRACE_EXIT;
  239. }
  240. static void zftc_lock(void)
  241. {
  242. }
  243. /* this function is needed for zftape_reset_position in zftape-io.c
  244. */
  245. static void zftc_reset(void)
  246. {
  247. TRACE_FUN(ft_t_flow);
  248. memset((void *)&cseg, '\0', sizeof(cseg));
  249. zftc_stats();
  250. TRACE_EXIT;
  251. }
  252. static int cmpr_mem_initialized = 0;
  253. static unsigned int alloc_blksz = 0;
  254. static int zft_allocate_cmpr_mem(unsigned int blksz)
  255. {
  256. TRACE_FUN(ft_t_flow);
  257. if (cmpr_mem_initialized && blksz == alloc_blksz) {
  258. TRACE_EXIT 0;
  259. }
  260. TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
  261. zftc_cleanup());
  262. TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
  263. zftc_cleanup());
  264. alloc_blksz = blksz;
  265. TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
  266. zftc_cleanup());
  267. cmpr_mem_initialized = 1;
  268. TRACE_EXIT 0;
  269. }
  270. static void zftc_cleanup(void)
  271. {
  272. TRACE_FUN(ft_t_flow);
  273. zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
  274. zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
  275. zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
  276. cmpr_mem_initialized = alloc_blksz = 0;
  277. TRACE_EXIT;
  278. }
  279. /*****************************************************************************
  280. * *
  281. * The following two functions "ftape_compress()" and *
  282. * "ftape_uncompress()" are the interface to the actual compression *
  283. * algorithm (i.e. they are calling the "compress()" function from *
  284. * the lzrw3 package for now). These routines could quite easily be *
  285. * changed to adopt another compression algorithm instead of lzrw3, *
  286. * which currently is used. *
  287. * *
  288. *****************************************************************************/
  289. /* called by zft_compress_write() to perform the compression. Must
  290. * return the size of the compressed data.
  291. *
  292. * NOTE: The size of the compressed data should not exceed the size of
  293. * the uncompressed data. Most compression algorithms have means
  294. * to store data unchanged if the "compressed" data amount would
  295. * exceed the original one. Mostly this is done by storing some
  296. * flag-bytes in front of the compressed data to indicate if it
  297. * is compressed or not. Thus the worst compression result
  298. * length is the original length plus those flag-bytes.
  299. *
  300. * We don't want that, as the QIC-80 standard provides a means
  301. * of marking uncompressed blocks by simply setting bit 15 of
  302. * the compressed block's length. Thus a compessed block can
  303. * have at most a length of 2^15-1 bytes. The QIC-80 standard
  304. * restricts the block-length even further, allowing only 29k -
  305. * 6 bytes.
  306. *
  307. * Currently, the maximum blocksize used by zftape is 28k.
  308. *
  309. * In short: don't exceed the length of the input-package, set
  310. * bit 15 of the compressed size to 1 if you have copied data
  311. * instead of compressing it.
  312. */
  313. static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
  314. {
  315. __s32 compressed_sz;
  316. TRACE_FUN(ft_t_flow);
  317. lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
  318. in_buffer, in_sz, out_buffer, &compressed_sz);
  319. if (TRACE_LEVEL >= ft_t_info) {
  320. /* the compiler will optimize this away when
  321. * compiled with NO_TRACE_AT_ALL option
  322. */
  323. TRACE(ft_t_data_flow, "\n"
  324. KERN_INFO "before compression: %d bytes\n"
  325. KERN_INFO "after compresison : %d bytes",
  326. in_sz,
  327. (int)(compressed_sz < 0
  328. ? -compressed_sz : compressed_sz));
  329. /* for statistical purposes
  330. */
  331. zftc_wr_compressed += (compressed_sz < 0
  332. ? -compressed_sz : compressed_sz);
  333. zftc_wr_uncompressed += in_sz;
  334. }
  335. TRACE_EXIT (int)compressed_sz;
  336. }
  337. /* called by zft_compress_read() to decompress the data. Must
  338. * return the size of the decompressed data for sanity checks
  339. * (compared with zft_blk_sz)
  340. *
  341. * NOTE: Read the note for zft_compress() above! If bit 15 of the
  342. * parameter in_sz is set, then the data in in_buffer isn't
  343. * compressed, which must be handled by the un-compression
  344. * algorithm. (I changed lzrw3 to handle this.)
  345. *
  346. * The parameter max_out_sz is needed to prevent buffer overruns when
  347. * uncompressing corrupt data.
  348. */
  349. static unsigned int zft_uncompress(__u8 *in_buffer,
  350. int in_sz,
  351. __u8 *out_buffer,
  352. unsigned int max_out_sz)
  353. {
  354. TRACE_FUN(ft_t_flow);
  355. lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
  356. in_buffer, (__s32)in_sz,
  357. out_buffer, (__u32 *)&max_out_sz);
  358. if (TRACE_LEVEL >= ft_t_info) {
  359. TRACE(ft_t_data_flow, "\n"
  360. KERN_INFO "before decompression: %d bytes\n"
  361. KERN_INFO "after decompression : %d bytes",
  362. in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
  363. /* for statistical purposes
  364. */
  365. zftc_rd_compressed += in_sz < 0 ? -in_sz : in_sz;
  366. zftc_rd_uncompressed += max_out_sz;
  367. }
  368. TRACE_EXIT (unsigned int)max_out_sz;
  369. }
  370. /* print some statistics about the efficiency of the compression to
  371. * the kernel log
  372. */
  373. static void zftc_stats(void)
  374. {
  375. TRACE_FUN(ft_t_flow);
  376. if (TRACE_LEVEL < ft_t_info) {
  377. TRACE_EXIT;
  378. }
  379. if (zftc_wr_uncompressed != 0) {
  380. if (zftc_wr_compressed > (1<<14)) {
  381. TRACE(ft_t_info, "compression statistics (writing):\n"
  382. KERN_INFO " compr./uncmpr. : %3d %%",
  383. (((zftc_wr_compressed>>10) * 100)
  384. / (zftc_wr_uncompressed>>10)));
  385. } else {
  386. TRACE(ft_t_info, "compression statistics (writing):\n"
  387. KERN_INFO " compr./uncmpr. : %3d %%",
  388. ((zftc_wr_compressed * 100)
  389. / zftc_wr_uncompressed));
  390. }
  391. }
  392. if (zftc_rd_uncompressed != 0) {
  393. if (zftc_rd_compressed > (1<<14)) {
  394. TRACE(ft_t_info, "compression statistics (reading):\n"
  395. KERN_INFO " compr./uncmpr. : %3d %%",
  396. (((zftc_rd_compressed>>10) * 100)
  397. / (zftc_rd_uncompressed>>10)));
  398. } else {
  399. TRACE(ft_t_info, "compression statistics (reading):\n"
  400. KERN_INFO " compr./uncmpr. : %3d %%",
  401. ((zftc_rd_compressed * 100)
  402. / zftc_rd_uncompressed));
  403. }
  404. }
  405. /* only print it once: */
  406. zftc_wr_uncompressed =
  407. zftc_wr_compressed =
  408. zftc_rd_uncompressed =
  409. zftc_rd_compressed = 0;
  410. TRACE_EXIT;
  411. }
  412. /* start new compressed block
  413. */
  414. static int start_new_cseg(cmpr_info *cluster,
  415. char *dst_buf,
  416. const zft_position *pos,
  417. const unsigned int blk_sz,
  418. const char *src_buf,
  419. const int this_segs_sz,
  420. const int qic113)
  421. {
  422. int size_left;
  423. int cp_cnt;
  424. int buf_pos;
  425. TRACE_FUN(ft_t_flow);
  426. size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
  427. TRACE(ft_t_data_flow,"\n"
  428. KERN_INFO "segment size : %d\n"
  429. KERN_INFO "compressed_sz: %d\n"
  430. KERN_INFO "size_left : %d",
  431. this_segs_sz, cluster->cmpr_sz, size_left);
  432. if (size_left > 18) { /* start a new cluseter */
  433. cp_cnt = cluster->cmpr_sz;
  434. cluster->cmpr_sz = 0;
  435. buf_pos = cp_cnt + sizeof(__u16);
  436. PUT2(dst_buf, 0, buf_pos);
  437. if (qic113) {
  438. __s64 foffs = pos->volume_pos;
  439. if (cp_cnt) foffs += (__s64)blk_sz;
  440. TRACE(ft_t_data_flow, "new style QIC-113 header");
  441. PUT8(dst_buf, buf_pos, foffs);
  442. buf_pos += sizeof(__s64);
  443. } else {
  444. __u32 foffs = (__u32)pos->volume_pos;
  445. if (cp_cnt) foffs += (__u32)blk_sz;
  446. TRACE(ft_t_data_flow, "old style QIC-80MC header");
  447. PUT4(dst_buf, buf_pos, foffs);
  448. buf_pos += sizeof(__u32);
  449. }
  450. } else if (size_left >= 0) {
  451. cp_cnt = cluster->cmpr_sz;
  452. cluster->cmpr_sz = 0;
  453. buf_pos = cp_cnt + sizeof(__u16);
  454. PUT2(dst_buf, 0, buf_pos);
  455. /* zero unused part of segment. */
  456. memset(dst_buf + buf_pos, '\0', size_left);
  457. buf_pos = this_segs_sz;
  458. } else { /* need entire segment and more space */
  459. PUT2(dst_buf, 0, 0);
  460. cp_cnt = this_segs_sz - sizeof(__u16);
  461. cluster->cmpr_sz -= cp_cnt;
  462. buf_pos = this_segs_sz;
  463. }
  464. memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
  465. cluster->cmpr_pos += cp_cnt;
  466. TRACE_EXIT buf_pos;
  467. }
  468. /* return-value: the number of bytes removed from the user-buffer
  469. * `src_buf' or error code
  470. *
  471. * int *write_cnt : how much actually has been moved to the
  472. * dst_buf. Need not be initialized when
  473. * function returns with an error code
  474. * (negativ return value)
  475. * __u8 *dst_buf : kernel space buffer where the has to be
  476. * copied to. The contents of this buffers
  477. * goes to a specific segment.
  478. * const int seg_sz : the size of the segment dst_buf will be
  479. * copied to.
  480. * const zft_position *pos : struct containing the coordinates in
  481. * the current volume (byte position,
  482. * segment id of current segment etc)
  483. * const zft_volinfo *volume: information about the current volume,
  484. * size etc.
  485. * const __u8 *src_buf : user space buffer that contains the
  486. * data the user wants to be written to
  487. * tape.
  488. * const int req_len : the amount of data the user wants to be
  489. * written to tape.
  490. */
  491. static int zftc_write(int *write_cnt,
  492. __u8 *dst_buf, const int seg_sz,
  493. const __u8 __user *src_buf, const int req_len,
  494. const zft_position *pos, const zft_volinfo *volume)
  495. {
  496. int req_len_left = req_len;
  497. int result;
  498. int len_left;
  499. int buf_pos_write = pos->seg_byte_pos;
  500. TRACE_FUN(ft_t_flow);
  501. /* Note: we do not unlock the module because
  502. * there are some values cached in that `cseg' variable. We
  503. * don't don't want to use this information when being
  504. * unloaded by kerneld even when the tape is full or when we
  505. * cannot allocate enough memory.
  506. */
  507. if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
  508. TRACE_EXIT -ENOSPC;
  509. }
  510. if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
  511. /* should we unlock the module? But it shouldn't
  512. * be locked anyway ...
  513. */
  514. TRACE_EXIT -ENOMEM;
  515. }
  516. if (buf_pos_write == 0) { /* fill a new segment */
  517. *write_cnt = buf_pos_write = start_new_cseg(&cseg,
  518. dst_buf,
  519. pos,
  520. volume->blk_sz,
  521. zftc_buf,
  522. seg_sz,
  523. volume->qic113);
  524. if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
  525. req_len_left -= result = volume->blk_sz;
  526. cseg.cmpr_pos = 0;
  527. } else {
  528. result = 0;
  529. }
  530. } else {
  531. *write_cnt = result = 0;
  532. }
  533. len_left = seg_sz - buf_pos_write;
  534. while ((req_len_left > 0) && (len_left > 18)) {
  535. /* now we have some size left for a new compressed
  536. * block. We know, that the compression buffer is
  537. * empty (else there wouldn't be any space left).
  538. */
  539. if (copy_from_user(zftc_scratch_buf, src_buf + result,
  540. volume->blk_sz) != 0) {
  541. TRACE_EXIT -EFAULT;
  542. }
  543. req_len_left -= volume->blk_sz;
  544. cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz,
  545. zftc_buf);
  546. if (cseg.cmpr_sz < 0) {
  547. cseg.uncmpr = 0x8000;
  548. cseg.cmpr_sz = -cseg.cmpr_sz;
  549. } else {
  550. cseg.uncmpr = 0;
  551. }
  552. /* increment "result" iff we copied the entire
  553. * compressed block to the zft_deblock_buf
  554. */
  555. len_left -= sizeof(__u16);
  556. if (len_left >= cseg.cmpr_sz) {
  557. len_left -= cseg.count = cseg.cmpr_sz;
  558. cseg.cmpr_pos = cseg.cmpr_sz = 0;
  559. result += volume->blk_sz;
  560. } else {
  561. cseg.cmpr_sz -=
  562. cseg.cmpr_pos =
  563. cseg.count = len_left;
  564. len_left = 0;
  565. }
  566. PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
  567. buf_pos_write += sizeof(__u16);
  568. memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
  569. buf_pos_write += cseg.count;
  570. *write_cnt += cseg.count + sizeof(__u16);
  571. FT_SIGNAL_EXIT(_DONT_BLOCK);
  572. }
  573. /* erase the remainder of the segment if less than 18 bytes
  574. * left (18 bytes is due to the QIC-80 standard)
  575. */
  576. if (len_left <= 18) {
  577. memset(dst_buf + buf_pos_write, '\0', len_left);
  578. (*write_cnt) += len_left;
  579. }
  580. TRACE(ft_t_data_flow, "returning %d", result);
  581. TRACE_EXIT result;
  582. }
  583. /* out:
  584. *
  585. * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
  586. * (result)
  587. * int *to_do : the remaining size of the read-request.
  588. *
  589. * in:
  590. *
  591. * char *buff : buff is the address of the upper part of the user
  592. * buffer, that hasn't been filled with data yet.
  593. * int buf_pos_read : copy of from _ftape_read()
  594. * int buf_len_read : copy of buf_len_rd from _ftape_read()
  595. * char *zft_deblock_buf: zft_deblock_buf
  596. * unsigned short blk_sz: the block size valid for this volume, may differ
  597. * from zft_blk_sz.
  598. * int finish: if != 0 means that this is the last segment belonging
  599. * to this volume
  600. * returns the amount of data actually copied to the user-buffer
  601. *
  602. * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
  603. * be set to 0
  604. */
  605. static int zftc_read (int *read_cnt,
  606. __u8 __user *dst_buf, const int to_do,
  607. const __u8 *src_buf, const int seg_sz,
  608. const zft_position *pos, const zft_volinfo *volume)
  609. {
  610. int uncompressed_sz;
  611. int result = 0;
  612. int remaining = to_do;
  613. TRACE_FUN(ft_t_flow);
  614. TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
  615. if (pos->seg_byte_pos == 0) {
  616. /* new segment just read
  617. */
  618. TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
  619. *read_cnt = 0);
  620. memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16),
  621. cseg.count);
  622. cseg.cmpr_pos += cseg.count;
  623. *read_cnt = cseg.offset;
  624. DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
  625. } else {
  626. *read_cnt = 0;
  627. }
  628. /* loop and uncompress until user buffer full or
  629. * deblock-buffer empty
  630. */
  631. TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
  632. cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
  633. while ((cseg.spans == 0) && (remaining > 0)) {
  634. if (cseg.cmpr_pos != 0) { /* cmpr buf is not empty */
  635. uncompressed_sz =
  636. zft_uncompress(zftc_buf,
  637. cseg.uncmpr == 0x8000 ?
  638. -cseg.cmpr_pos : cseg.cmpr_pos,
  639. zftc_scratch_buf,
  640. volume->blk_sz);
  641. if (uncompressed_sz != volume->blk_sz) {
  642. *read_cnt = 0;
  643. TRACE_ABORT(-EIO, ft_t_warn,
  644. "Uncompressed blk (%d) != blk size (%d)",
  645. uncompressed_sz, volume->blk_sz);
  646. }
  647. if (copy_to_user(dst_buf + result,
  648. zftc_scratch_buf,
  649. uncompressed_sz) != 0 ) {
  650. TRACE_EXIT -EFAULT;
  651. }
  652. remaining -= uncompressed_sz;
  653. result += uncompressed_sz;
  654. cseg.cmpr_pos = 0;
  655. }
  656. if (remaining > 0) {
  657. get_next_cluster(&cseg, src_buf, seg_sz,
  658. volume->end_seg == pos->seg_pos);
  659. if (cseg.count != 0) {
  660. memcpy(zftc_buf, src_buf + cseg.offset,
  661. cseg.count);
  662. cseg.cmpr_pos = cseg.count;
  663. cseg.offset += cseg.count;
  664. *read_cnt += cseg.count + sizeof(__u16);
  665. } else {
  666. remaining = 0;
  667. }
  668. }
  669. TRACE(ft_t_data_flow, "\n"
  670. KERN_INFO "compressed_sz: %d\n"
  671. KERN_INFO "compos : %d\n"
  672. KERN_INFO "*read_cnt : %d",
  673. cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
  674. }
  675. if (seg_sz - cseg.offset <= 18) {
  676. *read_cnt += seg_sz - cseg.offset;
  677. TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
  678. }
  679. TRACE(ft_t_data_flow, "\n"
  680. KERN_INFO "segment size : %d\n"
  681. KERN_INFO "read count : %d\n"
  682. KERN_INFO "buf_pos_read : %d\n"
  683. KERN_INFO "remaining : %d",
  684. seg_sz, *read_cnt, pos->seg_byte_pos,
  685. seg_sz - *read_cnt - pos->seg_byte_pos);
  686. TRACE(ft_t_data_flow, "returning: %d", result);
  687. TRACE_EXIT result;
  688. }
  689. /* seeks to the new data-position. Reads sometimes a segment.
  690. *
  691. * start_seg and end_seg give the boundaries of the current volume
  692. * blk_sz is the blk_sz of the current volume as stored in the
  693. * volume label
  694. *
  695. * We don't allow blocksizes less than 1024 bytes, therefore we don't need
  696. * a 64 bit argument for new_block_pos.
  697. */
  698. static int seek_in_segment(const unsigned int to_do, cmpr_info *c_info,
  699. const char *src_buf, const int seg_sz,
  700. const int seg_pos, const zft_volinfo *volume);
  701. static int slow_seek_forward_until_error(const unsigned int distance,
  702. cmpr_info *c_info, zft_position *pos,
  703. const zft_volinfo *volume, __u8 *buf);
  704. static int search_valid_segment(unsigned int segment,
  705. const unsigned int end_seg,
  706. const unsigned int max_foffs,
  707. zft_position *pos, cmpr_info *c_info,
  708. const zft_volinfo *volume, __u8 *buf);
  709. static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
  710. zft_position *pos, const zft_volinfo *volume,
  711. __u8 *buf);
  712. static int compute_seg_pos(unsigned int dest, zft_position *pos,
  713. const zft_volinfo *volume);
  714. #define ZFT_SLOW_SEEK_THRESHOLD 10 /* segments */
  715. #define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
  716. #define ZFT_FAST_SEEK_BACKUP 10 /* segments */
/* Seek to logical block new_block_pos inside the current compressed
 * volume.
 *
 * new_block_pos is in units of the volume's block size (>= 1K, see the
 * comment above), pos tracks the current tape/volume position and is
 * updated in place, volume describes the current volume, and buf is the
 * deblock buffer used for reading segments.
 *
 * Strategy: estimate the target segment with compute_seg_pos() and jump
 * there ("fast seek").  If the estimate loops between two segments, gets
 * close to the target, or fails too often, fall back to a sequential
 * "slow seek".  Returns 0 on success or a negative error code.
 */
static int zftc_seek(unsigned int new_block_pos,
		     zft_position *pos, const zft_volinfo *volume, __u8 *buf)
{
	unsigned int dest;	/* target position in kilobytes */
	int limit;		/* upper bound for search_valid_segment() */
	int distance;		/* remaining distance to dest, in kilobytes */
	int result = 0;
	int seg_dist;		/* estimated distance in segments */
	int new_seg;
	int old_seg = 0;	/* last fast-seek target, for loop detection */
	int fast_seek_trials = 0;
	TRACE_FUN(ft_t_flow);

	if (new_block_pos == 0) {
		/* Rewind to the start of the volume; no tape access needed,
		 * just reset the position and the compression state.
		 */
		pos->seg_pos = volume->start_seg;
		pos->seg_byte_pos = 0;
		pos->volume_pos = 0;
		zftc_reset();
		TRACE_EXIT 0;
	}
	/* All distance bookkeeping below is done in kilobytes. */
	dest = new_block_pos * (volume->blk_sz >> 10);
	distance = dest - (pos->volume_pos >> 10);
	while (distance != 0) {
		seg_dist = compute_seg_pos(dest, pos, volume);
		TRACE(ft_t_noise, "\n"
		      KERN_INFO "seg_dist: %d\n"
		      KERN_INFO "distance: %d\n"
		      KERN_INFO "dest : %d\n"
		      KERN_INFO "vpos : %d\n"
		      KERN_INFO "seg_pos : %d\n"
		      KERN_INFO "trials : %d",
		      seg_dist, distance, dest,
		      (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
		      fast_seek_trials);
		if (distance > 0) {
			/* Seeking forward. */
			if (seg_dist < 0) {
				/* Estimate contradicts direction; give up. */
				TRACE(ft_t_bug, "BUG: distance %d > 0, "
				      "segment difference %d < 0",
				      distance, seg_dist);
				result = -EIO;
				break;
			}
			new_seg = pos->seg_pos + seg_dist;
			if (new_seg > volume->end_seg) {
				new_seg = volume->end_seg;
			}
			if (old_seg == new_seg || /* loop */
			    seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
			    fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
				/* Fast seek is not making progress (or we
				 * are already close); finish sequentially.
				 */
				TRACE(ft_t_noise, "starting slow seek:\n"
				      KERN_INFO "fast seek failed too often: %s\n"
				      KERN_INFO "near target position : %s\n"
				      KERN_INFO "looping between two segs : %s",
				      (fast_seek_trials >=
				       ZFT_FAST_SEEK_MAX_TRIALS)
				      ? "yes" : "no",
				      (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD)
				      ? "yes" : "no",
				      (old_seg == new_seg)
				      ? "yes" : "no");
				result = slow_seek_forward(dest, &cseg,
							   pos, volume, buf);
				break;
			}
			old_seg = new_seg;
			limit = volume->end_seg;
			fast_seek_trials ++;
			for (;;) {
				result = search_valid_segment(new_seg, limit,
							      volume->size,
							      pos, &cseg,
							      volume, buf);
				if (result == 0 || result == -EINTR) {
					break;
				}
				if (new_seg == volume->start_seg) {
					result = -EIO; /* set error
							* condition
							*/
					break;
				}
				/* Nothing readable between new_seg and
				 * limit; back up and widen the search.
				 */
				limit = new_seg;
				new_seg -= ZFT_FAST_SEEK_BACKUP;
				if (new_seg < volume->start_seg) {
					new_seg = volume->start_seg;
				}
			}
			if (result < 0) {
				TRACE(ft_t_warn,
				      "Couldn't find a readable segment");
				break;
			}
		} else /* if (distance < 0) */ {
			/* Seeking backward. */
			if (seg_dist > 0) {
				TRACE(ft_t_bug, "BUG: distance %d < 0, "
				      "segment difference %d >0",
				      distance, seg_dist);
				result = -EIO;
				break;
			}
			new_seg = pos->seg_pos + seg_dist;
			if (fast_seek_trials > 0 && seg_dist == 0) {
				/* this avoids sticking to the same
				 * segment all the time. On the other hand:
				 * if we got here for the first time, and the
				 * deblock_buffer still contains a valid
				 * segment, then there is no need to skip to
				 * the previous segment if the desired position
				 * is inside this segment.
				 */
				new_seg --;
			}
			if (new_seg < volume->start_seg) {
				new_seg = volume->start_seg;
			}
			limit = pos->seg_pos;
			fast_seek_trials ++;
			for (;;) {
				result = search_valid_segment(new_seg, limit,
							      pos->volume_pos,
							      pos, &cseg,
							      volume, buf);
				if (result == 0 || result == -EINTR) {
					break;
				}
				if (new_seg == volume->start_seg) {
					result = -EIO; /* set error
							* condition
							*/
					break;
				}
				limit = new_seg;
				new_seg -= ZFT_FAST_SEEK_BACKUP;
				if (new_seg < volume->start_seg) {
					new_seg = volume->start_seg;
				}
			}
			if (result < 0) {
				TRACE(ft_t_warn,
				      "Couldn't find a readable segment");
				break;
			}
		}
		/* Re-evaluate how far we still are from the target. */
		distance = dest - (pos->volume_pos >> 10);
	}
	TRACE_EXIT result;
}
/* Advance inside the given segment by at most to_do kilobytes.
 * Returns the number of kilobytes actually skipped (negative on error
 * via TRACE_CATCH).
 *
 * c_info holds the decompression state for the segment in src_buf
 * (seg_sz bytes at segment number seg_pos); volume supplies the block
 * size.  Clusters are skipped without decompressing them.
 */
static int seek_in_segment(const unsigned int to_do,
			   cmpr_info *c_info,
			   const char *src_buf,
			   const int seg_sz,
			   const int seg_pos,
			   const zft_volinfo *volume)
{
	int result = 0;			/* kilobytes moved so far */
	int blk_sz = volume->blk_sz >> 10; /* volume block size in KB */
	int remaining = to_do;
	TRACE_FUN(ft_t_flow);

	if (c_info->offset == 0) {
		/* new segment just read
		 */
		TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
		c_info->cmpr_pos += c_info->count;
		DUMP_CMPR_INFO(ft_t_noise, "", c_info);
	}
	/* loop and uncompress until user buffer full or
	 * deblock-buffer empty
	 */
	TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
	      c_info->cmpr_sz, c_info->cmpr_pos);
	while (c_info->spans == 0 && remaining > 0) {
		if (c_info->cmpr_pos != 0) { /* cmpr buf is not empty */
			/* Account for the cluster we are skipping over. */
			result += blk_sz;
			remaining -= blk_sz;
			c_info->cmpr_pos = 0;
		}
		if (remaining > 0) {
			get_next_cluster(c_info, src_buf, seg_sz,
					 volume->end_seg == seg_pos);
			if (c_info->count != 0) {
				c_info->cmpr_pos = c_info->count;
				c_info->offset += c_info->count;
			} else {
				/* No further cluster in this segment. */
				break;
			}
		}
		/* Allow escape from this loop on signal!
		 */
		FT_SIGNAL_EXIT(_DONT_BLOCK);
		DUMP_CMPR_INFO(ft_t_noise, "", c_info);
		TRACE(ft_t_noise, "to_do: %d", remaining);
	}
	if (seg_sz - c_info->offset <= 18) {
		/* NOTE(review): 18 appears to be the minimum size of a
		 * useful trailing cluster — confirm against the on-tape
		 * format; remainders smaller than this end the segment.
		 */
		c_info->offset = seg_sz;
	}
	TRACE(ft_t_noise, "\n"
	      KERN_INFO "segment size : %d\n"
	      KERN_INFO "buf_pos_read : %d\n"
	      KERN_INFO "remaining : %d",
	      seg_sz, c_info->offset,
	      seg_sz - c_info->offset);
	TRACE_EXIT result;
}
/* Sequentially seek forward by (up to) distance kilobytes, reading each
 * segment in turn, until either the distance is covered or the end of
 * the volume is reached.  pos and c_info are updated in place.
 *
 * Returns 0 on success; errors from zft_fetch_segment() or
 * seek_in_segment() propagate via the hidden return inside TRACE_CATCH.
 */
static int slow_seek_forward_until_error(const unsigned int distance,
					 cmpr_info *c_info,
					 zft_position *pos,
					 const zft_volinfo *volume,
					 __u8 *buf)
{
	unsigned int remaining = distance; /* kilobytes still to skip */
	int seg_sz;
	int seg_pos;
	int result;
	TRACE_FUN(ft_t_flow);

	seg_pos = pos->seg_pos;
	do {
		TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf,
						       FT_RD_AHEAD),);
		/* now we have the contents of the actual segment in
		 * the deblock buffer
		 */
		TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
						     seg_sz, seg_pos,volume),);
		remaining -= result;
		pos->volume_pos += result<<10;
		pos->seg_pos = seg_pos;
		pos->seg_byte_pos = c_info->offset;
		seg_pos ++;
		if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
			/* Segment fully consumed: step the logical position
			 * to the start of the next segment.
			 */
			pos->seg_pos ++;
			pos->seg_byte_pos = 0;
			c_info->offset = 0;
		}
		/* Allow escape from this loop on signal!
		 */
		FT_SIGNAL_EXIT(_DONT_BLOCK);
		TRACE(ft_t_noise, "\n"
		      KERN_INFO "remaining: %d\n"
		      KERN_INFO "seg_pos: %d\n"
		      KERN_INFO "end_seg: %d\n"
		      KERN_INFO "result: %d",
		      remaining, seg_pos, volume->end_seg, result);
	} while (remaining > 0 && seg_pos <= volume->end_seg);
	/* NOTE(review): returns 0 even when the loop stopped at end_seg
	 * with remaining > 0 — the caller re-checks the distance, so a
	 * short seek is not an error here.
	 */
	TRACE_EXIT 0;
}
/* return segment id of next segment containing valid data, -EIO otherwise
 *
 * Scans segments in [segment, end_seg] for one that can be fetched and
 * whose compressed-segment header parses, and whose file offset does not
 * exceed max_foffs (in kilobytes).  On success updates *pos and *c_info
 * to the found segment and returns 0.
 */
static int search_valid_segment(unsigned int segment,
				const unsigned int end_seg,
				const unsigned int max_foffs,
				zft_position *pos,
				cmpr_info *c_info,
				const zft_volinfo *volume,
				__u8 *buf)
{
	cmpr_info tmp_info;	/* scratch state; only committed on success */
	int seg_sz;
	TRACE_FUN(ft_t_flow);

	memset(&tmp_info, 0, sizeof(cmpr_info));
	while (segment <= end_seg) {
		/* Allow escape from this loop on signal! */
		FT_SIGNAL_EXIT(_DONT_BLOCK);
		TRACE(ft_t_noise,
		      "Searching readable segment between %d and %d",
		      segment, end_seg);
		seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
		/* A valid segment must be readable, have a parsable
		 * header, and carry a file offset (the very first segment
		 * of the volume legitimately has foffs == 0).
		 */
		if ((seg_sz > 0) &&
		    (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
		    (tmp_info.foffs != 0 || segment == volume->start_seg)) {
			if ((tmp_info.foffs>>10) > max_foffs) {
				/* Overshot the target position. */
				TRACE_ABORT(-EIO, ft_t_noise, "\n"
					    KERN_INFO "cseg.foff: %d\n"
					    KERN_INFO "dest : %d",
					    (int)(tmp_info.foffs >> 10),
					    max_foffs);
			}
			DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
			*c_info = tmp_info;
			pos->seg_pos = segment;
			pos->volume_pos = c_info->foffs;
			pos->seg_byte_pos = c_info->offset;
			TRACE(ft_t_noise, "found segment at %d", segment);
			TRACE_EXIT 0;
		}
		segment++;
	}
	TRACE_EXIT -EIO;
}
/* Sequentially seek forward to dest (in kilobytes), retrying across bad
 * segments: whenever slow_seek_forward_until_error() fails, skip past
 * the failing segment with search_valid_segment() and continue from
 * there.  Returns 0 on success, -EINTR on signal, -EIO when no further
 * readable segment exists.
 */
static int slow_seek_forward(unsigned int dest,
			     cmpr_info *c_info,
			     zft_position *pos,
			     const zft_volinfo *volume,
			     __u8 *buf)
{
	unsigned int distance;	/* remaining kilobytes to dest */
	int result = 0;
	TRACE_FUN(ft_t_flow);

	distance = dest - (pos->volume_pos >> 10);
	while ((distance > 0) &&
	       (result = slow_seek_forward_until_error(distance,
						       c_info,
						       pos,
						       volume,
						       buf)) < 0) {
		if (result == -EINTR) {
			/* Interrupted by a signal; don't retry. */
			break;
		}
		TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
		/* the failing segment is either pos->seg_pos or
		 * pos->seg_pos + 1. There is no need to further try
		 * that segment, because ftape_read_segment() already
		 * has tried very much to read it. So we start with
		 * following segment, which is pos->seg_pos + 1
		 */
		if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
					pos, c_info,
					volume, buf) < 0) {
			TRACE(ft_t_noise, "search_valid_segment() failed");
			result = -EIO;
			break;
		}
		distance = dest - (pos->volume_pos >> 10);
		result = 0;
		TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
		/* found valid segment, retry the seek */
	}
	TRACE_EXIT result;
}
/* Estimate how many segments lie between the current position and dest
 * (in kilobytes).  Positive result: seek forward; negative: backward.
 *
 * The estimate scales the remaining uncompressed distance by the
 * observed compression ratio: factor = raw_size/virt_size in fixed
 * point with 7 fractional bits (1<<7 scale).  The 1<<25 test picks the
 * order of operations that avoids 32-bit overflow.
 */
static int compute_seg_pos(const unsigned int dest,
			   zft_position *pos,
			   const zft_volinfo *volume)
{
	int segment;
	int distance = dest - (pos->volume_pos >> 10);
	unsigned int raw_size;	/* raw tape extent, in segments */
	unsigned int virt_size;	/* uncompressed extent, in segments */
	unsigned int factor;	/* compression ratio, 7-bit fixed point */
	TRACE_FUN(ft_t_flow);

	if (distance >= 0) {
		/* Forward: ratio over the part of the volume still ahead. */
		raw_size  = volume->end_seg - pos->seg_pos + 1;
		virt_size = ((unsigned int)(volume->size>>10)
			     - (unsigned int)(pos->volume_pos>>10)
			     + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
		virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
		if (virt_size == 0 || raw_size == 0) {
			TRACE_EXIT 0;
		}
		if (raw_size >= (1<<25)) {
			factor = raw_size/(virt_size>>7);
		} else {
			factor = (raw_size<<7)/virt_size;
		}
		segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
		segment = (segment * factor)>>7;
	} else {
		/* Backward: ratio over the part already behind us. */
		raw_size  = pos->seg_pos - volume->start_seg + 1;
		virt_size = ((unsigned int)(pos->volume_pos>>10)
			     + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
		virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
		if (virt_size == 0 || raw_size == 0) {
			TRACE_EXIT 0;
		}
		if (raw_size >= (1<<25)) {
			factor = raw_size/(virt_size>>7);
		} else {
			factor = (raw_size<<7)/virt_size;
		}
		/* NOTE(review): unlike the forward branch, factor is NOT
		 * applied to the backward estimate here — it is only used
		 * by the trace below.  Possibly intentional (conservative
		 * backward seek), possibly an omission; confirm before
		 * changing.
		 */
		segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
	}
	TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
	TRACE_EXIT segment;
}
/* Operations vector handed to the zftape core via zft_cmpr_register().
 * Positional initializers — the order must match the field order of
 * struct zft_cmpr_ops (declared elsewhere): write, read, seek, lock,
 * reset, cleanup.
 */
static struct zft_cmpr_ops cmpr_ops = {
	zftc_write,
	zftc_read,
	zftc_seek,
	zftc_lock,
	zftc_reset,
	zftc_cleanup
};
/* Initialize the compressor: print a banner and register the
 * operations vector with the zftape core.  Returns 0 on success; a
 * failure of zft_cmpr_register() propagates via the hidden return
 * inside TRACE_CATCH.
 */
int zft_compressor_init(void)
{
	TRACE_FUN(ft_t_flow);

#ifdef MODULE
	printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
	if (TRACE_LEVEL >= ft_t_info) {
		printk(
KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n");
	}
#else /* !MODULE */
	/* print a short no-nonsense boot message */
	printk(KERN_INFO "zftape compressor v1.00a 970514\n");
	printk(KERN_INFO "For use with " FTAPE_VERSION "\n");
#endif /* MODULE */
	TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
	TRACE(ft_t_info, "installing compressor for zftape ...");
	TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
	TRACE_EXIT 0;
}
  1118. #ifdef MODULE
  1119. MODULE_AUTHOR(
  1120. "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de");
  1121. MODULE_DESCRIPTION(
  1122. "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
  1123. MODULE_LICENSE("GPL");
  1124. /* Called by modules package when installing the driver
  1125. */
  1126. int init_module(void)
  1127. {
  1128. return zft_compressor_init();
  1129. }
  1130. #endif /* MODULE */