rfd_ftl.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright (C) 2005 Sean Young <sean@mess.org>
 *
 * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 * http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */
  15. #include <linux/hdreg.h>
  16. #include <linux/init.h>
  17. #include <linux/mtd/blktrans.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/slab.h>
  21. #include <linux/jiffies.h>
  22. #include <asm/types.h>
  23. #define const_cpu_to_le16 __constant_cpu_to_le16
  24. static int block_size = 0;
  25. module_param(block_size, int, 0);
  26. MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
  27. #define PREFIX "rfd_ftl: "
  28. /* This major has been assigned by device@lanana.org */
  29. #ifndef RFD_FTL_MAJOR
  30. #define RFD_FTL_MAJOR 256
  31. #endif
  32. /* Maximum number of partitions in an FTL region */
  33. #define PART_BITS 4
  34. /* An erase unit should start with this value */
  35. #define RFD_MAGIC 0x9193
  36. /* the second value is 0xffff or 0xffc8; function unknown */
  37. /* the third value is always 0xffff, ignored */
  38. /* next is an array of mapping for each corresponding sector */
  39. #define HEADER_MAP_OFFSET 3
  40. #define SECTOR_DELETED 0x0000
  41. #define SECTOR_ZERO 0xfffe
  42. #define SECTOR_FREE 0xffff
  43. #define SECTOR_SIZE 512
  44. #define SECTORS_PER_TRACK 63
  45. struct block {
  46. enum {
  47. BLOCK_OK,
  48. BLOCK_ERASING,
  49. BLOCK_ERASED,
  50. BLOCK_UNUSED,
  51. BLOCK_FAILED
  52. } state;
  53. int free_sectors;
  54. int used_sectors;
  55. int erases;
  56. u_long offset;
  57. };
  58. struct partition {
  59. struct mtd_blktrans_dev mbd;
  60. u_int block_size; /* size of erase unit */
  61. u_int total_blocks; /* number of erase units */
  62. u_int header_sectors_per_block; /* header sectors in erase unit */
  63. u_int data_sectors_per_block; /* data sectors in erase unit */
  64. u_int sector_count; /* sectors in translated disk */
  65. u_int header_size; /* bytes in header sector */
  66. int reserved_block; /* block next up for reclaim */
  67. int current_block; /* block to write to */
  68. u16 *header_cache; /* cached header */
  69. int is_reclaiming;
  70. int cylinders;
  71. int errors;
  72. u_long *sector_map;
  73. struct block *blocks;
  74. };
  75. static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
  76. static int build_block_map(struct partition *part, int block_no)
  77. {
  78. struct block *block = &part->blocks[block_no];
  79. int i;
  80. block->offset = part->block_size * block_no;
  81. if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
  82. block->state = BLOCK_UNUSED;
  83. return -ENOENT;
  84. }
  85. block->state = BLOCK_OK;
  86. for (i=0; i<part->data_sectors_per_block; i++) {
  87. u16 entry;
  88. entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
  89. if (entry == SECTOR_DELETED)
  90. continue;
  91. if (entry == SECTOR_FREE) {
  92. block->free_sectors++;
  93. continue;
  94. }
  95. if (entry == SECTOR_ZERO)
  96. entry = 0;
  97. if (entry >= part->sector_count) {
  98. printk(KERN_WARNING PREFIX
  99. "'%s': unit #%d: entry %d corrupt, "
  100. "sector %d out of range\n",
  101. part->mbd.mtd->name, block_no, i, entry);
  102. continue;
  103. }
  104. if (part->sector_map[entry] != -1) {
  105. printk(KERN_WARNING PREFIX
  106. "'%s': more than one entry for sector %d\n",
  107. part->mbd.mtd->name, entry);
  108. part->errors = 1;
  109. continue;
  110. }
  111. part->sector_map[entry] = block->offset +
  112. (i + part->header_sectors_per_block) * SECTOR_SIZE;
  113. block->used_sectors++;
  114. }
  115. if (block->free_sectors == part->data_sectors_per_block)
  116. part->reserved_block = block_no;
  117. return 0;
  118. }
  119. static int scan_header(struct partition *part)
  120. {
  121. int sectors_per_block;
  122. int i, rc = -ENOMEM;
  123. int blocks_found;
  124. size_t retlen;
  125. sectors_per_block = part->block_size / SECTOR_SIZE;
  126. part->total_blocks = part->mbd.mtd->size / part->block_size;
  127. if (part->total_blocks < 2)
  128. return -ENOENT;
  129. /* each erase block has three bytes header, followed by the map */
  130. part->header_sectors_per_block =
  131. ((HEADER_MAP_OFFSET + sectors_per_block) *
  132. sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
  133. part->data_sectors_per_block = sectors_per_block -
  134. part->header_sectors_per_block;
  135. part->header_size = (HEADER_MAP_OFFSET +
  136. part->data_sectors_per_block) * sizeof(u16);
  137. part->cylinders = (part->data_sectors_per_block *
  138. (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
  139. part->sector_count = part->cylinders * SECTORS_PER_TRACK;
  140. part->current_block = -1;
  141. part->reserved_block = -1;
  142. part->is_reclaiming = 0;
  143. part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
  144. if (!part->header_cache)
  145. goto err;
  146. part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
  147. GFP_KERNEL);
  148. if (!part->blocks)
  149. goto err;
  150. part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
  151. if (!part->sector_map) {
  152. printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
  153. "sector map", part->mbd.mtd->name);
  154. goto err;
  155. }
  156. for (i=0; i<part->sector_count; i++)
  157. part->sector_map[i] = -1;
  158. for (i=0, blocks_found=0; i<part->total_blocks; i++) {
  159. rc = part->mbd.mtd->read(part->mbd.mtd,
  160. i * part->block_size, part->header_size,
  161. &retlen, (u_char*)part->header_cache);
  162. if (!rc && retlen != part->header_size)
  163. rc = -EIO;
  164. if (rc)
  165. goto err;
  166. if (!build_block_map(part, i))
  167. blocks_found++;
  168. }
  169. if (blocks_found == 0) {
  170. printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
  171. part->mbd.mtd->name);
  172. rc = -ENOENT;
  173. goto err;
  174. }
  175. if (part->reserved_block == -1) {
  176. printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
  177. part->mbd.mtd->name);
  178. part->errors = 1;
  179. }
  180. return 0;
  181. err:
  182. vfree(part->sector_map);
  183. kfree(part->header_cache);
  184. kfree(part->blocks);
  185. return rc;
  186. }
  187. static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
  188. {
  189. struct partition *part = (struct partition*)dev;
  190. u_long addr;
  191. size_t retlen;
  192. int rc;
  193. if (sector >= part->sector_count)
  194. return -EIO;
  195. addr = part->sector_map[sector];
  196. if (addr != -1) {
  197. rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
  198. &retlen, (u_char*)buf);
  199. if (!rc && retlen != SECTOR_SIZE)
  200. rc = -EIO;
  201. if (rc) {
  202. printk(KERN_WARNING PREFIX "error reading '%s' at "
  203. "0x%lx\n", part->mbd.mtd->name, addr);
  204. return rc;
  205. }
  206. } else
  207. memset(buf, 0, SECTOR_SIZE);
  208. return 0;
  209. }
  210. static void erase_callback(struct erase_info *erase)
  211. {
  212. struct partition *part;
  213. u16 magic;
  214. int i, rc;
  215. size_t retlen;
  216. part = (struct partition*)erase->priv;
  217. i = erase->addr / part->block_size;
  218. if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
  219. printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
  220. "on '%s'\n", erase->addr, part->mbd.mtd->name);
  221. return;
  222. }
  223. if (erase->state != MTD_ERASE_DONE) {
  224. printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
  225. "state %d\n", erase->addr,
  226. part->mbd.mtd->name, erase->state);
  227. part->blocks[i].state = BLOCK_FAILED;
  228. part->blocks[i].free_sectors = 0;
  229. part->blocks[i].used_sectors = 0;
  230. kfree(erase);
  231. return;
  232. }
  233. magic = const_cpu_to_le16(RFD_MAGIC);
  234. part->blocks[i].state = BLOCK_ERASED;
  235. part->blocks[i].free_sectors = part->data_sectors_per_block;
  236. part->blocks[i].used_sectors = 0;
  237. part->blocks[i].erases++;
  238. rc = part->mbd.mtd->write(part->mbd.mtd,
  239. part->blocks[i].offset, sizeof(magic), &retlen,
  240. (u_char*)&magic);
  241. if (!rc && retlen != sizeof(magic))
  242. rc = -EIO;
  243. if (rc) {
  244. printk(KERN_ERR PREFIX "'%s': unable to write RFD "
  245. "header at 0x%lx\n",
  246. part->mbd.mtd->name,
  247. part->blocks[i].offset);
  248. part->blocks[i].state = BLOCK_FAILED;
  249. }
  250. else
  251. part->blocks[i].state = BLOCK_OK;
  252. kfree(erase);
  253. }
  254. static int erase_block(struct partition *part, int block)
  255. {
  256. struct erase_info *erase;
  257. int rc = -ENOMEM;
  258. erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
  259. if (!erase)
  260. goto err;
  261. erase->mtd = part->mbd.mtd;
  262. erase->callback = erase_callback;
  263. erase->addr = part->blocks[block].offset;
  264. erase->len = part->block_size;
  265. erase->priv = (u_long)part;
  266. part->blocks[block].state = BLOCK_ERASING;
  267. part->blocks[block].free_sectors = 0;
  268. rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
  269. if (rc) {
  270. printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
  271. "failed\n", erase->addr, erase->len,
  272. part->mbd.mtd->name);
  273. kfree(erase);
  274. }
  275. err:
  276. return rc;
  277. }
  278. static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
  279. {
  280. void *sector_data;
  281. u16 *map;
  282. size_t retlen;
  283. int i, rc = -ENOMEM;
  284. part->is_reclaiming = 1;
  285. sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
  286. if (!sector_data)
  287. goto err3;
  288. map = kmalloc(part->header_size, GFP_KERNEL);
  289. if (!map)
  290. goto err2;
  291. rc = part->mbd.mtd->read(part->mbd.mtd,
  292. part->blocks[block_no].offset, part->header_size,
  293. &retlen, (u_char*)map);
  294. if (!rc && retlen != part->header_size)
  295. rc = -EIO;
  296. if (rc) {
  297. printk(KERN_ERR PREFIX "error reading '%s' at "
  298. "0x%lx\n", part->mbd.mtd->name,
  299. part->blocks[block_no].offset);
  300. goto err;
  301. }
  302. for (i=0; i<part->data_sectors_per_block; i++) {
  303. u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
  304. u_long addr;
  305. if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
  306. continue;
  307. if (entry == SECTOR_ZERO)
  308. entry = 0;
  309. /* already warned about and ignored in build_block_map() */
  310. if (entry >= part->sector_count)
  311. continue;
  312. addr = part->blocks[block_no].offset +
  313. (i + part->header_sectors_per_block) * SECTOR_SIZE;
  314. if (*old_sector == addr) {
  315. *old_sector = -1;
  316. if (!part->blocks[block_no].used_sectors--) {
  317. rc = erase_block(part, block_no);
  318. break;
  319. }
  320. continue;
  321. }
  322. rc = part->mbd.mtd->read(part->mbd.mtd, addr,
  323. SECTOR_SIZE, &retlen, sector_data);
  324. if (!rc && retlen != SECTOR_SIZE)
  325. rc = -EIO;
  326. if (rc) {
  327. printk(KERN_ERR PREFIX "'%s': Unable to "
  328. "read sector for relocation\n",
  329. part->mbd.mtd->name);
  330. goto err;
  331. }
  332. rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
  333. entry, sector_data);
  334. if (rc)
  335. goto err;
  336. }
  337. err:
  338. kfree(map);
  339. err2:
  340. kfree(sector_data);
  341. err3:
  342. part->is_reclaiming = 0;
  343. return rc;
  344. }
  345. static int reclaim_block(struct partition *part, u_long *old_sector)
  346. {
  347. int block, best_block, score, old_sector_block;
  348. int rc;
  349. /* we have a race if sync doesn't exist */
  350. if (part->mbd.mtd->sync)
  351. part->mbd.mtd->sync(part->mbd.mtd);
  352. score = 0x7fffffff; /* MAX_INT */
  353. best_block = -1;
  354. if (*old_sector != -1)
  355. old_sector_block = *old_sector / part->block_size;
  356. else
  357. old_sector_block = -1;
  358. for (block=0; block<part->total_blocks; block++) {
  359. int this_score;
  360. if (block == part->reserved_block)
  361. continue;
  362. /*
  363. * Postpone reclaiming if there is a free sector as
  364. * more removed sectors is more efficient (have to move
  365. * less).
  366. */
  367. if (part->blocks[block].free_sectors)
  368. return 0;
  369. this_score = part->blocks[block].used_sectors;
  370. if (block == old_sector_block)
  371. this_score--;
  372. else {
  373. /* no point in moving a full block */
  374. if (part->blocks[block].used_sectors ==
  375. part->data_sectors_per_block)
  376. continue;
  377. }
  378. this_score += part->blocks[block].erases;
  379. if (this_score < score) {
  380. best_block = block;
  381. score = this_score;
  382. }
  383. }
  384. if (best_block == -1)
  385. return -ENOSPC;
  386. part->current_block = -1;
  387. part->reserved_block = best_block;
  388. pr_debug("reclaim_block: reclaiming block #%d with %d used "
  389. "%d free sectors\n", best_block,
  390. part->blocks[best_block].used_sectors,
  391. part->blocks[best_block].free_sectors);
  392. if (part->blocks[best_block].used_sectors)
  393. rc = move_block_contents(part, best_block, old_sector);
  394. else
  395. rc = erase_block(part, best_block);
  396. return rc;
  397. }
  398. /*
  399. * IMPROVE: It would be best to choose the block with the most deleted sectors,
  400. * because if we fill that one up first it'll have the most chance of having
  401. * the least live sectors at reclaim.
  402. */
  403. static int find_free_block(struct partition *part)
  404. {
  405. int block, stop;
  406. block = part->current_block == -1 ?
  407. jiffies % part->total_blocks : part->current_block;
  408. stop = block;
  409. do {
  410. if (part->blocks[block].free_sectors &&
  411. block != part->reserved_block)
  412. return block;
  413. if (part->blocks[block].state == BLOCK_UNUSED)
  414. erase_block(part, block);
  415. if (++block >= part->total_blocks)
  416. block = 0;
  417. } while (block != stop);
  418. return -1;
  419. }
  420. static int find_writable_block(struct partition *part, u_long *old_sector)
  421. {
  422. int rc, block;
  423. size_t retlen;
  424. block = find_free_block(part);
  425. if (block == -1) {
  426. if (!part->is_reclaiming) {
  427. rc = reclaim_block(part, old_sector);
  428. if (rc)
  429. goto err;
  430. block = find_free_block(part);
  431. }
  432. if (block == -1) {
  433. rc = -ENOSPC;
  434. goto err;
  435. }
  436. }
  437. rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
  438. part->header_size, &retlen, (u_char*)part->header_cache);
  439. if (!rc && retlen != part->header_size)
  440. rc = -EIO;
  441. if (rc) {
  442. printk(KERN_ERR PREFIX "'%s': unable to read header at "
  443. "0x%lx\n", part->mbd.mtd->name,
  444. part->blocks[block].offset);
  445. goto err;
  446. }
  447. part->current_block = block;
  448. err:
  449. return rc;
  450. }
  451. static int mark_sector_deleted(struct partition *part, u_long old_addr)
  452. {
  453. int block, offset, rc;
  454. u_long addr;
  455. size_t retlen;
  456. u16 del = const_cpu_to_le16(SECTOR_DELETED);
  457. block = old_addr / part->block_size;
  458. offset = (old_addr % part->block_size) / SECTOR_SIZE -
  459. part->header_sectors_per_block;
  460. addr = part->blocks[block].offset +
  461. (HEADER_MAP_OFFSET + offset) * sizeof(u16);
  462. rc = part->mbd.mtd->write(part->mbd.mtd, addr,
  463. sizeof(del), &retlen, (u_char*)&del);
  464. if (!rc && retlen != sizeof(del))
  465. rc = -EIO;
  466. if (rc) {
  467. printk(KERN_ERR PREFIX "error writing '%s' at "
  468. "0x%lx\n", part->mbd.mtd->name, addr);
  469. if (rc)
  470. goto err;
  471. }
  472. if (block == part->current_block)
  473. part->header_cache[offset + HEADER_MAP_OFFSET] = del;
  474. part->blocks[block].used_sectors--;
  475. if (!part->blocks[block].used_sectors &&
  476. !part->blocks[block].free_sectors)
  477. rc = erase_block(part, block);
  478. err:
  479. return rc;
  480. }
  481. static int find_free_sector(const struct partition *part, const struct block *block)
  482. {
  483. int i, stop;
  484. i = stop = part->data_sectors_per_block - block->free_sectors;
  485. do {
  486. if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
  487. == SECTOR_FREE)
  488. return i;
  489. if (++i == part->data_sectors_per_block)
  490. i = 0;
  491. }
  492. while(i != stop);
  493. return -1;
  494. }
  495. static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
  496. {
  497. struct partition *part = (struct partition*)dev;
  498. struct block *block;
  499. u_long addr;
  500. int i;
  501. int rc;
  502. size_t retlen;
  503. u16 entry;
  504. if (part->current_block == -1 ||
  505. !part->blocks[part->current_block].free_sectors) {
  506. rc = find_writable_block(part, old_addr);
  507. if (rc)
  508. goto err;
  509. }
  510. block = &part->blocks[part->current_block];
  511. i = find_free_sector(part, block);
  512. if (i < 0) {
  513. rc = -ENOSPC;
  514. goto err;
  515. }
  516. addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
  517. block->offset;
  518. rc = part->mbd.mtd->write(part->mbd.mtd,
  519. addr, SECTOR_SIZE, &retlen, (u_char*)buf);
  520. if (!rc && retlen != SECTOR_SIZE)
  521. rc = -EIO;
  522. if (rc) {
  523. printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
  524. part->mbd.mtd->name, addr);
  525. if (rc)
  526. goto err;
  527. }
  528. part->sector_map[sector] = addr;
  529. entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
  530. part->header_cache[i + HEADER_MAP_OFFSET] = entry;
  531. addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
  532. rc = part->mbd.mtd->write(part->mbd.mtd, addr,
  533. sizeof(entry), &retlen, (u_char*)&entry);
  534. if (!rc && retlen != sizeof(entry))
  535. rc = -EIO;
  536. if (rc) {
  537. printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
  538. part->mbd.mtd->name, addr);
  539. if (rc)
  540. goto err;
  541. }
  542. block->used_sectors++;
  543. block->free_sectors--;
  544. err:
  545. return rc;
  546. }
  547. static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
  548. {
  549. struct partition *part = (struct partition*)dev;
  550. u_long old_addr;
  551. int i;
  552. int rc = 0;
  553. pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
  554. if (part->reserved_block == -1) {
  555. rc = -EACCES;
  556. goto err;
  557. }
  558. if (sector >= part->sector_count) {
  559. rc = -EIO;
  560. goto err;
  561. }
  562. old_addr = part->sector_map[sector];
  563. for (i=0; i<SECTOR_SIZE; i++) {
  564. if (!buf[i])
  565. continue;
  566. rc = do_writesect(dev, sector, buf, &old_addr);
  567. if (rc)
  568. goto err;
  569. break;
  570. }
  571. if (i == SECTOR_SIZE)
  572. part->sector_map[sector] = -1;
  573. if (old_addr != -1)
  574. rc = mark_sector_deleted(part, old_addr);
  575. err:
  576. return rc;
  577. }
  578. static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
  579. {
  580. struct partition *part = (struct partition*)dev;
  581. geo->heads = 1;
  582. geo->sectors = SECTORS_PER_TRACK;
  583. geo->cylinders = part->cylinders;
  584. return 0;
  585. }
  586. static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
  587. {
  588. struct partition *part;
  589. if (mtd->type != MTD_NORFLASH)
  590. return;
  591. part = kcalloc(1, sizeof(struct partition), GFP_KERNEL);
  592. if (!part)
  593. return;
  594. part->mbd.mtd = mtd;
  595. if (block_size)
  596. part->block_size = block_size;
  597. else {
  598. if (!mtd->erasesize) {
  599. printk(KERN_WARNING PREFIX "please provide block_size");
  600. return;
  601. }
  602. else
  603. part->block_size = mtd->erasesize;
  604. }
  605. if (scan_header(part) == 0) {
  606. part->mbd.size = part->sector_count;
  607. part->mbd.blksize = SECTOR_SIZE;
  608. part->mbd.tr = tr;
  609. part->mbd.devnum = -1;
  610. if (!(mtd->flags & MTD_WRITEABLE))
  611. part->mbd.readonly = 1;
  612. else if (part->errors) {
  613. printk(KERN_WARNING PREFIX "'%s': errors found, "
  614. "setting read-only\n", mtd->name);
  615. part->mbd.readonly = 1;
  616. }
  617. printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
  618. mtd->name, mtd->type, mtd->flags);
  619. if (!add_mtd_blktrans_dev((void*)part))
  620. return;
  621. }
  622. kfree(part);
  623. }
  624. static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
  625. {
  626. struct partition *part = (struct partition*)dev;
  627. int i;
  628. for (i=0; i<part->total_blocks; i++) {
  629. pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
  630. part->mbd.mtd->name, i, part->blocks[i].erases);
  631. }
  632. del_mtd_blktrans_dev(dev);
  633. vfree(part->sector_map);
  634. kfree(part->header_cache);
  635. kfree(part->blocks);
  636. kfree(part);
  637. }
  638. struct mtd_blktrans_ops rfd_ftl_tr = {
  639. .name = "rfd",
  640. .major = RFD_FTL_MAJOR,
  641. .part_bits = PART_BITS,
  642. .readsect = rfd_ftl_readsect,
  643. .writesect = rfd_ftl_writesect,
  644. .getgeo = rfd_ftl_getgeo,
  645. .add_mtd = rfd_ftl_add_mtd,
  646. .remove_dev = rfd_ftl_remove_dev,
  647. .owner = THIS_MODULE,
  648. };
  649. static int __init init_rfd_ftl(void)
  650. {
  651. return register_mtd_blktrans(&rfd_ftl_tr);
  652. }
  653. static void __exit cleanup_rfd_ftl(void)
  654. {
  655. deregister_mtd_blktrans(&rfd_ftl_tr);
  656. }
  657. module_init(init_rfd_ftl);
  658. module_exit(cleanup_rfd_ftl);
  659. MODULE_LICENSE("GPL");
  660. MODULE_AUTHOR("Sean Young <sean@mess.org>");
  661. MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
  662. "used by General Software's Embedded BIOS");