/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>

/**** Helper functions used for Div, Remainder operation on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_Calc_Used_Bits
 * Inputs:       Power-of-2 number
 * Outputs:      Number of used bits
 *               0, if the argument is 0
 * Description:  Calculate the number of bits used by a given power-of-2
 *               number. The number can be up to 32 bits.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	int tot_bits = 0;

	if (n >= 1 << 16) {
		n >>= 16;
		tot_bits += 16;
	}

	if (n >= 1 << 8) {
		n >>= 8;
		tot_bits += 8;
	}

	if (n >= 1 << 4) {
		n >>= 4;
		tot_bits += 4;
	}

	if (n >= 1 << 2) {
		n >>= 2;
		tot_bits += 2;
	}

	if (n >= 1 << 1)
		tot_bits += 1;

	return (n == 0) ? 0 : tot_bits;
}
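
/*
 * Illustrative values (a sketch, not exercised by the driver itself):
 * for a power of two the function returns log2(n), e.g.
 *
 *	GLOB_Calc_Used_Bits(2048) == 11
 *	GLOB_Calc_Used_Bits(1) == 0
 *	GLOB_Calc_Used_Bits(0) == 0
 */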

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Div
 * Inputs:       Dividend of type u64
 *               A power-of-2 number as the divisor
 * Outputs:      Quotient of the division operation
 * Description:  Divides the address by the divisor using a bit-shift
 *               operation (essentially without explicitly using "/").
 *               The divisor is a power-of-2 number and the dividend is a u64.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}
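
/*
 * Example (hypothetical page size of 2048 bytes): since
 * GLOB_Calc_Used_Bits(2048) == 11,
 *
 *	GLOB_u64_Div(addr, 2048) == addr >> 11 == addr / 2048
 */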

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Remainder
 * Inputs:       Dividend of type u64
 *               Divisor type (1 - PageAddress, 2 - BlockAddress)
 * Outputs:      Remainder of the division operation
 * Description:  Calculates the remainder of a u64 number divided by a
 *               power-of-2 divisor using bit-shift and multiply
 *               operations (essentially without explicitly using "/").
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = (addr >> DeviceInfo.nBitsInPageDataSize);
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
		result = result * DeviceInfo.wBlockDataSize;
	}

	result = addr - result;

	return result;
}
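
/*
 * Worked example (hypothetical 2048-byte page, so nBitsInPageDataSize
 * is 11): for addr == 5000,
 *
 *	(5000 >> 11) * 2048 == 2 * 2048 == 4096
 *	5000 - 4096 == 904 == 5000 % 2048
 */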

#define NUM_DEVICES		1
#define PARTITIONS		8

#define GLOB_SBD_NAME		"nd"
#define GLOB_SBD_IRQ_NUM	(29)

#define GLOB_VERSION		"driver version 20091110"

#define GLOB_SBD_IOCTL_GC			(0x7701)
#define GLOB_SBD_IOCTL_WL			(0x7702)
#define GLOB_SBD_IOCTL_FORMAT			(0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH		(0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE		(0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE		(0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE	(0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO		(0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA		(0x7709)
#define GLOB_SBD_IOCTL_READ_DATA		(0x770A)

static u32 reserved_mb_for_os_image;

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");

struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;
	u16 users;
	spinlock_t qlock;
	void __iomem *ioaddr;	/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;
};

static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;

static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}
#if CMD_DMA
	if (glob_ftl_execute_cmds())
		return -EIO;
	else
		return 0;
#endif
	return 0;
}

struct ioctl_rw_page_info {
	u8 *data;
	unsigned int page;
};
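
/*
 * Hypothetical user-space usage sketch (not part of this driver; the
 * device node name and buffer sizing are assumptions):
 *
 *	int fd = open("/dev/nda", O_RDWR);
 *	struct ioctl_rw_page_info info;
 *	u8 buf[PAGE_DATA_SIZE];	// PageDataSize reported by
 *				// GLOB_SBD_IOCTL_GET_NAND_INFO
 *
 *	info.data = buf;
 *	info.page = 42;		// a page number, not a byte offset
 *	ioctl(fd, GLOB_SBD_IOCTL_READ_DATA, &info);
 */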

static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}

static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, (void __user *)info.data,
			   IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}

/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}

/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb_for_os_image * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserve 1 block for the block table */

	return res_blks;
}
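
/*
 * Worked example (hypothetical geometry): with 2048-byte pages and 64
 * pages per block, blk_size == 128 KB; booting with res_nand=16 gives
 * (16 * 1024 * 1024) / (128 * 1024) == 128 reserved blocks.
 */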

static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
	/* rq->timeout = 5 * HZ; */
	rq->cmd[0] = REQ_LB_OP_FLUSH;
}
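
/*
 * Note: the block layer invokes the prepare_flush hook above for barrier
 * requests once the queue is registered with QUEUE_ORDERED_DRAIN_FLUSH
 * (see SBD_setup_device below); do_transfer() then recognizes
 * REQ_LB_OP_FLUSH and forces an FTL cache flush.
 */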

/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS Image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
			req->cmd[0] == REQ_LB_OP_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
			"capacity! sector %d, current_nr_sectors %d, "
			"while capacity is %d\n",
			(int)blk_rq_pos(req),
			blk_rq_cur_sectors(req),
			(int)get_capacity(tr->gd));
		return -EIO;
	}

	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
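
	/*
	 * Sector-to-page math, illustrated (hypothetical 2048-byte page,
	 * so ratio == 4 sectors per page): a request whose offset-adjusted
	 * start is logical sector 6 for 9 sectors gives hd_start_sect == 1,
	 * rsect == 2, and tsect == 2 sectors copied out of the first
	 * (partial) page; the remaining 7 sectors are one full page plus a
	 * trailing partial page handled via tr->tmp_buf below.
	 */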
	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	case WRITE:
		/* Write the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

/* Request function: called by the block layer with the queue lock held;
 * the actual transfer is deferred to the per-device worker thread. */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;
	wake_up_process(pdev->thread);
}

static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}

static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	return 0;
}

static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}

int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}

static struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.locked_ioctl = GLOB_SBD_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};

static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		       "for OS image, %d blocks for bad block replacement.\n",
		       get_res_blk_num_os(),
		       get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);

	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		goto out_vfree;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized."
		       " Aborting\n");
		goto out_vfree;
	}
	dev->queue->queuedata = dev;

	/* As the Linux block layer doesn't support >4KB hardware sectors, */
	/* we force a 512 byte hardware sector size to be reported to the kernel */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
			  SBD_prepare_flush);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		blk_cleanup_queue(dev->queue);
		unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
		return PTR_ERR(dev->thread);
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting\n");
		goto out_vfree;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;

out_vfree:
	return -ENOMEM;
}
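
/*
 * With NUM_DEVICES == 1, the setup above registers a single disk named
 * "nda" (GLOB_SBD_NAME "nd" plus 'a' for device 0) carrying PARTITIONS
 * (8) minor numbers, i.e. the whole device plus up to 7 partitions.
 */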

/*
static ssize_t show_nand_block_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);

static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_page_size.\n");
}
*/

static int __init GLOB_SBD_init(void)
{
	int i;

	/* Set debug output level (0~3) here. 3 is most verbose */
	nand_debug_level = 0;

	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra\n",
		       GLOB_SBD_majornum);
		return -EBUSY;
	}

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	}

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	} else {
		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
			       "Num blocks=%d, pagesperblock=%d, "
			       "pagedatasize=%d, ECCBytesPerSector=%d\n",
			       (int)IdentifyDeviceData.NumBlocks,
			       (int)IdentifyDeviceData.PagesPerBlock,
			       (int)IdentifyDeviceData.PageDataSize,
			       (int)IdentifyDeviceData.wECCBytesPerSector);
	}

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_ftl_flash_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return 0;

out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
out_flash_register:
	GLOB_FTL_Flash_Release();
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
	printk(KERN_ERR "Spectra: Module load failed.\n");
	return -ENOMEM;
}

static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];

		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}

		if (dev->queue)
			blk_cleanup_queue(dev->queue);

		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}

static int __init setup_reserve_space_for_os_image(char *cmdline)
{
	unsigned long value;
	int error;

	if (!cmdline)
		return -EINVAL;

	printk(KERN_ALERT "Spectra - cmdline: %s\n", cmdline);

	error = strict_strtoul(cmdline, 10, &value);
	if (error)
		return -EINVAL;

	reserved_mb_for_os_image = value;

	return 0;
}

early_param("res_nand", setup_reserve_space_for_os_image);
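
/*
 * Illustrative kernel command line (an assumption about deployment, not
 * dictated by the driver): booting with
 *
 *	res_nand=16
 *
 * parses 16 in base 10 and reserves 16 MB of NAND at the start of the
 * device for an OS image (see get_res_blk_num_os() above).
 */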

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);