ffsport.c

/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>

/**** Helper functions used for Div, Remainder operation on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_Calc_Used_Bits
 * Inputs:       Power-of-2 number
 * Outputs:      Number of used bits
 *               0, if the argument is 0
 * Description:  Calculate the number of bits used by a given power-of-2
 *               number. The number can be up to 32 bits wide.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	int tot_bits = 0;

	if (n >= 1 << 16) {
		n >>= 16;
		tot_bits += 16;
	}

	if (n >= 1 << 8) {
		n >>= 8;
		tot_bits += 8;
	}

	if (n >= 1 << 4) {
		n >>= 4;
		tot_bits += 4;
	}

	if (n >= 1 << 2) {
		n >>= 2;
		tot_bits += 2;
	}

	if (n >= 1 << 1)
		tot_bits += 1;

	return (n == 0) ? 0 : tot_bits;
}
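
/*
 * Worked example of the binary-search log2 above: for n = 0x1000
 * (4096 = 2^12) the 16-bit test fails, the 8-bit test shifts n down to
 * 0x10 and adds 8, the 4-bit test shifts n down to 0x1 and adds 4, and
 * the remaining tests fail, giving tot_bits = 12, i.e. log2(4096).
 */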

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Div
 * Inputs:       Dividend of u64
 *               A power-of-2 number as the divisor
 * Outputs:      Quotient of the division operation
 * Description:  Divide the address by the divisor using a bit-shift
 *               operation (essentially without explicitly using "/").
 *               The divisor is a power-of-2 number and the dividend is u64.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}
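
/*
 * E.g. GLOB_u64_Div(0x12345, 0x1000) shifts right by 12 bits and returns
 * 0x12, equivalent to 0x12345 / 0x1000 for a power-of-2 divisor.
 */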

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Remainder
 * Inputs:       Dividend of u64
 *               Divisor type (1 - page address, 2 - block address)
 * Outputs:      Remainder of the division operation
 * Description:  Calculate the remainder of a u64 number divided by the
 *               divisor (a power-of-2 number) using bit-shift and
 *               multiply operations (essentially without explicitly
 *               using "/").
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = addr >> DeviceInfo.nBitsInPageDataSize;
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = addr >> DeviceInfo.nBitsInBlockDataSize;
		result = result * DeviceInfo.wBlockDataSize;
	}

	result = addr - result;

	return result;
}
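
/*
 * Worked example: with a 2 KB page (wPageDataSize = 0x800,
 * nBitsInPageDataSize = 11), addr = 0x12345 and divisor_type = 1 give
 * (0x12345 >> 11) * 0x800 = 0x12000, so the remainder is
 * 0x12345 - 0x12000 = 0x345 -- the offset of addr within its page.
 */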

#define NUM_DEVICES		1
#define PARTITIONS		8

#define GLOB_SBD_NAME		"nd"
#define GLOB_SBD_IRQ_NUM	(29)
#define GLOB_VERSION		"driver version 20091110"

#define GLOB_SBD_IOCTL_GC			(0x7701)
#define GLOB_SBD_IOCTL_WL			(0x7702)
#define GLOB_SBD_IOCTL_FORMAT			(0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH		(0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE		(0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE		(0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE	(0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO		(0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA		(0x7709)
#define GLOB_SBD_IOCTL_READ_DATA		(0x770A)
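
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * userspace tool might issue these ioctls. It assumes the command values
 * above plus the struct ioctl_rw_page_info defined later in this file;
 * the node name /dev/nda is a guess derived from GLOB_SBD_NAME.
 *
 *	int fd = open("/dev/nda", O_RDWR);
 *	if (fd >= 0) {
 *		ioctl(fd, GLOB_SBD_IOCTL_GC);		// garbage collection
 *		ioctl(fd, GLOB_SBD_IOCTL_FLUSH_CACHE);	// flush FTL cache
 *		close(fd);
 *	}
 */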

static int reserved_mb;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 0 MiB)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");

struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;
	u16 users;
	spinlock_t qlock;
	void __iomem *ioaddr;	/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;
};

static int GLOB_SBD_majornum;
static char *GLOB_version = GLOB_VERSION;
static struct spectra_nand_dev nand_device[NUM_DEVICES];
static struct mutex spectra_lock;
static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;

static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}
#if CMD_DMA
	if (glob_ftl_execute_cmds())
		return -EIO;
	else
		return 0;
#endif
	return 0;
}

struct ioctl_rw_page_info {
	u8 *data;
	unsigned int page;
};

static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}

static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, (void __user *)info.data,
			   IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}

/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}
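
/* E.g. a device with 4096 data blocks reserves 4096 / 10 = 409 blocks. */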

/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserve at least 1 block for the block table */

	return res_blks;
}
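
/*
 * Worked example: assuming 2 KB pages and 64 pages per block, blk_size
 * is 128 KiB, so reserved_mb = 25 reserves
 * (25 * 1024 * 1024) / (128 * 1024) = 200 blocks for the OS image.
 */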

static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
	/* rq->timeout = 5 * HZ; */
	rq->cmd[0] = REQ_LB_OP_FLUSH;
}

/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
			req->cmd[0] == REQ_LB_OP_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
		       "capacity! sector %d, current_nr_sectors %d, "
		       "while capacity is %d\n",
		       (int)blk_rq_pos(req),
		       blk_rq_cur_sectors(req),
		       (int)get_capacity(tr->gd));
		return -EIO;
	}

	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;

	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first (partial) NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other (whole) NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the trailing partial NAND page */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	case WRITE:
		/* Write the first NAND page: read-modify-write the
		 * leading partial page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other (whole) NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND page: read-modify-write the
		 * trailing partial page */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
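
/*
 * Worked example of the sector-to-page mapping in do_transfer(),
 * assuming 2 KB pages (ratio = 4 sectors per page): a request starting
 * at logical sector 6 gives hd_start_sect = 1 and rsect = 2, so the
 * transfer starts 2 sectors into NAND page 1, and tsect caps the first
 * partial-page copy at ratio - rsect = 2 sectors.
 */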

/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
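
/*
 * Note on the locking above: queue_lock is held only while fetching and
 * ending requests; it is dropped around do_transfer() so that the
 * sleeping FTL calls run under spectra_lock instead.
 */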

/* Request function: just wake the worker thread, which performs the
 * actual transfers. */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;
	wake_up_process(pdev->thread);
}

static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}

static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	return ret;
}

static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}
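
/*
 * The fixed 4-head, 16-sector geometry is fictitious (NAND has no CHS
 * layout): e.g. a disk of 2097152 sectors (1 GiB) reports
 * 2097152 / 64 = 32768 cylinders.
 */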

int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}

static struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.locked_ioctl = GLOB_SBD_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};

static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		"for OS image, %d blocks for bad block replacement.\n",
		get_res_blk_num_os(),
		get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);
	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		goto out_vfree;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized."
		       " Aborting\n");
		goto out_vfree;
	}
	dev->queue->queuedata = dev;

	/* As the Linux block layer doesn't support >4KB hardware sectors, */
	/* we force a 512-byte hardware sector size report to the kernel */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
			  SBD_prepare_flush);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		blk_cleanup_queue(dev->queue);
		unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
		return PTR_ERR(dev->thread);
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting\n");
		goto out_vfree;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;

out_vfree:
	return -ENOMEM;
}
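
/*
 * Worked example of the capacity math above, assuming 2 KB pages,
 * 64 pages per block and 4096 data blocks: 409 blocks are reserved for
 * bad-block replacement (4096 / 10) plus 1 for the OS image, so
 * dev->size = 131072 * 3686 = 483131392 bytes, i.e. 943616 sectors.
 */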

/*
static ssize_t show_nand_block_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);

static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_page_size.\n");
}
*/

static int __init GLOB_SBD_init(void)
{
	int i;

	/* Set debug output level (0~3) here. 3 is most verbose */
	nand_debug_level = 0;

	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra\n",
		       GLOB_SBD_majornum);
		return -EBUSY;
	}

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	}

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	} else {
		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
			"Num blocks=%d, pagesperblock=%d, "
			"pagedatasize=%d, ECCBytesPerSector=%d\n",
			(int)IdentifyDeviceData.NumBlocks,
			(int)IdentifyDeviceData.PagesPerBlock,
			(int)IdentifyDeviceData.PageDataSize,
			(int)IdentifyDeviceData.wECCBytesPerSector);
	}

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_ftl_flash_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return 0;

out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
out_flash_register:
	GLOB_FTL_Flash_Release();
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
	printk(KERN_ERR "Spectra: Module load failed.\n");

	return -ENOMEM;
}

static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);