lpddr_cmds.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797
  1. /*
  2. * LPDDR flash memory device operations. This module provides read, write,
  3. * erase, lock/unlock support for LPDDR flash memories
  4. * (C) 2008 Korolev Alexey <akorolev@infradead.org>
  5. * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
  6. * Many thanks to Roman Borisov for initial enabling
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version 2
  11. * of the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  21. * 02110-1301, USA.
  22. * TODO:
  23. * Implement VPP management
  24. * Implement XIP support
  25. * Implement OTP support
  26. */
  27. #include <linux/mtd/pfow.h>
  28. #include <linux/mtd/qinfo.h>
  29. #include <linux/slab.h>
  30. static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
  31. size_t *retlen, u_char *buf);
  32. static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
  33. size_t len, size_t *retlen, const u_char *buf);
  34. static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
  35. unsigned long count, loff_t to, size_t *retlen);
  36. static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
  37. static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
  38. static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
  39. static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
  40. size_t *retlen, void **mtdbuf, resource_size_t *phys);
  41. static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
  42. static int get_chip(struct map_info *map, struct flchip *chip, int mode);
  43. static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
  44. static void put_chip(struct map_info *map, struct flchip *chip);
  45. struct mtd_info *lpddr_cmdset(struct map_info *map)
  46. {
  47. struct lpddr_private *lpddr = map->fldrv_priv;
  48. struct flchip_shared *shared;
  49. struct flchip *chip;
  50. struct mtd_info *mtd;
  51. int numchips;
  52. int i, j;
  53. mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
  54. if (!mtd) {
  55. printk(KERN_ERR "Failed to allocate memory for MTD device\n");
  56. return NULL;
  57. }
  58. mtd->priv = map;
  59. mtd->type = MTD_NORFLASH;
  60. /* Fill in the default mtd operations */
  61. mtd->read = lpddr_read;
  62. mtd->type = MTD_NORFLASH;
  63. mtd->flags = MTD_CAP_NORFLASH;
  64. mtd->flags &= ~MTD_BIT_WRITEABLE;
  65. mtd->erase = lpddr_erase;
  66. mtd->write = lpddr_write_buffers;
  67. mtd->writev = lpddr_writev;
  68. mtd->read_oob = NULL;
  69. mtd->write_oob = NULL;
  70. mtd->sync = NULL;
  71. mtd->lock = lpddr_lock;
  72. mtd->unlock = lpddr_unlock;
  73. mtd->suspend = NULL;
  74. mtd->resume = NULL;
  75. if (map_is_linear(map)) {
  76. mtd->point = lpddr_point;
  77. mtd->unpoint = lpddr_unpoint;
  78. }
  79. mtd->block_isbad = NULL;
  80. mtd->block_markbad = NULL;
  81. mtd->size = 1 << lpddr->qinfo->DevSizeShift;
  82. mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
  83. mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
  84. shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
  85. GFP_KERNEL);
  86. if (!shared) {
  87. kfree(lpddr);
  88. kfree(mtd);
  89. return NULL;
  90. }
  91. chip = &lpddr->chips[0];
  92. numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
  93. for (i = 0; i < numchips; i++) {
  94. shared[i].writing = shared[i].erasing = NULL;
  95. spin_lock_init(&shared[i].lock);
  96. for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
  97. *chip = lpddr->chips[i];
  98. chip->start += j << lpddr->chipshift;
  99. chip->oldstate = chip->state = FL_READY;
  100. chip->priv = &shared[i];
  101. /* those should be reset too since
  102. they create memory references. */
  103. init_waitqueue_head(&chip->wq);
  104. spin_lock_init(&chip->_spinlock);
  105. chip->mutex = &chip->_spinlock;
  106. chip++;
  107. }
  108. }
  109. return mtd;
  110. }
  111. EXPORT_SYMBOL(lpddr_cmdset);
/*
 * wait_for_ready() - poll the PFOW Device Status Register until the chip
 * reports ready, the operation times out, or the DSR flags an error.
 * @chip_op_time: expected duration of the operation in microseconds;
 *		  the timeout is 8x this value, or 500 ms if it is zero.
 *
 * Must be called with chip->mutex held; the lock is dropped while
 * sleeping/busy-waiting and retaken before returning.  If another thread
 * suspends our operation while we slept, we block on chip->wq until our
 * original state is restored, and the timeout is reset.
 *
 * Returns 0 on success, -ETIME on timeout, -EIO on a DSR error (the
 * error bits are cleared first).  chip->state is forced to FL_READY on
 * exit in all cases.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			/* Remaining wait is shorter than a tick: busy-wait. */
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
/*
 * get_chip() - acquire the chip/partition for an operation of type @mode.
 *
 * Called with chip->mutex held.  For write/erase modes, arbitrates
 * ownership of the per-physical-chip engine (chip->priv, the
 * flchip_shared struct) against other partitions before asking
 * chip_ready() to bring this partition into a usable state.
 *
 * Returns 0 when the caller owns the chip for @mode, or a negative
 * error propagated from chip_ready().  May sleep.
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;

		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);
			/* We should not own chip if it is already in
			 * FL_SYNCING state.  Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				spin_unlock(contender->mutex);
				goto retry;
			}
			spin_unlock(contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
			&& shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
/*
 * chip_ready() - try to bring @chip into a state usable for @mode.
 *
 * Called with chip->mutex held.  Returns:
 *   0       - the chip is ready for the requested operation
 *   -EAGAIN - we slept waiting for a state change; caller must retry
 *   -EIO    - the attempted erase-suspend failed
 *
 * When a read/point is requested while an erase is in flight and the
 * part supports erase suspend, the erase is suspended and remembered in
 * chip->oldstate so put_chip() can resume it later.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only reads/points may interrupt an erase, and only if the
		   hardware actually supports erase suspend. */
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. Something went wrong. */
			/* Resume and pretend we weren't here. */
			map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
			map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
			chip->state = FL_ERASING;
			chip->oldstate = FL_READY;
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
sleep:
		/* Chip is busy with something else: wait for a wakeup from
		   whoever owns it, then tell the caller to retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}
/*
 * put_chip() - release the ownership taken by get_chip().
 *
 * Called with chip->mutex held.  If another partition's erase was
 * suspended on our behalf, ownership is handed back and put_chip() is
 * invoked recursively in that partition's context to resume it.  A
 * suspended erase on this chip (chip->oldstate == FL_ERASING) is resumed
 * via the PFOW resume command.  Always wakes sleepers on chip->wq.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				/* Resume the loaner's suspended operation in
				   its own context. */
				put_chip(map, loaner);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		/* Resume the erase we suspended in chip_ready(). */
		chip->state = chip->oldstate;
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
  375. int do_write_buffer(struct map_info *map, struct flchip *chip,
  376. unsigned long adr, const struct kvec **pvec,
  377. unsigned long *pvec_seek, int len)
  378. {
  379. struct lpddr_private *lpddr = map->fldrv_priv;
  380. map_word datum;
  381. int ret, wbufsize, word_gap, words;
  382. const struct kvec *vec;
  383. unsigned long vec_seek;
  384. unsigned long prog_buf_ofs;
  385. wbufsize = 1 << lpddr->qinfo->BufSizeShift;
  386. spin_lock(chip->mutex);
  387. ret = get_chip(map, chip, FL_WRITING);
  388. if (ret) {
  389. spin_unlock(chip->mutex);
  390. return ret;
  391. }
  392. /* Figure out the number of words to write */
  393. word_gap = (-adr & (map_bankwidth(map)-1));
  394. words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
  395. if (!word_gap) {
  396. words--;
  397. } else {
  398. word_gap = map_bankwidth(map) - word_gap;
  399. adr -= word_gap;
  400. datum = map_word_ff(map);
  401. }
  402. /* Write data */
  403. /* Get the program buffer offset from PFOW register data first*/
  404. prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
  405. map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
  406. vec = *pvec;
  407. vec_seek = *pvec_seek;
  408. do {
  409. int n = map_bankwidth(map) - word_gap;
  410. if (n > vec->iov_len - vec_seek)
  411. n = vec->iov_len - vec_seek;
  412. if (n > len)
  413. n = len;
  414. if (!word_gap && (len < map_bankwidth(map)))
  415. datum = map_word_ff(map);
  416. datum = map_word_load_partial(map, datum,
  417. vec->iov_base + vec_seek, word_gap, n);
  418. len -= n;
  419. word_gap += n;
  420. if (!len || word_gap == map_bankwidth(map)) {
  421. map_write(map, datum, prog_buf_ofs);
  422. prog_buf_ofs += map_bankwidth(map);
  423. word_gap = 0;
  424. }
  425. vec_seek += n;
  426. if (vec_seek == vec->iov_len) {
  427. vec++;
  428. vec_seek = 0;
  429. }
  430. } while (len);
  431. *pvec = vec;
  432. *pvec_seek = vec_seek;
  433. /* GO GO GO */
  434. send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
  435. chip->state = FL_WRITING;
  436. ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
  437. if (ret) {
  438. printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
  439. map->name, ret, adr);
  440. goto out;
  441. }
  442. out: put_chip(map, chip);
  443. spin_unlock(chip->mutex);
  444. return ret;
  445. }
  446. int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
  447. {
  448. struct map_info *map = mtd->priv;
  449. struct lpddr_private *lpddr = map->fldrv_priv;
  450. int chipnum = adr >> lpddr->chipshift;
  451. struct flchip *chip = &lpddr->chips[chipnum];
  452. int ret;
  453. spin_lock(chip->mutex);
  454. ret = get_chip(map, chip, FL_ERASING);
  455. if (ret) {
  456. spin_unlock(chip->mutex);
  457. return ret;
  458. }
  459. send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
  460. chip->state = FL_ERASING;
  461. ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
  462. if (ret) {
  463. printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
  464. map->name, ret, adr);
  465. goto out;
  466. }
  467. out: put_chip(map, chip);
  468. spin_unlock(chip->mutex);
  469. return ret;
  470. }
  471. static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
  472. size_t *retlen, u_char *buf)
  473. {
  474. struct map_info *map = mtd->priv;
  475. struct lpddr_private *lpddr = map->fldrv_priv;
  476. int chipnum = adr >> lpddr->chipshift;
  477. struct flchip *chip = &lpddr->chips[chipnum];
  478. int ret = 0;
  479. spin_lock(chip->mutex);
  480. ret = get_chip(map, chip, FL_READY);
  481. if (ret) {
  482. spin_unlock(chip->mutex);
  483. return ret;
  484. }
  485. map_copy_from(map, buf, adr, len);
  486. *retlen = len;
  487. put_chip(map, chip);
  488. spin_unlock(chip->mutex);
  489. return ret;
  490. }
/*
 * lpddr_point() - hand the caller a direct pointer into the linear map.
 *
 * Walks chip by chip, putting each one into FL_POINT state and bumping
 * its ref_point_counter.  Pointing stops early if the chips are not
 * virtually contiguous, if chipnum runs past numchips, or if get_chip()
 * fails; *retlen reports how much was actually pointed.
 *
 * NOTE(review): a mid-walk get_chip() failure is dropped - the function
 * still returns 0 with a short *retlen.  Presumably callers treat a
 * short point as partial success; confirm against MTD point semantics.
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt || (adr + len > mtd->size))
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* Clamp this chunk to the end of the current chip. */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		spin_unlock(chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
  536. static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
  537. {
  538. struct map_info *map = mtd->priv;
  539. struct lpddr_private *lpddr = map->fldrv_priv;
  540. int chipnum = adr >> lpddr->chipshift;
  541. unsigned long ofs;
  542. /* ofs: offset within the first chip that the first read should start */
  543. ofs = adr - (chipnum << lpddr->chipshift);
  544. while (len) {
  545. unsigned long thislen;
  546. struct flchip *chip;
  547. chip = &lpddr->chips[chipnum];
  548. if (chipnum >= lpddr->numchips)
  549. break;
  550. if ((len + ofs - 1) >> lpddr->chipshift)
  551. thislen = (1<<lpddr->chipshift) - ofs;
  552. else
  553. thislen = len;
  554. spin_lock(chip->mutex);
  555. if (chip->state == FL_POINT) {
  556. chip->ref_point_counter--;
  557. if (chip->ref_point_counter == 0)
  558. chip->state = FL_READY;
  559. } else
  560. printk(KERN_WARNING "%s: Warning: unpoint called on non"
  561. "pointed region\n", map->name);
  562. put_chip(map, chip);
  563. spin_unlock(chip->mutex);
  564. len -= thislen;
  565. ofs = 0;
  566. chipnum++;
  567. }
  568. }
  569. static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
  570. size_t *retlen, const u_char *buf)
  571. {
  572. struct kvec vec;
  573. vec.iov_base = (void *) buf;
  574. vec.iov_len = len;
  575. return lpddr_writev(mtd, &vec, 1, to, retlen);
  576. }
  577. static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
  578. unsigned long count, loff_t to, size_t *retlen)
  579. {
  580. struct map_info *map = mtd->priv;
  581. struct lpddr_private *lpddr = map->fldrv_priv;
  582. int ret = 0;
  583. int chipnum;
  584. unsigned long ofs, vec_seek, i;
  585. int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
  586. size_t len = 0;
  587. for (i = 0; i < count; i++)
  588. len += vecs[i].iov_len;
  589. *retlen = 0;
  590. if (!len)
  591. return 0;
  592. chipnum = to >> lpddr->chipshift;
  593. ofs = to;
  594. vec_seek = 0;
  595. do {
  596. /* We must not cross write block boundaries */
  597. int size = wbufsize - (ofs & (wbufsize-1));
  598. if (size > len)
  599. size = len;
  600. ret = do_write_buffer(map, &lpddr->chips[chipnum],
  601. ofs, &vecs, &vec_seek, size);
  602. if (ret)
  603. return ret;
  604. ofs += size;
  605. (*retlen) += size;
  606. len -= size;
  607. /* Be nice and reschedule with the chip in a usable
  608. * state for other processes */
  609. cond_resched();
  610. } while (len);
  611. return 0;
  612. }
  613. static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
  614. {
  615. unsigned long ofs, len;
  616. int ret;
  617. struct map_info *map = mtd->priv;
  618. struct lpddr_private *lpddr = map->fldrv_priv;
  619. int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
  620. ofs = instr->addr;
  621. len = instr->len;
  622. if (ofs > mtd->size || (len + ofs) > mtd->size)
  623. return -EINVAL;
  624. while (len > 0) {
  625. ret = do_erase_oneblock(mtd, ofs);
  626. if (ret)
  627. return ret;
  628. ofs += size;
  629. len -= size;
  630. }
  631. instr->state = MTD_ERASE_DONE;
  632. mtd_erase_callback(instr);
  633. return 0;
  634. }
  635. #define DO_XXLOCK_LOCK 1
  636. #define DO_XXLOCK_UNLOCK 2
  637. int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
  638. {
  639. int ret = 0;
  640. struct map_info *map = mtd->priv;
  641. struct lpddr_private *lpddr = map->fldrv_priv;
  642. int chipnum = adr >> lpddr->chipshift;
  643. struct flchip *chip = &lpddr->chips[chipnum];
  644. spin_lock(chip->mutex);
  645. ret = get_chip(map, chip, FL_LOCKING);
  646. if (ret) {
  647. spin_unlock(chip->mutex);
  648. return ret;
  649. }
  650. if (thunk == DO_XXLOCK_LOCK) {
  651. send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
  652. chip->state = FL_LOCKING;
  653. } else if (thunk == DO_XXLOCK_UNLOCK) {
  654. send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
  655. chip->state = FL_UNLOCKING;
  656. } else
  657. BUG();
  658. ret = wait_for_ready(map, chip, 1);
  659. if (ret) {
  660. printk(KERN_ERR "%s: block unlock error status %d \n",
  661. map->name, ret);
  662. goto out;
  663. }
  664. out: put_chip(map, chip);
  665. spin_unlock(chip->mutex);
  666. return ret;
  667. }
/* MTD lock op: thin wrapper selecting the lock variant of do_xxlock(). */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
/* MTD unlock op: thin wrapper selecting the unlock variant of do_xxlock(). */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
/*
 * word_program() - program a single word at @adr with value @curval.
 *
 * Acquires the owning partition in FL_WRITING mode, issues
 * LPDDR_WORD_PROGRAM and waits for completion.
 *
 * NOTE(review): &curval (a uint32_t) is cast to map_word *, but map_word
 * holds one unsigned long per bus width - this looks safe only for
 * bankwidths up to 32 bits; confirm against send_pfow_command()'s use of
 * the wdata argument.
 */
int word_program(struct map_info *map, loff_t adr, uint32_t curval)
{
	int ret;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);

	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
	if (ret) {
		printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
			map->name, adr, curval);
		goto out;
	}

out:	put_chip(map, chip);
	spin_unlock(chip->mutex);
	return ret;
}
  699. MODULE_LICENSE("GPL");
  700. MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
  701. MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");