block.c

/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 *         28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD	 113
#define INAND_CMD38_ARG_ERASE	 0x00
#define INAND_CMD38_ARG_TRIM	 0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS	(10 * 60 * 1000)	/* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
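/*
 * Sketch of the CMD6 (MMC_SWITCH) argument layout this macro assumes,
 * per the eMMC spec: bits [25:24] access mode, [23:16] EXT_CSD byte
 * index, [15:8] value, [2:0] command set.  So for an argument built as
 *
 *	arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 *	      (EXT_CSD_SANITIZE_START << 16) | (1 << 8);
 *
 * MMC_EXTRACT_INDEX_FROM_ARG(arg) recovers EXT_CSD_SANITIZE_START.
 */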
#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
				  (req->cmd_flags & REQ_META)) && \
				  (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}
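/*
 * Example (assuming the default perdev_minors of 8): /dev/mmcblk2 owns
 * minors 16-23, so both paths above map any of its minors back to
 * device index 2.
 */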
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
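/*
 * The geometry reported above is synthetic: 4 heads x 16 sectors, with
 * cylinders chosen so that C*H*S matches the capacity.  For example, a
 * card of 7813120 512-byte sectors (~4 GB) would report 7813120 / 64 =
 * 122080 cylinders.
 */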
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!(mmc_can_sanitize(card) &&
	      (card->host->caps2 & MMC_CAP2_SANITIZE))) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *	mrq.data = NULL;
			 *	cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_get_card(card);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
				 idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

cmd_rel_host:
	mmc_put_card(card);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}
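/*
 * For reference, a minimal (hypothetical) userspace caller of this path
 * might look like:
 *
 *	struct mmc_ioc_cmd ic = {
 *		.opcode     = MMC_SEND_EXT_CSD,		// CMD8
 *		.flags      = MMC_RSP_R1 | MMC_CMD_ADTC,
 *		.blksz      = 512,
 *		.blocks     = 1,
 *		.write_flag = 0,
 *	};
 *	mmc_ioc_cmd_set_data(ic, buf);	// buf is a 512-byte buffer
 *	ioctl(fd, MMC_IOC_CMD, &ic);	// fd opened on the whole device
 *
 * The process needs CAP_SYS_RAWIO and must open the whole block device,
 * per the checks in mmc_blk_ioctl_cmd() above.
 */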
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}

static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
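/*
 * Worked example of the legacy clamping above, assuming rel_sectors == 8:
 * a write starting at an unaligned sector is cut down to a single block;
 * an aligned 32-block write is cut down to 8 blocks; an aligned 5-block
 * write is cut down to 1 block.  The block layer then resubmits the
 * remainder, so the request completes in reliable-write-sized pieces.
 */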
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, and we have to wait for it to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		unsigned long timeout;

		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}

			/* Timeout if the device never becomes ready for data
			 * and never leaves the program state.
			 */
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Card stuck in programming state! %s %s\n",
				       mmc_hostname(card->host),
				       req->rq_disk->disk_name, __func__);
				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
						   mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check;
	u32 status;
	u8 *ext_csd;

	BUG_ON(!packed);

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		ext_csd = kzalloc(512, GFP_KERNEL);
		if (!ext_csd) {
			pr_err("%s: unable to allocate buffer for ext_csd\n",
			       req->rq_disk->disk_name);
			return -ENOMEM;
		}

		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			check = MMC_BLK_ABORT;
			goto free;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				    ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
free:
		kfree(ext_csd);
	}

	return check;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway.  This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host.  If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly.  This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}

static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] =
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq);
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[(i * 2) + 1] =
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags |= MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}
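
/*
 * After a command error on a write, salvage whatever is known to have
 * reached the medium: query an SD card for its written block count, or
 * trust bytes_xfered from the controller for non-packed MMC, and
 * complete that much of the request before the caller decides whether
 * to reset and retry.
 */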
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;

	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
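
/*
 * Complete a finished packed group. With no recorded failure
 * (idx_failure left at MMC_PACKED_NR_IDX) every request is ended
 * successfully. On a partial failure, the requests before the failing
 * index are completed, the failing request becomes the new mq_rq->req,
 * and 1 is returned so the caller can retry the remainder of the group.
 */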
static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}
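
/* Fail every request still queued in a packed group with -EIO. */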
static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}
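
/*
 * Unwind a prepared packed group that will not be issued as packed:
 * walk the list from the tail and put each request back on the block
 * queue, except the head entry, which the caller re-prepares and
 * reissues as a normal single request.
 */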
static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}
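
/*
 * Main read/write engine. The new request is started asynchronously
 * and the *previous* one is completed afterwards, so card transfer and
 * request preparation overlap. The do-while loop services retries:
 * MMC_BLK_RETRY up to five times, a single-block fallback after ECC
 * errors, and per-group retry accounting for packed writes.
 */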
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);

	do {
		if (rqc) {
			/*
			 * When a 4KB native sector is enabled, only
			 * 8-block multiple reads or writes are allowed.
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
				mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
					brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
					req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {
				/*
				 * In case of an incomplete request,
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						disable_multi, mq);
				mmc_start_req(card->host,
						&mq_rq->mmc_active, NULL);
			}
		}
	} while (ret);

	return 1;

 cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}

 start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If the current request is packed, it needs to
			 * be put back before being reissued.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
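
/*
 * Queue-thread entry point. Discard, secure discard and flush requests
 * are handled synchronously, after draining any in-flight asynchronous
 * transfer; everything else goes through mmc_blk_issue_rw_rq(). The
 * host is claimed for the first request of a burst and released when
 * the queue drains or a special request completes.
 */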
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (req && req->cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE &&
			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req && req->cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	     (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
		/*
		 * Release the host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * For a special request there is no reentry to
		 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_put_card(card);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
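
/*
 * Allocate one block device (mmc_blk_data, gendisk and request queue)
 * for the card itself or for one of its hardware partitions. "size"
 * is in 512-byte sectors; "area_type" selects main/boot/general/RPMB
 * behaviour, e.g. RPMB devices get GENHD_FL_NO_PART_SCAN since they
 * carry no partition table.
 */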
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (area_type & MMC_BLK_DATA_AREA_RPMB)
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
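
/*
 * Size and create the main block device. As a worked example of the
 * CSD branch below: read_blkbits == 11 gives size = csd.capacity << 2,
 * i.e. each 2048-byte CSD capacity unit is four of the 512-byte
 * sectors that set_capacity() expects.
 */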
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
	return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
/*
 * MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be
 * allocated to provide access to the partition.
 */
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
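
/*
 * Tear down one block device: shut down the request queue first so no
 * new I/O can arrive, then remove the sysfs attributes and the
 * gendisk, and finally drop our usage reference.
 */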
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		/*
		 * Flush remaining requests and free queues. It
		 * is freeing the queue that stops new requests
		 * from being accepted.
		 */
		mmc_cleanup_queue(&md->queue);
		if (md->flags & MMC_BLK_PACKED_CMD)
			mmc_packed_clean(&md->queue);
		card = md->queue.card;
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
					card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);

			del_gendisk(md->disk);
		}
		mmc_blk_put(md);
	}
}
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
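
/*
 * Register a gendisk and its sysfs attributes: "force_ro" for every
 * device, plus "ro_lock_until_next_power_on" on boot partitions that
 * support power-on write protection. The lock attribute is created
 * read-only when EXT_CSD_BOOT_WP_B_PWR_WP_DIS indicates the feature
 * is disabled.
 */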
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15

static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * blacklist what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	END_FIXUP
};
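
/*
 * Bind to a newly detected card: allocate the main device and any
 * EXT_CSD partitions, apply the quirk table above, register all the
 * disks, and enable runtime PM with a 3 second autosuspend delay
 * (except for SD-combo cards, whose runtime PM is left to SDIO init).
 */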
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		pm_runtime_get_sync(&card->dev);
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
		pm_runtime_put(&card->dev);
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
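
/*
 * With the usual CONFIG_MMC_BLOCK_MINORS of 8, max_devices below works
 * out to 256 / 8 = 32 mmcblk devices sharing the block major.
 */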
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");