nvme-core.c
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};
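
/*
 * The cmdid_data[] flexible array at the end of struct nvme_queue holds a
 * bitmap with one bit per command ID, followed immediately by an array of
 * q_depth struct nvme_cmd_info entries (nvme_alloc_queue() sizes the
 * allocation for both). nvme_cmd_info() returns a pointer just past the
 * bitmap, to the start of that array.
 */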
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 *
 * Allocate a Command ID for a queue. The data passed in will
 * be passed to the completion handler. This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
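
/*
 * These sentinel values stand in for a real completion context in
 * info[cmdid].ctx. Basing them on POISON_POINTER_DELTA keeps them inside a
 * region that can never be a valid kernel pointer, so dereferencing a stale
 * ctx faults instead of silently corrupting memory, and
 * special_completion() can tell the cases apart by value.
 */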
static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held. May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
	return 0;
}

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
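
/*
 * The arithmetic above reflects the PRP list layout built by
 * nvme_setup_prps(): each PRP entry is an 8-byte physical address, and the
 * last entry of every PRP page is reserved as a chain pointer to the next
 * page, leaving PAGE_SIZE - 8 bytes of usable entries per page.
 */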
static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents)
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	nvme_free_iod(dev, iod);
	if (status)
		bio_endio(bio, -EIO);
	else
		bio_endio(bio, 0);
}
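
/*
 * PRP (Physical Region Page) addressing, as used below: PRP1 points at the
 * first (possibly unaligned) page of the transfer. If the remaining data
 * fits in one more page, PRP2 points at it directly; otherwise PRP2 points
 * at a PRP list, a page of 8-byte entries whose final entry chains to the
 * next list page when the transfer needs more entries than one page holds.
 */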
/* length is in bytes. gfp flags indicates whether we may sleep. */
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

struct nvme_bio_pair {
	struct bio b1, b2, *parent;
	struct bio_vec *bv1, *bv2;
	int err;
	atomic_t cnt;
};
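
/*
 * When a bio must be split (see nvme_bio_split() below), the two halves b1
 * and b2 are submitted independently and share a reference count. Each
 * half's endio handler decrements cnt; whichever half completes last
 * propagates the first recorded error and completes the original parent
 * bio.
 */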
static void nvme_bio_pair_endio(struct bio *bio, int err)
{
	struct nvme_bio_pair *bp = bio->bi_private;

	if (err)
		bp->err = err;

	if (atomic_dec_and_test(&bp->cnt)) {
		bio_endio(bp->parent, bp->err);
		if (bp->bv1)
			kfree(bp->bv1);
		if (bp->bv2)
			kfree(bp->bv2);
		kfree(bp);
	}
}

static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
							int len, int offset)
{
	struct nvme_bio_pair *bp;

	BUG_ON(len > bio->bi_size);
	BUG_ON(idx > bio->bi_vcnt);

	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
	if (!bp)
		return NULL;
	bp->err = 0;

	bp->b1 = *bio;
	bp->b2 = *bio;

	bp->b1.bi_size = len;
	bp->b2.bi_size -= len;
	bp->b1.bi_vcnt = idx;
	bp->b2.bi_idx = idx;
	bp->b2.bi_sector += len >> 9;

	if (offset) {
		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv1)
			goto split_fail_1;

		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv2)
			goto split_fail_2;

		memcpy(bp->bv1, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));
		memcpy(bp->bv2, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));

		bp->b1.bi_io_vec = bp->bv1;
		bp->b2.bi_io_vec = bp->bv2;
		bp->b2.bi_io_vec[idx].bv_offset += offset;
		bp->b2.bi_io_vec[idx].bv_len -= offset;
		bp->b1.bi_io_vec[idx].bv_len = offset;
		bp->b1.bi_vcnt++;
	} else
		bp->bv1 = bp->bv2 = NULL;

	bp->b1.bi_private = bp;
	bp->b2.bi_private = bp;

	bp->b1.bi_end_io = nvme_bio_pair_endio;
	bp->b2.bi_end_io = nvme_bio_pair_endio;

	bp->parent = bio;
	atomic_set(&bp->cnt, 2);

	return bp;

split_fail_2:
	kfree(bp->bv1);
split_fail_1:
	kfree(bp);
	return NULL;
}

static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
						int idx, int len, int offset)
{
	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
	if (!bp)
		return -ENOMEM;

	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, &bp->b1);
	bio_list_add(&nvmeq->sq_cong, &bp->b2);

	return 0;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
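
/*
 * Because every PRP entry after the first must describe a full,
 * page-aligned region, a segment that starts at a nonzero offset or ends
 * off a page boundary cannot be appended to the scatterlist;
 * nvme_map_bio() splits the bio at that point instead.
 */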
static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, length = 0, nsegs = 0, split_len = bio->bi_size;

	if (nvmeq->dev->stripe_size)
		split_len = nvmeq->dev->stripe_size -
			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));

	sg_init_table(iod->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				return nvme_split_and_submit(bio, nvmeq, i,
								length, 0);

			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}

		if (split_len - length < bvec->bv_len)
			return nvme_split_and_submit(bio, nvmeq, i, split_len,
							split_len - length);
		length += bvec->bv_len;
		bvprv = bvec;
	}
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
		return -ENOMEM;

	BUG_ON(length != bio->bi_size);
	return length;
}

/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range;
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
							&iod->first_dma);
	if (!range)
		return -ENOMEM;

	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held. May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if (bio->bi_rw & REQ_DISCARD) {
		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
		if (result)
			goto free_cmdid;
		return result;
	}
	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
	if (result <= 0)
		goto free_cmdid;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

free_cmdid:
	free_cmdid(nvmeq, cmdid, NULL);
free_iod:
	nvme_free_iod(nvmeq->dev, iod);
nomem:
	return result;
}

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}
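
/*
 * Completion queue entries carry a phase tag that the controller inverts
 * on every pass through the ring. An entry is new only while its phase bit
 * matches cq_phase, so the loop below consumes entries until the first
 * phase mismatch, flipping the expected phase each time the head wraps.
 */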
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE. Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}
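
/*
 * Synchronous commands work by initialising cmdinfo.status to the -EINTR
 * sentinel, marking the task TASK_KILLABLE, and sleeping in
 * schedule_timeout(). sync_completion() overwrites the status and wakes
 * the task; if the status is still -EINTR afterwards, the wait was
 * interrupted or timed out and the command is cancelled.
 */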
/*
 * Returns 0 on success. If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
						u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule_timeout(timeout);

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @queue: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_cancel_ios(nvmeq, false);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	nvme_free_queue_mem(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
						sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
					    int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

release_sq:
	adapter_delete_sq(dev, qid);
release_cq:
	adapter_delete_cq(dev, qid);
free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit. The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
	u32 cc = readl(&dev->bar->cc);

	if (cc & NVME_CC_ENABLE)
		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
	return nvme_wait_ready(dev, cap, true);
}
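
/*
 * Doorbell registers live at offset 4096 into BAR0. Each queue pair owns
 * an SQ tail doorbell and a CQ head doorbell, spaced apart by the stride
 * advertised in CAP.DSTRD (dev->db_stride); hence the q_db indexing of
 * qid << (db_stride + 1) in nvme_alloc_queue() and the CQ doorbell offset
 * of 1 << db_stride in nvme_process_cq().
 */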
  983. static int nvme_configure_admin_queue(struct nvme_dev *dev)
  984. {
  985. int result;
  986. u32 aqa;
  987. u64 cap = readq(&dev->bar->cap);
  988. struct nvme_queue *nvmeq;
  989. dev->dbs = ((void __iomem *)dev->bar) + 4096;
  990. dev->db_stride = NVME_CAP_STRIDE(cap);
  991. result = nvme_disable_ctrl(dev, cap);
  992. if (result < 0)
  993. return result;
  994. nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
  995. if (!nvmeq)
  996. return -ENOMEM;
  997. aqa = nvmeq->q_depth - 1;
  998. aqa |= aqa << 16;
  999. dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
  1000. dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
  1001. dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
  1002. dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
  1003. writel(aqa, &dev->bar->aqa);
  1004. writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
  1005. writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
  1006. writel(dev->ctrl_config, &dev->bar->cc);
  1007. result = nvme_enable_ctrl(dev, cap);
  1008. if (result)
  1009. goto free_q;
  1010. result = queue_request_irq(dev, nvmeq, "nvme admin");
  1011. if (result)
  1012. goto free_q;
  1013. dev->queues[0] = nvmeq;
  1014. return result;
  1015. free_q:
  1016. nvme_free_queue_mem(nvmeq);
  1017. return result;
  1018. }
  1019. struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
  1020. unsigned long addr, unsigned length)
  1021. {
  1022. int i, err, count, nents, offset;
  1023. struct scatterlist *sg;
  1024. struct page **pages;
  1025. struct nvme_iod *iod;
  1026. if (addr & 3)
  1027. return ERR_PTR(-EINVAL);
  1028. if (!length)
  1029. return ERR_PTR(-EINVAL);
  1030. offset = offset_in_page(addr);
  1031. count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
  1032. pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
  1033. if (!pages)
  1034. return ERR_PTR(-ENOMEM);
  1035. err = get_user_pages_fast(addr, count, 1, pages);
  1036. if (err < count) {
  1037. count = err;
  1038. err = -EFAULT;
  1039. goto put_pages;
  1040. }
  1041. iod = nvme_alloc_iod(count, length, GFP_KERNEL);
  1042. sg = iod->sg;
  1043. sg_init_table(sg, count);
  1044. for (i = 0; i < count; i++) {
  1045. sg_set_page(&sg[i], pages[i],
  1046. min_t(int, length, PAGE_SIZE - offset), offset);
  1047. length -= (PAGE_SIZE - offset);
  1048. offset = 0;
  1049. }
  1050. sg_mark_end(&sg[i - 1]);
  1051. iod->nents = count;
  1052. err = -ENOMEM;
  1053. nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
  1054. write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  1055. if (!nents)
  1056. goto free_iod;
  1057. kfree(pages);
  1058. return iod;
  1059. free_iod:
  1060. kfree(iod);
  1061. put_pages:
  1062. for (i = 0; i < count; i++)
  1063. put_page(pages[i]);
  1064. kfree(pages);
  1065. return ERR_PTR(err);
  1066. }
  1067. void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
  1068. struct nvme_iod *iod)
  1069. {
  1070. int i;
  1071. dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
  1072. write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  1073. for (i = 0; i < iod->nents; i++)
  1074. put_page(sg_page(&iod->sg[i]));
  1075. }
  1076. static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
  1077. {
  1078. struct nvme_dev *dev = ns->dev;
  1079. struct nvme_queue *nvmeq;
  1080. struct nvme_user_io io;
  1081. struct nvme_command c;
  1082. unsigned length, meta_len;
  1083. int status, i;
  1084. struct nvme_iod *iod, *meta_iod = NULL;
  1085. dma_addr_t meta_dma_addr;
  1086. void *meta, *uninitialized_var(meta_mem);
  1087. if (copy_from_user(&io, uio, sizeof(io)))
  1088. return -EFAULT;
  1089. length = (io.nblocks + 1) << ns->lba_shift;
  1090. meta_len = (io.nblocks + 1) * ns->ms;
  1091. if (meta_len && ((io.metadata & 3) || !io.metadata))
  1092. return -EINVAL;
  1093. switch (io.opcode) {
  1094. case nvme_cmd_write:
  1095. case nvme_cmd_read:
  1096. case nvme_cmd_compare:
  1097. iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
  1098. break;
  1099. default:
  1100. return -EINVAL;
  1101. }
  1102. if (IS_ERR(iod))
  1103. return PTR_ERR(iod);
  1104. memset(&c, 0, sizeof(c));
  1105. c.rw.opcode = io.opcode;
  1106. c.rw.flags = io.flags;
  1107. c.rw.nsid = cpu_to_le32(ns->ns_id);
  1108. c.rw.slba = cpu_to_le64(io.slba);
  1109. c.rw.length = cpu_to_le16(io.nblocks);
  1110. c.rw.control = cpu_to_le16(io.control);
  1111. c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
  1112. c.rw.reftag = cpu_to_le32(io.reftag);
  1113. c.rw.apptag = cpu_to_le16(io.apptag);
  1114. c.rw.appmask = cpu_to_le16(io.appmask);
  1115. if (meta_len) {
  1116. meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
  1117. if (IS_ERR(meta_iod)) {
  1118. status = PTR_ERR(meta_iod);
  1119. meta_iod = NULL;
  1120. goto unmap;
  1121. }
  1122. meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
  1123. &meta_dma_addr, GFP_KERNEL);
  1124. if (!meta_mem) {
  1125. status = -ENOMEM;
  1126. goto unmap;
  1127. }
  1128. if (io.opcode & 1) {
  1129. int meta_offset = 0;
  1130. for (i = 0; i < meta_iod->nents; i++) {
  1131. meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
  1132. meta_iod->sg[i].offset;
  1133. memcpy(meta_mem + meta_offset, meta,
  1134. meta_iod->sg[i].length);
  1135. kunmap_atomic(meta);
  1136. meta_offset += meta_iod->sg[i].length;
  1137. }
  1138. }
  1139. c.rw.metadata = cpu_to_le64(meta_dma_addr);
  1140. }
  1141. length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
  1142. nvmeq = get_nvmeq(dev);
  1143. /*
  1144. * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
  1145. * disabled. We may be preempted at any point, and be rescheduled
  1146. * to a different CPU. That will cause cacheline bouncing, but no
  1147. * additional races since q_lock already protects against other CPUs.
  1148. */
  1149. put_nvmeq(nvmeq);
  1150. if (length != (io.nblocks + 1) << ns->lba_shift)
  1151. status = -ENOMEM;
  1152. else
  1153. status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
  1154. if (meta_len) {
  1155. if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
  1156. int meta_offset = 0;
  1157. for (i = 0; i < meta_iod->nents; i++) {
  1158. meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
  1159. meta_iod->sg[i].offset;
  1160. memcpy(meta, meta_mem + meta_offset,
  1161. meta_iod->sg[i].length);
  1162. kunmap_atomic(meta);
  1163. meta_offset += meta_iod->sg[i].length;
  1164. }
  1165. }
  1166. dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
  1167. meta_dma_addr);
  1168. }
  1169. unmap:
  1170. nvme_unmap_user_pages(dev, io.opcode & 1, iod);
  1171. nvme_free_iod(dev, iod);
  1172. if (meta_iod) {
  1173. nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
  1174. nvme_free_iod(dev, meta_iod);
  1175. }
  1176. return status;
  1177. }
  1178. static int nvme_user_admin_cmd(struct nvme_dev *dev,
  1179. struct nvme_admin_cmd __user *ucmd)
  1180. {
  1181. struct nvme_admin_cmd cmd;
  1182. struct nvme_command c;
  1183. int status, length;
  1184. struct nvme_iod *uninitialized_var(iod);
  1185. unsigned timeout;
  1186. if (!capable(CAP_SYS_ADMIN))
  1187. return -EACCES;
  1188. if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
  1189. return -EFAULT;
  1190. memset(&c, 0, sizeof(c));
  1191. c.common.opcode = cmd.opcode;
  1192. c.common.flags = cmd.flags;
  1193. c.common.nsid = cpu_to_le32(cmd.nsid);
  1194. c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
  1195. c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
  1196. c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
  1197. c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
  1198. c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
  1199. c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
  1200. c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
  1201. c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
  1202. length = cmd.data_len;
  1203. if (cmd.data_len) {
  1204. iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
  1205. length);
  1206. if (IS_ERR(iod))
  1207. return PTR_ERR(iod);
  1208. length = nvme_setup_prps(dev, &c.common, iod, length,
  1209. GFP_KERNEL);
  1210. }
  1211. timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
  1212. ADMIN_TIMEOUT;
  1213. if (length != cmd.data_len)
  1214. status = -ENOMEM;
  1215. else
  1216. status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
  1217. timeout);
  1218. if (cmd.data_len) {
  1219. nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
  1220. nvme_free_iod(dev, iod);
  1221. }
  1222. if (!status && copy_to_user(&ucmd->result, &cmd.result,
  1223. sizeof(cmd.result)))
  1224. status = -EFAULT;
  1225. return status;
  1226. }
  1227. static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
  1228. unsigned long arg)
  1229. {
  1230. struct nvme_ns *ns = bdev->bd_disk->private_data;
  1231. switch (cmd) {
  1232. case NVME_IOCTL_ID:
  1233. return ns->ns_id;
  1234. case NVME_IOCTL_ADMIN_CMD:
  1235. return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
  1236. case NVME_IOCTL_SUBMIT_IO:
  1237. return nvme_submit_io(ns, (void __user *)arg);
  1238. case SG_GET_VERSION_NUM:
  1239. return nvme_sg_get_version_num((void __user *)arg);
  1240. case SG_IO:
  1241. return nvme_sg_io(ns, (void __user *)arg);
  1242. default:
  1243. return -ENOTTY;
  1244. }
  1245. }
  1246. static const struct block_device_operations nvme_fops = {
  1247. .owner = THIS_MODULE,
  1248. .ioctl = nvme_ioctl,
  1249. .compat_ioctl = nvme_ioctl,
  1250. };
  1251. static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
  1252. {
  1253. while (bio_list_peek(&nvmeq->sq_cong)) {
  1254. struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
  1255. struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
  1256. if (bio_list_empty(&nvmeq->sq_cong))
  1257. remove_wait_queue(&nvmeq->sq_full,
  1258. &nvmeq->sq_cong_wait);
  1259. if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
  1260. if (bio_list_empty(&nvmeq->sq_cong))
  1261. add_wait_queue(&nvmeq->sq_full,
  1262. &nvmeq->sq_cong_wait);
  1263. bio_list_add_head(&nvmeq->sq_cong, bio);
  1264. break;
  1265. }
  1266. }
  1267. }
  1268. static int nvme_kthread(void *data)
  1269. {
  1270. struct nvme_dev *dev;
  1271. while (!kthread_should_stop()) {
  1272. set_current_state(TASK_INTERRUPTIBLE);
  1273. spin_lock(&dev_list_lock);
  1274. list_for_each_entry(dev, &dev_list, node) {
  1275. int i;
  1276. for (i = 0; i < dev->queue_count; i++) {
  1277. struct nvme_queue *nvmeq = dev->queues[i];
  1278. if (!nvmeq)
  1279. continue;
  1280. spin_lock_irq(&nvmeq->q_lock);
  1281. if (nvme_process_cq(nvmeq))
  1282. printk("process_cq did something\n");
  1283. nvme_cancel_ios(nvmeq, true);
  1284. nvme_resubmit_bios(nvmeq);
  1285. spin_unlock_irq(&nvmeq->q_lock);
  1286. }
  1287. }
  1288. spin_unlock(&dev_list_lock);
  1289. schedule_timeout(round_jiffies_relative(HZ));
  1290. }
  1291. return 0;
  1292. }
  1293. static DEFINE_IDA(nvme_index_ida);
  1294. static int nvme_get_ns_idx(void)
  1295. {
  1296. int index, error;
  1297. do {
  1298. if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
  1299. return -1;
  1300. spin_lock(&dev_list_lock);
  1301. error = ida_get_new(&nvme_index_ida, &index);
  1302. spin_unlock(&dev_list_lock);
  1303. } while (error == -EAGAIN);
  1304. if (error)
  1305. index = -1;
  1306. return index;
  1307. }
  1308. static void nvme_put_ns_idx(int index)
  1309. {
  1310. spin_lock(&dev_list_lock);
  1311. ida_remove(&nvme_index_ida, index);
  1312. spin_unlock(&dev_list_lock);
  1313. }
  1314. static void nvme_config_discard(struct nvme_ns *ns)
  1315. {
  1316. u32 logical_block_size = queue_logical_block_size(ns->queue);
  1317. ns->queue->limits.discard_zeroes_data = 0;
  1318. ns->queue->limits.discard_alignment = logical_block_size;
  1319. ns->queue->limits.discard_granularity = logical_block_size;
  1320. ns->queue->limits.max_discard_sectors = 0xffffffff;
  1321. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
  1322. }
  1323. static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
  1324. struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
  1325. {
  1326. struct nvme_ns *ns;
  1327. struct gendisk *disk;
  1328. int lbaf;
  1329. if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
  1330. return NULL;
  1331. ns = kzalloc(sizeof(*ns), GFP_KERNEL);
  1332. if (!ns)
  1333. return NULL;
  1334. ns->queue = blk_alloc_queue(GFP_KERNEL);
  1335. if (!ns->queue)
  1336. goto out_free_ns;
  1337. ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
  1338. queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
  1339. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
  1340. blk_queue_make_request(ns->queue, nvme_make_request);
  1341. ns->dev = dev;
  1342. ns->queue->queuedata = ns;
  1343. disk = alloc_disk(NVME_MINORS);
  1344. if (!disk)
  1345. goto out_free_queue;
  1346. ns->ns_id = nsid;
  1347. ns->disk = disk;
  1348. lbaf = id->flbas & 0xf;
  1349. ns->lba_shift = id->lbaf[lbaf].ds;
  1350. ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
  1351. blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
  1352. if (dev->max_hw_sectors)
  1353. blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
  1354. disk->major = nvme_major;
  1355. disk->minors = NVME_MINORS;
  1356. disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
  1357. disk->fops = &nvme_fops;
  1358. disk->private_data = ns;
  1359. disk->queue = ns->queue;
  1360. disk->driverfs_dev = &dev->pci_dev->dev;
  1361. sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
  1362. set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
  1363. if (dev->oncs & NVME_CTRL_ONCS_DSM)
  1364. nvme_config_discard(ns);
  1365. return ns;
  1366. out_free_queue:
  1367. blk_cleanup_queue(ns->queue);
  1368. out_free_ns:
  1369. kfree(ns);
  1370. return NULL;
  1371. }
  1372. static void nvme_ns_free(struct nvme_ns *ns)
  1373. {
  1374. int index = ns->disk->first_minor / NVME_MINORS;
  1375. put_disk(ns->disk);
  1376. nvme_put_ns_idx(index);
  1377. blk_cleanup_queue(ns->queue);
  1378. kfree(ns);
  1379. }
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

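/*
 * Bring up the I/O queues: negotiate one queue pair per online CPU,
 * switch from the admin vector to MSI-X, spread the vectors across
 * CPUs, and size each queue from CAP.MQES.  pci_enable_msix() returns
 * the number of vectors actually available when it cannot grant the
 * full request, so the loop below retries with that smaller count.
 */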
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	/*
	 * Each queue pair needs a submission and a completion doorbell,
	 * 1 << (db_stride + 3) bytes per pair, starting at offset 4096.
	 * Remap the BAR if the doorbells run past the initial 8k mapping.
	 */
	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
								db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			/* Only this many vectors available; retry */
			nr_io_queues = result;
			continue;
		} else {
			/* MSI-X unavailable; fall back to a single queue */
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	/* Hint a round-robin CPU affinity for each I/O queue's vector */
	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
								NVME_Q_DEPTH);
	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

	/* Map any remaining possible CPUs onto the queues we did create */
	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

/*
 * Return: error value if an error occurred setting up the queues or
 * calling Identify Device.  0 if these succeeded, even if adding some
 * of the namespaces failed.  At the moment, these failures are silent.
 * TBD which failures should be reported.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;
	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/*
	 * The first 4k of the buffer holds identify data; the second 4k
	 * holds the per-namespace LBA range type feature.
	 */
	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	dev->oncs = le16_to_cpup(&ctrl->oncs);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
			(dev->pci_dev->device == 0x0953) && ctrl->vs[3])
		dev->stripe_size = 1 << (ctrl->vs[3] + shift);

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			memset(mem + 4096, 0, 4096);

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	res = 0;
 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

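/*
 * Tear down in roughly the reverse order of nvme_dev_add(): unlink the
 * device from dev_list first, so the polling kthread stops scanning its
 * queues, then remove the disks and free the queues themselves.
 */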
static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);
	return 0;
}

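/*
 * PRP lists describe a command's data buffer one page entry at a time.
 * Two DMA pools keep those lists cheap to allocate: a page-sized pool
 * for large transfers and a 256-byte pool (32 entries, enough for 128k
 * with 4k pages) so small I/Os don't each consume a whole page.
 */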
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;

	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}

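/*
 * Final teardown, run when the last reference to the device is dropped.
 * Both nvme_remove() and the char device hold krefs, so an open
 * /dev/nvme%d node keeps the controller state alive across hot-unplug.
 */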
static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);

	nvme_dev_remove(dev);
	pci_disable_msix(dev->pci_dev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(dev->pci_dev);
	pci_release_regions(dev->pci_dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

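/*
 * Char device interface: each controller registers a misc device named
 * nvme%d that accepts admin command passthrough from userspace.  The
 * open routine recovers the nvme_dev from the embedded miscdevice and
 * repoints file->private_data at the device for later ioctls.
 */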
static int nvme_dev_open(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
								miscdev);
	kref_get(&dev->kref);
	f->private_data = dev;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = f->private_data;

	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

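/*
 * Only NVME_IOCTL_ADMIN_CMD is handled here; per-namespace I/O ioctls
 * belong to the block device's fops.  The compat path can share this
 * handler because struct nvme_admin_cmd uses only fixed-width fields.
 * A hypothetical userspace sketch (illustration only, not shipped with
 * the driver; field names follow the uapi header of this era):
 *
 *	struct nvme_admin_cmd cmd = {
 *		.opcode   = 0x06,			(Identify)
 *		.addr     = (__u64)(uintptr_t)buf,	(4k buffer)
 *		.data_len = 4096,
 *		.cdw10    = 1,			(CNS=1: controller data)
 *	};
 *	fd = open("/dev/nvme0", O_RDWR);
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */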
static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct nvme_dev *dev = f->private_data;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(dev, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

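/*
 * Bring-up order: PCI enable and BAR mapping, admin queue, then I/O
 * queues and the namespace scan via nvme_dev_add(), and finally the
 * misc char device.  The error labels unwind in reverse order, so a
 * failure at any step releases exactly what was set up before it.
 */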
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	result = nvme_set_instance(dev);
	if (result)
		goto disable;

	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto release_instance;

	/*
	 * Map the registers and the admin doorbells; the I/O doorbells
	 * may force a larger mapping later, in nvme_setup_io_queues().
	 */
	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
	dev->miscdev.parent = &pdev->dev;
	dev->miscdev.name = dev->name;
	dev->miscdev.fops = &nvme_dev_fops;
	result = misc_register(&dev->miscdev);
	if (result)
		goto remove;

	kref_init(&dev->kref);
	return 0;

 remove:
	/*
	 * nvme_dev_remove() already unlinks the device and frees the
	 * queues, so skip the delete: unwinding to avoid doing it twice.
	 */
	nvme_dev_remove(dev);
	goto unmap;
 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_prp_pools(dev);
 release_instance:
	/*
	 * The PRP pools are not released here: on the
	 * nvme_setup_prp_pools() failure path they were never created.
	 */
	nvme_release_instance(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	misc_deregister(&dev->miscdev);
	kref_put(&dev->kref, nvme_free_dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

/*
 * Match on PCI class code rather than vendor/device IDs: base class 01h
 * (mass storage), subclass 08h (non-volatile memory), prog-if 02h (NVMe).
 */
static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

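/*
 * Module init: start the polling kthread before any device can appear,
 * grab a block major (passing 0 requests a dynamic major, returned as
 * a positive value), then register the PCI driver, which may invoke
 * nvme_probe() immediately for already-present controllers.
 */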
static int __init nvme_init(void)
{
	int result;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_kthread;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.8");
module_init(nvme_init);
module_exit(nvme_exit);