nvme.c

/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		((depth) * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define NVME_IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	int db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};
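
/*
 * The cmdid_data[] tail of struct nvme_queue is really two arrays: a
 * bitmap of q_depth bits recording which command IDs are in use, followed
 * by q_depth nvme_cmd_info entries.  nvme_cmd_info() skips past the
 * bitmap to reach the info array (see the 'extra' sizing in
 * nvme_alloc_queue()).
 */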
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
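
/*
 * The CMD_CTX_* sentinels stand in for a real ctx pointer so that
 * special_completion() below can tell why a command finished.  Basing
 * them on POISON_POINTER_DELTA keeps them distinct from any valid kernel
 * pointer and makes them fault if they are ever dereferenced by mistake.
 */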
static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}
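
/*
 * dev->queues[0] is the admin queue, so the current CPU's I/O queue is at
 * index cpu + 1.  get_cpu() disables preemption; callers must pair this
 * with put_nvmeq() and must not sleep in between.
 */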
static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	void *private;		/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist sg[0];
};
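
/*
 * The PRP-list page pointers live immediately after sg[nseg]; iod->offset
 * records where they begin so that iod_list() can find them.
 */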
static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
	}

	return iod;
}

static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);
	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, bio);
	put_nvmeq(nvmeq);
	wake_up_process(nvme_thread);
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	nvme_free_iod(dev, iod);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		requeue_bio(dev, bio);
	} else {
		bio_endio(bio, 0);
	}
}

/* length is in bytes.  gfp flags indicates whether we may sleep. */
static int nvme_setup_prps(struct nvme_dev *dev,
			struct nvme_common_command *cmd, struct nvme_iod *iod,
			int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
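
	/*
	 * Fill the PRP list.  When a list page fills up, its last slot is
	 * turned into a pointer to the next list page (the chained PRP
	 * list format), and the entry it displaced moves to slot 0 of the
	 * new page.
	 */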
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(iod->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
				    bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
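
	/*
	 * If the loop broke out early at a virtual-address hole, bi_idx
	 * records how far we got; bio_completion() will resubmit the
	 * remainder of the bio.
	 */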
	bio->bi_idx = i;
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
	if (result < 0)
		goto free_iod;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;
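
	/*
	 * New completions are found via the phase tag: the controller
	 * inverts the phase bit each time it wraps the completion queue,
	 * so an entry whose phase bit matches our expected value is new.
	 */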
	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}
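
/*
 * With threaded interrupts, this check runs in hard-irq context and only
 * wakes the handler thread when the completion queue actually holds a
 * new entry.
 */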
static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;
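
	/*
	 * Set the task state before ringing the doorbell so a completion
	 * that arrives before schedule() still wakes us instead of the
	 * wakeup being lost.
	 */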
	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
			unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
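
	/*
	 * Each queue owns a pair of doorbells (SQ tail, CQ head).
	 * CAP.DSTRD gives the spacing between consecutive doorbells as a
	 * power of two, hence the shift by db_stride + 1 to locate this
	 * queue's pair.
	 */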
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;
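
	/*
	 * AQA holds the admin submission queue size in bits 11:00 and the
	 * admin completion queue size in bits 27:16, both zero-based.
	 */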
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	if (!iod) {
		err = -ENOMEM;
		goto put_pages;
	}

	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
			    min_t(int, length, PAGE_SIZE - offset), offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int status;
	struct nvme_iod *iod;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);

	nvmeq = get_nvmeq(dev);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);
	return status;
}

static int nvme_user_admin_cmd(struct nvme_ns *ns,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *iod;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}
	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (!time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;
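
	/*
	 * Once a second, poll every queue of every device: reap any
	 * completions the interrupt handler may have missed, time out
	 * stuck commands, and resubmit bios that were queued while the
	 * submission queue was full.
	 */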
	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_timeout_ios(nvmeq);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;
	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);
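
	/*
	 * Both the submission and completion queue counts in the Number of
	 * Queues feature are zero-based, hence the count - 1 encoding here
	 * and the + 1 when decoding the result below.
	 */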
	status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues, db_bar_size;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);
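
	/*
	 * The initial BAR mapping covers only 8 KB: one page of registers
	 * plus one page of doorbells.  Remap it if the doorbells for all
	 * the I/O queues, at the controller's stride, won't fit.
	 */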
	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
								db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}
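
	/*
	 * If there are more possible CPUs than I/O queues, map the extra
	 * CPUs onto the existing queues round-robin so that get_nvmeq()
	 * always finds a queue for the current processor.
	 */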
	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	goto out;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);
	nvme_free_queues(dev);

 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result = -EBUSY;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		goto kill_kthread;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.7");
module_init(nvme_init);
module_exit(nvme_exit);