/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define NVME_IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
/*
 * Represents an NVM Express device. Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	int db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u32 max_hw_sectors;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;
	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;
	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}
typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}
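
/*
 * The cmdid_data[] flexible array at the end of struct nvme_queue holds a
 * bitmap of q_depth command-ID bits, immediately followed by an array of
 * struct nvme_cmd_info (one slot per command ID); nvme_cmd_info() above
 * skips past the bitmap words to reach that array.
 */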
/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 *
 * Allocate a Command ID for a queue. The data passed in will
 * be passed to the completion handler. This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;

	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}
/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held. May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}
/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}
/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries. You can't see it in this data structure because C doesn't let
 * me express that. Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	void *private;		/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist sg[0];
};

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
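/*
 * Worst case, a transfer of "size" bytes needs one 8-byte PRP entry per
 * PAGE_SIZE of data plus one extra for misalignment of the first page.
 * Each PRP list page holds PAGE_SIZE / 8 entries, but the last entry is
 * reserved to chain to the next list page, hence the PAGE_SIZE - 8 divisor.
 */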
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
	}

	return iod;
}

static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);

	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, bio);
	put_nvmeq(nvmeq);
	wake_up_process(nvme_thread);
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	nvme_free_iod(dev, iod);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		requeue_bio(dev, bio);
	} else {
		bio_endio(bio, 0);
	}
}
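
/*
 * nvme_setup_prps() below builds the PRP entries for a command. When a
 * PRP list page fills up, its last slot is converted into a pointer to a
 * newly allocated list page and the displaced entry moves to slot 0 of
 * that page; nvme_free_iod() walks the same chain when freeing the I/O.
 */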
/* length is in bytes. gfp flags indicates whether we may sleep. */
static int nvme_setup_prps(struct nvme_dev *dev,
			struct nvme_common_command *cmd, struct nvme_iod *iod,
			int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}
/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(iod->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}
static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held. May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
	if (result < 0)
		goto free_iod;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}
static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}
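
/*
 * Bit 0 of each completion's status field is the phase tag. The controller
 * flips it every time it wraps around the completion queue, so an entry
 * whose phase bit still matches nvmeq->cq_phase is new; once we wrap our
 * own head index we invert cq_phase to track the next pass.
 */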
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE. Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}
static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;

	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];

	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;

	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success. If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
				unsigned nsid, dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
			unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}
/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @queue: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_cancel_ios(nvmeq, false);
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	nvme_free_queue_mem(nvmeq);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
						sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
					    int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result = 0;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			result = -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			result = -ENODEV;
		}
	}

	if (result) {
		nvme_free_queue_mem(nvmeq);
		return result;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}
static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(int, length, PAGE_SIZE - offset), offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int status;
	struct nvme_iod *iod;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);

	nvmeq = get_nvmeq(dev);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled. We may be preempted at any point, and be rescheduled
	 * to a different CPU. That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);
	return status;
}
static int nvme_user_admin_cmd(struct nvme_dev *dev,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *uninitialized_var(iod);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}
	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_cancel_ios(nvmeq, true);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}
static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;

	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}
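
/*
 * The Number of Queues feature is zero-based: the low 16 bits of both the
 * request and the returned result describe submission queues and the high
 * 16 bits completion queues.  The controller may grant fewer queues than
 * requested, so set_queue_count() below returns the smaller of the two
 * granted counts.
 */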
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
								db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
								NVME_Q_DEPTH);
	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}
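
	/*
	 * Any remaining possible CPUs share the I/O queues we did create:
	 * alias their per-CPU slots to an existing queue so get_nvmeq()
	 * always finds a valid queue for the running CPU.
	 */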
	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}
static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts) {
		int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
	}

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	goto out;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}
static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;

	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	result = nvme_set_instance(dev);
	if (result)
		goto disable;

	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);

 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}
/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_kthread;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.8");

module_init(nvme_init);
module_exit(nvme_exit);