/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/types.h>

#include <linux/nbd.h>
#define LO_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_SET_SOCK: return "set-sock";
	case NBD_SET_BLKSIZE: return "set-blksize";
	case NBD_SET_SIZE: return "set-size";
	case NBD_DO_IT: return "do-it";
	case NBD_CLEAR_SOCK: return "clear-sock";
	case NBD_CLEAR_QUE: return "clear-que";
	case NBD_PRINT_DEBUG: return "print-debug";
	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
	case NBD_DISCONNECT: return "disconnect";
	case BLKROSET: return "set-read-only";
	case BLKFLSBUF: return "flush-buffer-cache";
	}
	return "unknown";
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	}
	return "invalid";
}
#endif /* NDEBUG */
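
/*
 * Complete a request back to the block layer, translating the driver's
 * req->errors count into -EIO. __blk_end_request_all() must be called
 * with the queue lock held, so it is taken here around the completion.
 */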
static void nbd_end_request(struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void sock_shutdown(struct nbd_device *lo, int lock)
{
	/* Forcibly shutdown the socket causing all listeners
	 * to error
	 *
	 * FIXME: This code is duplicated from sys_shutdown, but
	 * there should be a more generic interface rather than
	 * calling socket ops directly here */
	if (lock)
		mutex_lock(&lo->tx_lock);
	if (lo->sock) {
		dev_warn(disk_to_dev(lo->disk), "shutting down socket\n");
		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
		lo->sock = NULL;
	}
	if (lock)
		mutex_unlock(&lo->tx_lock);
}
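
/*
 * Timer callback armed around each send in sock_xmit(). Delivering
 * SIGKILL makes the blocked sendmsg return; the signal check in
 * sock_xmit() then aborts the transfer and shuts the socket down.
 */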
static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);
	force_sig(SIGKILL, task);
}
/*
 * Send or receive packet. Loops until the whole buffer has been
 * transferred or an error occurs; only SIGKILL is allowed to interrupt
 * the transfer.
 */
static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = lo->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(lo->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (lo->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + lo->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (lo->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(lo, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);

	return result;
}
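
/*
 * Transmit one bio_vec segment. The page is kmap()ed for the duration
 * of the send, since highmem pages are not always directly addressable.
 */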
static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));
	request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
	request.len = htonl(size);
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
			lo->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req));
	result = sock_xmit(lo, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(lo->disk),
			"Send control failed (result %d)\n", result);
		goto error_out;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec *bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(req, iter))
				flags = MSG_MORE;
			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
					lo->disk->disk_name, req, bvec->bv_len);
			result = sock_send_bvec(lo, bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(lo->disk),
					"Send data failed (result %d)\n",
					result);
				goto error_out;
			}
		}
	}
	return 0;

error_out:
	return -EIO;
}
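
/*
 * Look up the in-flight request matching a reply handle. Waits until
 * the request is no longer being transmitted (lo->active_req), then
 * unlinks it from queue_head. Returns an ERR_PTR if interrupted by a
 * signal, or -ENOENT if no matching request is queued.
 */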
static struct request *nbd_find_request(struct nbd_device *lo,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
	if (unlikely(err))
		goto out;

	spin_lock(&lo->queue_lock);
	list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&lo->queue_lock);
		return req;
	}
	spin_unlock(&lo->queue_lock);

	err = -ENOENT;

out:
	return ERR_PTR(err);
}
static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *lo)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(lo->disk),
			"Receive control failed (result %d)\n", result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(lo, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n",
			reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			lo->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		struct req_iterator iter;
		struct bio_vec *bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(lo, bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
				lo->disk->disk_name, req, bvec->bv_len);
		}
	}
	return req;
harderror:
	lo->harderror = result;
	return NULL;
}
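
/*
 * Sysfs attribute: while a server connection is active, the pid of the
 * process running NBD_DO_IT is exported as /sys/block/nbd<N>/pid.
 */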
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
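
/*
 * Receive loop, run in the context of the process that issued
 * NBD_DO_IT. Completes replies until nbd_read_stat() fails, i.e. until
 * the socket is shut down or a protocol error occurs.
 */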
static int nbd_do_it(struct nbd_device *lo)
{
	struct request *req;
	int ret;

	BUG_ON(lo->magic != LO_MAGIC);

	lo->pid = task_pid_nr(current);
	ret = device_create_file(disk_to_dev(lo->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n");
		lo->pid = 0;
		return ret;
	}

	while ((req = nbd_read_stat(lo)) != NULL)
		nbd_end_request(req);

	device_remove_file(disk_to_dev(lo->disk), &pid_attr);
	lo->pid = 0;
	return 0;
}
static void nbd_clear_que(struct nbd_device *lo)
{
	struct request *req;

	BUG_ON(lo->magic != LO_MAGIC);

	/*
	 * Because we have set lo->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(lo->sock);
	BUG_ON(lo->active_req);

	while (!list_empty(&lo->queue_head)) {
		req = list_entry(lo->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}
}
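
/*
 * Transmit one request to the server. On success the request is parked
 * on queue_head until the matching reply arrives; any failure completes
 * it immediately with an error.
 */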
static void nbd_handle_req(struct nbd_device *lo, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	nbd_cmd(req) = NBD_CMD_READ;
	if (rq_data_dir(req) == WRITE) {
		nbd_cmd(req) = NBD_CMD_WRITE;
		if (lo->flags & NBD_READ_ONLY) {
			dev_err(disk_to_dev(lo->disk),
				"Write on read-only\n");
			goto error_out;
		}
	}

	req->errors = 0;

	mutex_lock(&lo->tx_lock);
	if (unlikely(!lo->sock)) {
		mutex_unlock(&lo->tx_lock);
		dev_err(disk_to_dev(lo->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	lo->active_req = req;

	if (nbd_send_req(lo, req) != 0) {
		dev_err(disk_to_dev(lo->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(req);
	} else {
		spin_lock(&lo->queue_lock);
		list_add(&req->queuelist, &lo->queue_head);
		spin_unlock(&lo->queue_lock);
	}

	lo->active_req = NULL;
	mutex_unlock(&lo->tx_lock);
	wake_up_all(&lo->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(req);
}
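
/*
 * Per-device kernel thread: pulls requests that do_nbd_request() parked
 * on waiting_queue and sends them over the socket, so the block layer's
 * queue lock is never held across network I/O.
 */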
static int nbd_thread(void *data)
{
	struct nbd_device *lo = data;
	struct request *req;

	set_user_nice(current, -20);
	while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(lo->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&lo->waiting_queue));

		/* extract request */
		if (list_empty(&lo->waiting_queue))
			continue;

		spin_lock_irq(&lo->queue_lock);
		req = list_entry(lo->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&lo->queue_lock);

		/* handle request */
		nbd_handle_req(lo, req);
	}
	return 0;
}
/*
 * We always wait for the result of a write, for now. It would be nice
 * to make it optional in the future, e.g.:
 * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
 *	{ printk("Warning: Ignoring result!\n"); nbd_end_request(req); }
 */
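/*
 * Request-queue callback, entered with the queue lock held. Each
 * request is handed off to the per-device thread via waiting_queue;
 * the queue lock is dropped around the hand-off and reacquired before
 * fetching the next request.
 */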
static void do_nbd_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *lo;

		spin_unlock_irq(q->queue_lock);

		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
				req->rq_disk->disk_name, req, req->cmd_type);

		lo = req->rq_disk->private_data;

		BUG_ON(lo->magic != LO_MAGIC);

		if (unlikely(!lo->sock)) {
			dev_err(disk_to_dev(lo->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&lo->queue_lock);
		list_add_tail(&req->queuelist, &lo->waiting_queue);
		spin_unlock_irq(&lo->queue_lock);

		wake_up(&lo->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
/* Must be called with tx_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");

		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;
		if (!lo->sock)
			return -EINVAL;
		nbd_send_req(lo, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct file *file;

		lo->sock = NULL;
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		BUG_ON(!list_empty(&lo->queue_head));
		if (file)
			fput(file);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct file *file;
		if (lo->file)
			return -EBUSY;
		file = fget(arg);
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			if (S_ISSOCK(inode->i_mode)) {
				lo->file = file;
				lo->sock = SOCKET_I(inode);
				if (max_part > 0)
					bdev->bd_invalidated = 1;
				return 0;
			} else {
				fput(file);
			}
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		lo->blksize = arg;
		lo->bytesize &= ~(lo->blksize-1);
		bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		lo->bytesize = arg & ~(lo->blksize-1);
		bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		lo->xmit_timeout = arg * HZ;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		lo->bytesize = ((u64) arg) * lo->blksize;
		bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		struct file *file;
		int error;

		if (lo->pid)
			return -EBUSY;
		if (!lo->file)
			return -EINVAL;

		mutex_unlock(&lo->tx_lock);

		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
		if (IS_ERR(thread)) {
			mutex_lock(&lo->tx_lock);
			return PTR_ERR(thread);
		}
		wake_up_process(thread);
		error = nbd_do_it(lo);
		kthread_stop(thread);

		mutex_lock(&lo->tx_lock);
		if (error)
			return error;
		sock_shutdown(lo, 0);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
		if (file)
			fput(file);
		lo->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(lo->disk, 0);
		if (max_part > 0)
			ioctl_by_bdev(bdev, BLKRRPART, 0);
		return lo->harderror;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(lo->disk),
			"next = %p, prev = %p, head = %p\n",
			lo->queue_head.next, lo->queue_head.prev,
			&lo->queue_head);
		return 0;
	}
	return -ENOTTY;
}
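
/*
 * ioctl entry point from block_device_operations. Serializes all
 * configuration against the transmit path by taking tx_lock around
 * __nbd_ioctl().
 */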
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *lo = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(lo->magic != LO_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

	mutex_lock(&lo->tx_lock);
	error = __nbd_ioctl(bdev, lo, cmd, arg);
	mutex_unlock(&lo->tx_lock);

	return error;
}
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */
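
/*
 * Module init: validate the nbds_max/max_part parameters, allocate one
 * gendisk and request queue per device, and register major NBD_MAJOR.
 */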
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].file = NULL;
		nbd_dev[i].magic = LO_MAGIC;
		nbd_dev[i].flags = 0;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

/* nbds_max and debugflags are unsigned int, so use the uint param type */
module_param(nbds_max, uint, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
#ifndef NDEBUG
module_param(debugflags, uint, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif