/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
/*
 * All wire protocol details (storage protocol between the guest and the host)
 * are consolidated here.
 *
 * Begin protocol definitions.
 */

/*
 * Version history:
 * V1 Beta: 0.1
 * V1 RC < 2008/1/31: 1.0
 * V1 RC > 2008/1/31: 2.0
 * Win7: 4.2
 */

#define VMSTOR_CURRENT_MAJOR	4
#define VMSTOR_CURRENT_MINOR	2

/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
	VSTOR_OPERATION_COMPLETE_IO		= 1,
	VSTOR_OPERATION_REMOVE_DEVICE		= 2,
	VSTOR_OPERATION_EXECUTE_SRB		= 3,
	VSTOR_OPERATION_RESET_LUN		= 4,
	VSTOR_OPERATION_RESET_ADAPTER		= 5,
	VSTOR_OPERATION_RESET_BUS		= 6,
	VSTOR_OPERATION_BEGIN_INITIALIZATION	= 7,
	VSTOR_OPERATION_END_INITIALIZATION	= 8,
	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
	VSTOR_OPERATION_ENUMERATE_BUS		= 11,
	VSTOR_OPERATION_MAXIMUM			= 11
};

/*
 * Platform neutral description of a scsi request -
 * this remains the same across the wire regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define STORVSC_MAX_CMD_LEN			0x10
#define STORVSC_SENSE_BUFFER_SIZE		0x12
#define STORVSC_MAX_BUF_LEN_WITH_PADDING	0x14

struct vmscsi_request {
	u16 length;
	u8 srb_status;
	u8 scsi_status;

	u8  port_number;
	u8  path_id;
	u8  target_id;
	u8  lun;

	u8  cdb_length;
	u8  sense_info_length;
	u8  data_in;
	u8  reserved;

	u32 data_transfer_length;

	union {
		u8 cdb[STORVSC_MAX_CMD_LEN];
		u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
		u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
	};
} __attribute((packed));
/*
 * This structure is sent during the initialization phase to get the different
 * properties of the channel.
 */
struct vmstorage_channel_properties {
	u16 protocol_version;
	u8  path_id;
	u8  target_id;

	/* Note: port number is only really known on the client side */
	u32 port_number;
	u32 flags;
	u32 max_transfer_bytes;

	/*
	 * This id is unique for each channel and will correspond with
	 * vendor specific data in the inquiry data.
	 */
	u64 unique_id;
} __packed;

/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
	/* Major (MSW) and minor (LSW) version numbers. */
	u16 major_minor;

	/*
	 * Revision number is auto-incremented whenever this file is changed
	 * (See FILL_VMSTOR_REVISION macro above).  Mismatch does not
	 * definitely indicate incompatibility--but it does indicate mismatched
	 * builds.
	 * This is only used on the windows side. Just set it to 0.
	 */
	u16 revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG		0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG	0x2
struct vstor_packet {
	/* Requested operation type */
	enum vstor_packet_operation operation;

	/* Flags - see below for values */
	u32 flags;

	/* Status of the request returned from the server side. */
	u32 status;

	/* Data payload area */
	union {
		/*
		 * Structure used to forward SCSI commands from the
		 * client to the server.
		 */
		struct vmscsi_request vm_srb;

		/* Structure used to query channel properties. */
		struct vmstorage_channel_properties storage_channel_properties;

		/* Used during version negotiations. */
		struct vmstorage_protocol_version version;
	};
} __packed;

/*
 * Packet Flags:
 *
 * This flag indicates that the server should send back a completion for this
 * packet.
 */
#define REQUEST_COMPLETION_FLAG	0x1

/* Matches Windows-end */
enum storvsc_request_type {
	WRITE_TYPE = 0,
	READ_TYPE,
	UNKNOWN_TYPE,
};

/*
 * SRB status codes and masks; a subset of the codes used here.
 */
#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_INVALID_LUN		0x20
#define SRB_STATUS_SUCCESS		0x01
#define SRB_STATUS_ABORTED		0x02
#define SRB_STATUS_ERROR		0x04

/*
 * This is the end of Protocol specific defines.
 */
/*
 * We setup a mempool to allocate request structures for this driver
 * on a per-lun basis. The following define specifies the number of
 * elements in the pool.
 */
#define STORVSC_MIN_BUF_NR			64
static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

#define STORVSC_MAX_IO_REQUESTS			128

/*
 * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
 * reality, the path/target is not used (i.e., always set to 0) so our
 * scsi host adapter essentially has 1 bus with 1 target that contains
 * up to STORVSC_MAX_LUNS_PER_TARGET (64) luns.
 */
#define STORVSC_MAX_LUNS_PER_TARGET		64
#define STORVSC_MAX_TARGETS			1
#define STORVSC_MAX_CHANNELS			1
struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_device *device;

	/* Synchronize the request/response if needed */
	struct completion wait_event;

	unsigned char *sense_buffer;
	struct hv_multipage_buffer data_buffer;
	struct vstor_packet vstor_packet;
};


/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
	struct hv_device *device;

	bool	 destroy;
	bool	 drain_notify;
	atomic_t num_outstanding_req;
	struct Scsi_Host *host;

	wait_queue_head_t waiting_to_drain;

	/*
	 * Each unique Port/Path/Target represents 1 channel, i.e., a scsi
	 * controller. In reality, the pathid and targetid are always 0
	 * and the port is set by us.
	 */
	unsigned int port_number;
	unsigned char path_id;
	unsigned char target_id;

	/* Used for vsc/vsp channel reset process */
	struct storvsc_cmd_request init_request;
	struct storvsc_cmd_request reset_request;
};

struct stor_mem_pools {
	struct kmem_cache *request_pool;
	mempool_t *request_mempool;
};

struct hv_host_device {
	struct hv_device *dev;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_scan_work {
	struct work_struct work;
	struct Scsi_Host *host;
	uint lun;
};
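
/*
 * The scan/remove helpers below run from a workqueue: they are scheduled
 * from the VMBUS channel callback, which runs in a context that cannot
 * sleep, while scsi_scan_target()/scsi_remove_device() may block.
 */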
static void storvsc_device_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	uint lun;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	lun = wrk->lun;

	sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
	if (!sdev)
		goto done;
	scsi_rescan_device(&sdev->sdev_gendev);
	scsi_device_put(sdev);

done:
	kfree(wrk);
}

static void storvsc_bus_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	int id, order_id;

	wrk = container_of(work, struct storvsc_scan_work, work);
	for (id = 0; id < wrk->host->max_id; ++id) {
		if (wrk->host->reverse_ordering)
			order_id = wrk->host->max_id - id - 1;
		else
			order_id = id;

		scsi_scan_target(&wrk->host->shost_gendev, 0,
				 order_id, SCAN_WILD_CARD, 1);
	}
	kfree(wrk);
}

static void storvsc_remove_lun(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	if (!scsi_host_get(wrk->host))
		goto done;

	sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
	scsi_host_put(wrk->host);

done:
	kfree(wrk);
}
/*
 * Major/minor macros.  Minor version is in LSB, meaning that earlier flat
 * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
 */
static inline u16 storvsc_get_version(u8 major, u8 minor)
{
	u16 version;

	version = ((major << 8) | minor);
	return version;
}

/*
 * We can get incoming messages from the host that are not in response to
 * messages that we have sent out. An example of this would be messages
 * received by the guest to notify dynamic addition/removal of LUNs. To
 * deal with potential race conditions where the driver may be in the
 * midst of being unloaded when we might receive an unsolicited message
 * from the host, we have implemented a mechanism to guarantee sequential
 * consistency:
 *
 * 1) Once the device is marked as being destroyed, we will fail all
 *    outgoing messages.
 * 2) We permit incoming messages when the device is being destroyed,
 *    only to properly account for messages already sent out.
 */

static inline struct storvsc_device *get_out_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (stor_device && stor_device->destroy)
		stor_device = NULL;

	return stor_device;
}


static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
	dev->drain_notify = true;
	wait_event(dev->waiting_to_drain,
		   atomic_read(&dev->num_outstanding_req) == 0);
	dev->drain_notify = false;
}

static inline struct storvsc_device *get_in_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (!stor_device)
		goto get_in_err;

	/*
	 * If the device is being destroyed; allow incoming
	 * traffic only to cleanup outstanding requests.
	 */
	if (stor_device->destroy &&
	    (atomic_read(&stor_device->num_outstanding_req) == 0))
		stor_device = NULL;

get_in_err:
	return stor_device;
}
static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}
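
/*
 * Hyper-V describes an I/O buffer to the host as a PFN array plus a starting
 * offset, so every scatterlist handed down must be "dense": only the first
 * entry may start at a non-zero offset and only the last may end before a
 * page boundary. do_bounce_buffer() returns -1 when the list already meets
 * that requirement, or the index of the first entry that violates it, in
 * which case the data is staged through a bounce buffer.
 */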
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* No need to check */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure 1st one does not have hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure last one does not have hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len,
						int write)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;
	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	sg_init_table(bounce_sgl, num_pages);

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}
/* Disgusting wrapper functions */
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
{
	void *addr = kmap_atomic(sg_page(sgl + idx));
	return (unsigned long)addr;
}

static inline void sg_kunmap_atomic(unsigned long addr)
{
	kunmap_atomic((void *)addr);
}


/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count,
					    unsigned int bounce_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr = sg_kmap_atomic(bounce_sgl, j);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				sg_kunmap_atomic(bounce_addr);
				j++;

				/*
				 * It is possible that the number of elements
				 * in the bounce buffer may not be equal to
				 * the number of elements in the original
				 * scatter list. Handle this correctly.
				 */
				if (j == bounce_sgl_count) {
					/*
					 * We are done; cleanup and return.
					 */
					sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
					local_irq_restore(flags);
					return total_copied;
				}

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr = sg_kmap_atomic(bounce_sgl, j);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				sg_kunmap_atomic(bounce_addr);
			}
		}

		sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
	}

	local_irq_restore(flags);

	return total_copied;
}
/* Assume the bounce_sgl has enough room, i.e., it was built by create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr = sg_kmap_atomic(bounce_sgl, j);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				sg_kunmap_atomic(bounce_addr);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr = sg_kmap_atomic(bounce_sgl, j);
			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				sg_kunmap_atomic(bounce_addr);
			}
		}

		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
	}

	local_irq_restore(flags);

	return total_copied;
}
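
/*
 * Initialization handshake with the VSP, performed once the channel is
 * open. Four request/response exchanges run in sequence, each bounded by a
 * 5 second timeout: BEGIN_INITIALIZATION, QUERY_PROTOCOL_VERSION (we
 * propose VMSTOR_CURRENT_MAJOR.VMSTOR_CURRENT_MINOR), QUERY_PROPERTIES
 * (which returns the path/target ids), and END_INITIALIZATION.
 */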
static int storvsc_channel_init(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;


	/* reuse the packet for version range supported */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	vstor_packet->version.major_minor =
		storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);

	/*
	 * The revision number is only used in Windows; set it to 0.
	 */
	vstor_packet->version.revision = 0;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;


	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->storage_channel_properties.port_number =
					stor_device->port_number;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;

	stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
	stor_device->target_id
		= vstor_packet->storage_channel_properties.target_id;

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;

cleanup:
	return ret;
}
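
/*
 * Map SRB-level failures to SCSI midlayer actions: a plain SRB_STATUS_ERROR
 * offlines the device (the host has already exhausted its own recovery,
 * except for ATA pass-through commands), an invalid LUN schedules removal
 * of that LUN, and an aborted command with valid autosense reporting
 * ASC/ASCQ 0x2a/0x09 ("capacity data has changed") triggers a rescan and a
 * requeue of the I/O.
 */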
static void storvsc_handle_error(struct vmscsi_request *vm_srb,
				 struct scsi_cmnd *scmnd,
				 struct Scsi_Host *host,
				 u8 asc, u8 ascq)
{
	struct storvsc_scan_work *wrk;
	void (*process_err_fn)(struct work_struct *work);
	bool do_work = false;

	switch (vm_srb->srb_status) {
	case SRB_STATUS_ERROR:
		/*
		 * If there is an error; offline the device since all
		 * error recovery strategies would have already been
		 * deployed on the host side. However, if the command
		 * were a pass-through command deal with it appropriately.
		 */
		switch (scmnd->cmnd[0]) {
		case ATA_16:
		case ATA_12:
			set_host_byte(scmnd, DID_PASSTHROUGH);
			break;
		default:
			set_host_byte(scmnd, DID_TARGET_FAILURE);
		}
		break;
	case SRB_STATUS_INVALID_LUN:
		do_work = true;
		process_err_fn = storvsc_remove_lun;
		break;
	case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
		if ((asc == 0x2a) && (ascq == 0x9)) {
			do_work = true;
			process_err_fn = storvsc_device_scan;
			/*
			 * Retry the I/O that triggered this.
			 */
			set_host_byte(scmnd, DID_REQUEUE);
		}
		break;
	}

	if (!do_work)
		return;

	/*
	 * We need to schedule work to process this error; schedule it.
	 */
	wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
	if (!wrk) {
		set_host_byte(scmnd, DID_TARGET_FAILURE);
		return;
	}

	wrk->host = host;
	wrk->lun = vm_srb->lun;
	INIT_WORK(&wrk->work, process_err_fn);
	schedule_work(&wrk->work);
}
static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	struct stor_mem_pools *memp = scmnd->device->hostdata;
	struct Scsi_Host *host;
	struct storvsc_device *stor_dev;
	struct hv_device *dev = host_dev->dev;

	stor_dev = get_in_stor_device(dev);
	host = stor_dev->host;

	vm_srb = &cmd_request->vstor_packet.vm_srb;
	if (cmd_request->bounce_sgl_count) {
		if (vm_srb->data_in == READ_TYPE)
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd),
					cmd_request->bounce_sgl_count);
		destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);
	}

	scmnd->result = vm_srb->scsi_status;

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
		storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
				     sense_hdr.ascq);

	scsi_set_resid(scmnd,
		cmd_request->data_buffer.len -
		vm_srb->data_transfer_length);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	scsi_done_fn(scmnd);

	mempool_free(cmd_request, memp->request_mempool);
}
static void storvsc_on_io_completion(struct hv_device *device,
				  struct vstor_packet *vstor_packet,
				  struct storvsc_cmd_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *stor_pkt;

	stor_device = hv_get_drvdata(device);
	stor_pkt = &request->vstor_packet;

	/*
	 * The current SCSI handling on the host side does
	 * not correctly handle:
	 * INQUIRY command with page code parameter set to 0x80
	 * MODE_SENSE command with cmd[2] == 0x1c
	 *
	 * Setup srb and scsi status so this won't be fatal.
	 * We do this so we can distinguish truly fatal failures
	 * (srb status == 0x4) and off-line the device in that case.
	 */

	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
	    (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
		vstor_packet->vm_srb.scsi_status = 0;
		vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
	}


	/* Copy over the status...etc */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
	stor_pkt->vm_srb.sense_info_length =
		vstor_packet->vm_srb.sense_info_length;

	if (vstor_packet->vm_srb.scsi_status != 0 ||
	    vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
		dev_warn(&device->device,
			 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
			 stor_pkt->vm_srb.cdb[0],
			 vstor_packet->vm_srb.scsi_status,
			 vstor_packet->vm_srb.srb_status);
	}

	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
		/* CHECK_CONDITION */
		if (vstor_packet->vm_srb.srb_status &
			SRB_STATUS_AUTOSENSE_VALID) {
			/* autosense data available */
			dev_warn(&device->device,
				 "stor pkt %p autosense data valid - len %d\n",
				 request,
				 vstor_packet->vm_srb.sense_info_length);

			memcpy(request->sense_buffer,
			       vstor_packet->vm_srb.sense_data,
			       vstor_packet->vm_srb.sense_info_length);
		}
	}

	stor_pkt->vm_srb.data_transfer_length =
		vstor_packet->vm_srb.data_transfer_length;

	storvsc_command_completion(request);

	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
	    stor_device->drain_notify)
		wake_up(&stor_device->waiting_to_drain);
}
static void storvsc_on_receive(struct hv_device *device,
			     struct vstor_packet *vstor_packet,
			     struct storvsc_cmd_request *request)
{
	struct storvsc_scan_work *work;
	struct storvsc_device *stor_device;

	switch (vstor_packet->operation) {
	case VSTOR_OPERATION_COMPLETE_IO:
		storvsc_on_io_completion(device, vstor_packet, request);
		break;

	case VSTOR_OPERATION_REMOVE_DEVICE:
	case VSTOR_OPERATION_ENUMERATE_BUS:
		stor_device = get_in_stor_device(device);
		work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
		if (!work)
			return;

		INIT_WORK(&work->work, storvsc_bus_scan);
		work->host = stor_device->host;
		schedule_work(&work->work);
		break;

	default:
		break;
	}
}
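
/*
 * Channel callback: drains the VMBUS ring. The request_id returned by
 * vmbus_recvpacket() is the cookie we handed to vmbus_sendpacket(), i.e., a
 * pointer to the originating storvsc_cmd_request, so internally generated
 * requests (init/reset) are completed directly and everything else is
 * routed through storvsc_on_receive().
 */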
static void storvsc_on_channel_callback(void *context)
{
	struct hv_device *device = (struct hv_device *)context;
	struct storvsc_device *stor_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
	struct storvsc_cmd_request *request;
	int ret;

	stor_device = get_in_stor_device(device);
	if (!stor_device)
		return;

	do {
		ret = vmbus_recvpacket(device->channel, packet,
				       ALIGN(sizeof(struct vstor_packet), 8),
				       &bytes_recvd, &request_id);
		if (ret == 0 && bytes_recvd > 0) {

			request = (struct storvsc_cmd_request *)
					(unsigned long)request_id;

			if ((request == &stor_device->init_request) ||
			    (request == &stor_device->reset_request)) {

				memcpy(&request->vstor_packet, packet,
				       sizeof(struct vstor_packet));
				complete(&request->wait_event);
			} else {
				storvsc_on_receive(device,
						(struct vstor_packet *)packet,
						request);
			}
		} else {
			break;
		}
	} while (1);

	return;
}
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
	struct vmstorage_channel_properties props;
	int ret;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	ret = vmbus_open(device->channel,
			 ring_size,
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, device);

	if (ret != 0)
		return ret;

	ret = storvsc_channel_init(device);

	return ret;
}
static int storvsc_dev_remove(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	unsigned long flags;

	stor_device = hv_get_drvdata(device);

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	stor_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Close the channel */
	vmbus_close(device->channel);

	kfree(stor_device);
	return 0;
}
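
/*
 * Fire an EXECUTE_SRB request at the host. Requests that carry data use a
 * multi-page buffer descriptor (the PFN array set up by the caller);
 * requests without data go as plain in-band packets. The
 * outstanding-request count incremented here feeds storvsc_wait_to_drain().
 */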
static int storvsc_do_io(struct hv_device *device,
			 struct storvsc_cmd_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	int ret = 0;

	vstor_packet = &request->vstor_packet;
	stor_device = get_out_stor_device(device);

	if (!stor_device)
		return -ENODEV;


	request->device = device;


	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);

	vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;

	vstor_packet->vm_srb.data_transfer_length =
		request->data_buffer.len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

	if (request->data_buffer.len) {
		ret = vmbus_sendpacket_multipagebuffer(device->channel,
				&request->data_buffer,
				vstor_packet,
				sizeof(struct vstor_packet),
				(unsigned long)request);
	} else {
		ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		return ret;

	atomic_inc(&stor_device->num_outstanding_req);

	return ret;
}
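
/*
 * Per-LUN memory pools for storvsc_cmd_request. Backing each LUN with a
 * mempool of STORVSC_MIN_BUF_NR preallocated elements keeps request
 * allocation from failing outright under memory pressure, since
 * queuecommand may run in atomic context where only GFP_ATOMIC
 * allocations are possible.
 */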
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp;
	int number = STORVSC_MIN_BUF_NR;

	memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
	if (!memp)
		return -ENOMEM;

	memp->request_pool =
		kmem_cache_create(dev_name(&sdevice->sdev_dev),
				sizeof(struct storvsc_cmd_request), 0,
				SLAB_HWCACHE_ALIGN, NULL);

	if (!memp->request_pool)
		goto err0;

	memp->request_mempool = mempool_create(number, mempool_alloc_slab,
						mempool_free_slab,
						memp->request_pool);

	if (!memp->request_mempool)
		goto err1;

	sdevice->hostdata = memp;

	return 0;

err1:
	kmem_cache_destroy(memp->request_pool);

err0:
	kfree(memp);
	return -ENOMEM;
}

static void storvsc_device_destroy(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp = sdevice->hostdata;

	mempool_destroy(memp->request_mempool);
	kmem_cache_destroy(memp->request_pool);
	kfree(memp);
	sdevice->hostdata = NULL;
}
static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	sdevice->no_write_same = 1;

	return 0;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;	/* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct hv_device *device = host_dev->dev;

	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;


	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return FAILED;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0)
		return TIMEOUT_ERROR;


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 * There is a potential race here where the host may be in
	 * the process of responding when we return from here.
	 * Just wait for all in-transit packets to be accounted for
	 * before we return from here.
	 */
	storvsc_wait_to_drain(stor_device);

	return SUCCESS;
}
static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
	bool allowed = true;
	u8 scsi_op = scmnd->cmnd[0];

	switch (scsi_op) {
	/* the host does not handle WRITE_SAME, log accidental usage */
	case WRITE_SAME:
	/*
	 * smartd sends this command and the host does not handle
	 * this. So, don't send it.
	 */
	case SET_WINDOW:
		scmnd->result = ILLEGAL_REQUEST << 16;
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}
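
/*
 * queuecommand returns 0 once the request has been handed to the VSC (the
 * command completes later via the channel callback), or a SCSI_MLQUEUE_*
 * busy code so the midlayer retries when a transient resource shortage
 * (mempool element, bounce pages, ring space) prevents the send.
 */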
static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev = shost_priv(host);
	struct hv_device *dev = host_dev->dev;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size = 0;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;
	struct stor_mem_pools *memp = scmnd->device->hostdata;

	if (!storvsc_scsi_cmd_ok(scmnd)) {
		scmnd->scsi_done(scmnd);
		return 0;
	}

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = mempool_alloc(memp->request_mempool,
				    GFP_ATOMIC);

	/*
	 * We might be invoked in an interrupt context; hence
	 * mempool_alloc() can fail.
	 */
	if (!cmd_request)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));

	/* Setup the cmd request */
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	vm_srb = &cmd_request->vstor_packet.vm_srb;


	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		break;
	}


	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	cmd_request->sense_buffer = scmnd->sense_buffer;


	cmd_request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd),
						     vm_srb->data_in);
			if (!cmd_request->bounce_sgl) {
				ret = SCSI_MLQUEUE_HOST_BUSY;
				goto queue_error;
			}

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			if (vm_srb->data_in == WRITE_TYPE)
				copy_to_bounce_buffer(sgl,
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		cmd_request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			cmd_request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page((&sgl[i])));

	} else if (scsi_sglist(scmnd)) {
		cmd_request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		cmd_request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

	/* Invokes the vsc to start an IO */
	ret = storvsc_do_io(dev, cmd_request);

	if (ret == -EAGAIN) {
		/*
		 * No more ring space; free any bounce buffer and ask the
		 * midlayer to retry the command later.
		 */
		if (cmd_request->bounce_sgl_count)
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
		goto queue_error;
	}

	return 0;

queue_error:
	mempool_free(cmd_request, memp->request_mempool);
	scmnd->host_scribble = NULL;
	return ret;
}
static struct scsi_host_template scsi_driver = {
	.module =		THIS_MODULE,
	.name =			"storvsc_host_t",
	.bios_param =		storvsc_get_chs,
	.queuecommand =		storvsc_queuecommand,
	.eh_host_reset_handler =	storvsc_host_reset_handler,
	.slave_alloc =		storvsc_device_alloc,
	.slave_destroy =	storvsc_device_destroy,
	.slave_configure =	storvsc_device_configure,
	.cmd_per_lun =		1,
	/* STORVSC_MAX_IO_REQUESTS (128) max_queue * 1 target */
	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id =		-1,
	/* no use setting to 0 since ll_blk_rw reset it to 1 */
	/* currently 32 */
	.sg_tablesize =		MAX_MULTIPAGE_BUFFER_COUNT,
	.use_clustering =	DISABLE_CLUSTERING,
	/* Make sure we don't get a sg segment that crosses a page boundary */
	.dma_boundary =		PAGE_SIZE-1,
};
enum {
	SCSI_GUID,
	IDE_GUID,
};

static const struct hv_vmbus_device_id id_table[] = {
	/* SCSI guid */
	{ HV_SCSI_GUID,
	  .driver_data = SCSI_GUID
	},
	/* IDE guid */
	{ HV_IDE_GUID,
	  .driver_data = IDE_GUID
	},
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);
static int storvsc_probe(struct hv_device *device,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
	int target = 0;
	struct storvsc_device *stor_device;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	host_dev = shost_priv(host);
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;


	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
	if (!stor_device) {
		ret = -ENOMEM;
		goto err_out0;
	}

	stor_device->destroy = false;
	init_waitqueue_head(&stor_device->waiting_to_drain);
	stor_device->device = device;
	stor_device->host = host;
	hv_set_drvdata(device, stor_device);

	stor_device->port_number = host->host_no;
	ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
	if (ret)
		goto err_out1;

	host_dev->path = stor_device->path_id;
	host_dev->target = stor_device->target_id;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out2;

	if (!dev_is_ide) {
		scsi_scan_host(host);
	} else {
		target = (device->dev_instance.b[5] << 8 |
			 device->dev_instance.b[4]);
		ret = scsi_add_device(host, 0, target, 0);
		if (ret) {
			scsi_remove_host(host);
			goto err_out2;
		}
	}
	return 0;

err_out2:
	/*
	 * Once we have connected with the host, we need to invoke
	 * storvsc_dev_remove() to rollback this state; that call also
	 * frees up the stor_device, hence the jump around the
	 * err_out1 label.
	 */
	storvsc_dev_remove(device);
	goto err_out0;

err_out1:
	kfree(stor_device);

err_out0:
	scsi_host_put(host);
	return ret;
}
static int storvsc_remove(struct hv_device *dev)
{
	struct storvsc_device *stor_device = hv_get_drvdata(dev);
	struct Scsi_Host *host = stor_device->host;

	scsi_remove_host(host);
	storvsc_dev_remove(dev);
	scsi_host_put(host);

	return 0;
}

static struct hv_driver storvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = storvsc_probe,
	.remove = storvsc_remove,
};
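
/*
 * A sketch of the check below, assuming the default 20-page ring: the
 * usable ring data area is 19 pages (one page holds the ring indices), and
 * each worst-case request occupies the 8-byte-aligned size of a
 * multi-page-buffer packet descriptor plus the vstor packet and the
 * completion cookie. The module refuses to load if fewer than
 * STORVSC_MAX_IO_REQUESTS such requests would fit in the ring.
 */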
static int __init storvsc_drv_init(void)
{
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		sizeof(struct vstor_packet) + sizeof(u64),
		sizeof(u64)));

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -EINVAL;

	return vmbus_driver_register(&storvsc_drv);
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);