/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2012  LSI Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *  FILE: megaraid_sas_fp.c
 *
 *  Authors: LSI Corporation
 *           Sumant Patro
 *           Varad Talamacki
 *           Manoj Jose
 *
 *  Send feedback to: <megaraidlinux@lsi.com>
 *
 *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
 *     ATTN: Linuxraid
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>
#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
#define TRUE 1

#define SPAN_DEBUG 0
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff
/* Prototypes */
void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
	struct LD_LOAD_BALANCE_INFO *lbInfo);

static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_FW_RAID_MAP_ALL *map);

u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}

/**
 * mega_div64_32 - Do a 64-bit division by a 32-bit divisor
 * @param dividend	: Dividend
 * @param divisor	: Divisor
 *
 * @return quotient
 **/
u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u32 remainder;
	u64 d;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return d;
}
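
/*
 * Illustrative use of the two helpers above (values hypothetical):
 * splitting a logical block address into a row number and an offset
 * within that row, for a row of 64 strips:
 *
 *	u64 row    = mega_div64_32(lba, 64);	-- lba / 64
 *	u32 offset = mega_mod64(lba, 64);	-- lba % 64
 *
 * do_div() is used instead of the '/' and '%' operators because native
 * 64-bit division is not available on all 32-bit architectures.
 */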
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_FW_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
}

u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_FW_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
/*
 * This function will validate Map info data provided by FW
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
	struct MR_LD_RAID *raid;
	int ldCount, num_lds;
	u16 ld;

	if (le32_to_cpu(pFwRaidMap->totalSize) !=
	    (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
	     (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
		printk(KERN_ERR "megasas: map info size 0x%x does not match expected size computed from ld count\n",
		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
				       sizeof(struct MR_LD_SPAN_MAP)) +
				      (sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(pFwRaidMap->ldCount))));
		printk(KERN_ERR "megasas: span map size %x, pFwRaidMap->totalSize: %x\n",
		       (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
		       le32_to_cpu(pFwRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(map, ldSpanInfo);

	mr_update_load_balance_params(map, lbInfo);

	num_lds = le32_to_cpu(map->raidMap.ldCount);

	/* Convert RAID capability values to CPU arch */
	for (ldCount = 0; ldCount < num_lds; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		raid = MR_LdRaidGet(ld, map);
		le32_to_cpus((u32 *)&raid->capability);
	}

	return 1;
}
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		    struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT *quad;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
			    le64_to_cpu(quad->logEnd) &&
			    (mega_mod64(row - le64_to_cpu(quad->logStart),
					le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64 blk;

					blk = mega_div64_32((row - le64_to_cpu(quad->logStart)),
							    le32_to_cpu(quad->diff));
					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}
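
/*
 * Worked example of the quad matching above (values hypothetical): a
 * quad with logStart = 0, logEnd = 99, diff = 2 and offsetInSpan = 10
 * matches row 8, since 0 <= 8 <= 99 and (8 - 0) % 2 == 0.  The span
 * block is then ((8 - 0) / 2 + 10) << stripeShift, i.e. block 14
 * scaled by the stripe size.
 */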
/*
 ******************************************************************************
 *
 * Function to print info about span set created in driver from FW raid map
 *
 * Inputs :
 * map    - LD map
 * ldSpanInfo - ldSpanInfo per HBA instance
 */
#if SPAN_DEBUG
static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{
	u8 span;
	u32 element;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		/* no HBA instance is in scope here, so log via printk */
		printk(KERN_DEBUG "megasas: LD %x: span_depth=%x\n",
			ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			printk(KERN_DEBUG "megasas: Span=%x, number of quads=%x\n",
				span,
				le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements));
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			if (span_set->span_row_data_width == 0)
				break;

			printk(KERN_DEBUG "megasas: Span Set %x: width=%x, diff=%x\n",
				element,
				(unsigned int)span_set->span_row_data_width,
				(unsigned int)span_set->diff);
			printk(KERN_DEBUG "megasas: logical LBA start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->log_start_lba,
				(long unsigned int)span_set->log_end_lba);
			printk(KERN_DEBUG "megasas: span row start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->span_row_start,
				(long unsigned int)span_set->span_row_end);
			printk(KERN_DEBUG "megasas: data row start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->data_row_start,
				(long unsigned int)span_set->data_row_end);
			printk(KERN_DEBUG "megasas: data strip start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->data_strip_start,
				(long unsigned int)span_set->data_strip_end);

			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) >=
					element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
						spanBlock[span].block_span_info.
						quad[element];
					printk(KERN_DEBUG "megasas: Span=%x, Quad=%x, diff=%x\n",
						span, element,
						le32_to_cpu(quad->diff));
					printk(KERN_DEBUG "megasas: offset_in_span=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->offsetInSpan));
					printk(KERN_DEBUG "megasas: logical start=0x%08lx, end=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->logStart),
						(long unsigned int)le64_to_cpu(quad->logEnd));
				}
			}
		}
	}
	return 0;
}
#endif
/*
 ******************************************************************************
 *
 * This routine calculates the Span block for given row using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld       - Logical drive number
 *    row      - Row number
 *    map      - LD map
 *
 * Outputs :
 *
 *    span      - Span number
 *    block     - Absolute Block number in the physical disk
 *    div_error - Divide error code.
 */
u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	u32 span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64 blk;
						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							<< raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
/*
 ******************************************************************************
 *
 * This routine calculates the row for given strip using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld       - Logical drive number
 *    strip    - Strip
 *    map      - LD map
 *
 * Outputs :
 *
 *    row      - row associated with strip
 */
static u64 get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset;
	u64 span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx, data width 0x%llx, span offset 0x%x\n",
			strip,
			(unsigned long long)span_set_Strip,
			(unsigned long long)span_set_Row,
			(unsigned long long)span_set->span_row_data_width,
			span_offset);
		dev_info(&instance->pdev->dev, "For strip 0x%llx row is 0x%llx\n",
			strip,
			(unsigned long long)span_set->data_row_start +
			(unsigned long long)span_set_Row + (span_offset - 1));
#endif
		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}
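
/*
 * Worked example (values hypothetical): for a span set with
 * data_strip_start = 0, data_row_start = 0, span_row_data_width = 3,
 * diff = 1 and strip_offset[0] = 0, strip 7 gives span_set_Strip = 7,
 * strip_offset = 7 % 3 = 1 and span_set_Row = (7 / 3) * 1 = 2.  The
 * span walk leaves span_offset = 1, so the row is 0 + 2 + (1 - 1) = 2.
 */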
/*
 ******************************************************************************
 *
 * This routine calculates the Start Strip for given row using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld       - Logical drive number
 *    row      - Row number
 *    map      - LD map
 *
 * Outputs :
 *
 *    Strip    - Start strip associated with row
 */
static u64 get_strip_from_row(struct megasas_instance *instance,
	u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 span, info;
	u64 strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
						le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row: returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}
/*
 ******************************************************************************
 *
 * This routine calculates the Physical Arm for given strip using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld       - Logical drive number
 *    strip    - Strip
 *    map      - LD map
 *
 * Outputs :
 *
 *    Phys Arm - Phys Arm associated with strip
 */
static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "get_arm_from_strip: for ld=0x%x strip=0x%lx arm is 0x%x\n",
			ld, (long unsigned int)strip,
			(strip_offset - span_offset));
#endif
		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip: returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);
	return -1;
}
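
/*
 * Worked example (values hypothetical): with two spans of data widths 2
 * and 3 (strip_offset[0] = 0, strip_offset[1] = 2), a strip whose offset
 * within the row is 3 falls in the second span; the loop above leaves
 * span_offset = 2, so the physical arm is 3 - 2 = 1.
 */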
/* This Function will return Phys arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
	struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u32 arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}
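
/*
 * In the RAID 1 case above, the logical arm is doubled because data and
 * mirror arms alternate in the array: logical arm 0 maps to physical
 * arm 0 with its mirror at arm 1, logical arm 1 to physical arm 2, and
 * so on.  This matches the physArm + 1 mirror lookup done in the
 * phy-params routines below.
 */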
/*
 ******************************************************************************
 *
 * This routine calculates the arm, span and block for the specified stripe and
 * reference in stripe using spanset.
 *
 * Inputs :
 *
 *    ld       - Logical drive number
 *    stripRow - Stripe number
 *    stripRef - Reference in stripe
 *
 * Outputs :
 *
 *    span     - Span number
 *    block    - Absolute Block number in the physical disk
 */
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef;
	u8 physArm, span;
	u64 row;
	u8 retval = TRUE;
	u8 do_invader = 0;
	u64 *pdBlock = &io_info->pdBlock;
	u16 *pDevHandle = &io_info->devHandle;
	u32 logArm, rowMod, armQ, arm;

	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	/* Get row and span from io_info for Uneven Span IO. */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return FALSE;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return FALSE;

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) &&
			(!do_invader || (do_invader &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
				physArm;
	return retval;
}
/*
 ******************************************************************************
 *
 * This routine calculates the arm, span and block for the specified stripe and
 * reference in stripe.
 *
 * Inputs :
 *
 *    ld       - Logical drive number
 *    stripRow - Stripe number
 *    stripRef - Reference in stripe
 *
 * Outputs :
 *
 *    span     - Span number
 *    block    - Absolute Block number in the physical disk
 */
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef;
	u8 physArm, span;
	u64 row;
	u8 retval = TRUE;
	u8 do_invader = 0;
	u64 *pdBlock = &io_info->pdBlock;
	u16 *pDevHandle = &io_info->devHandle;

	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return FALSE;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
		arm = armQ+1+logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
							 raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		if ((raid->level >= 5) &&
			(!do_invader || (do_invader &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
		physArm;
	return retval;
}
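
/*
 * RAID-6 rotation example for the arm math above (values hypothetical):
 * with rowSize = 4 and row = 1, rowMod = 1, so the Q drive sits at
 * armQ = 4 - 1 - 1 = 2 and logical arm 0 lands on physical arm
 * 2 + 1 + 0 = 3; logical arm 2 would wrap (2 + 1 + 2 = 5 >= 4) to
 * physical arm 1.
 */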
/*
 ******************************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing.  The start/end row and strip
 * information is calculated then the lock is acquired.
 * This function will return 0 if region lock was acquired OR return num strips
 */
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct MR_LD_RAID *raid;
	u32 ld, stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;
	u8 startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/*
	 * If rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero,
	 * return FALSE.
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%0x, and there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u8)(endStrip - start_strip + 1); /* End strip */

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d. Send IO w/o region lock.\n",
				__func__, __LINE__);
			return FALSE;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d for row 0x%llx, start strip %llx, end strip %llx\n",
				__func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
#if SPAN_DEBUG
		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d for row 0x%llx, start strip 0x%llx, end strip 0x%llx, span 0x%x\n",
			__func__, __LINE__,
			(unsigned long long)start_row,
			(unsigned long long)start_strip,
			(unsigned long long)endStrip, startlba_span);
		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
			(unsigned long long)start_row,
			(unsigned long long)endRow, startlba_span);
#endif
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		pRAID_Context->regLockFlags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
	pRAID_Context->regLockLength = cpu_to_le32(regSize);
	pRAID_Context->configSeqNum = raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		uint stripIdx;
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip + stripIdx,
					ref_in_start_stripe, io_info,
					pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
					start_strip + stripIdx, ref_in_start_stripe,
					io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}

#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	if (io_info->IoforUnevenSpan)
		get_arm_from_strip(instance, ld, start_strip, map);
#endif
	return TRUE;
}
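
/*
 * Region lock sizing example for the single-row case above (values
 * hypothetical): a 16-block read contained in one strip locks exactly
 * those 16 blocks, i.e. regStart = (start_row << stripeShift) +
 * ref_in_start_stripe and regSize = numBlocks; a multi-strip IO within
 * one row instead leaves regStart and regSize at their initial
 * full-row values.
 */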
/*
 ******************************************************************************
 *
 * This routine prepares spanset info from a valid RAID map and stores it in
 * the local copy of ldSpanInfo in the per-instance data structure.
 *
 * Inputs :
 * map    - LD map
 * ldSpanInfo - ldSpanInfo per HBA instance
 *
 */
void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
			PLD_SPAN_INFO ldSpanInfo)
{
	u8 span, count;
	u32 element, span_row_width;
	u64 span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
						printk(KERN_INFO "megasas: span %x rowDataSize %x\n",
							count, MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize);
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
						span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
						span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);
#endif
}
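
/*
 * Quad-to-row arithmetic used above, worked through (values
 * hypothetical): a quad with logStart = 0, logEnd = 99 and diff = 2
 * yields span_row = (99 - 0 + 2) / 2 = 50 rows, so for element 0 the
 * span set covers data rows 0..(50 * 2 - 1) = 0..99 and, with a data
 * width of 3 strips per row, data strips 0..(50 * 3 - 1) = 0..149.
 */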
void
mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
			      struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, map);

		/* Two drive Optimal RAID 1 */
		if ((raid->level == 1) && (raid->rowSize == 2) &&
		    (raid->spanDepth == 1) && raid->ldState ==
		    MR_LD_STATE_OPTIMAL) {
			u32 pd, arRef;

			lbInfo[ldCount].loadBalanceFlag = 1;

			/* Get the array on which this span is present */
			arRef = MR_LdSpanArrayGet(ld, 0, map);

			/* Get the Pd */
			pd = MR_ArPdGet(arRef, 0, map);
			/* Get dev handle from Pd */
			lbInfo[ldCount].raid1DevHandle[0] =
				MR_PdDevHandleGet(pd, map);
			/* Get the Pd */
			pd = MR_ArPdGet(arRef, 1, map);
			/* Get the dev handle from Pd */
			lbInfo[ldCount].raid1DevHandle[1] =
				MR_PdDevHandleGet(pd, map);
		} else
			lbInfo[ldCount].loadBalanceFlag = 0;
	}
}
u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
			u32 count)
{
	u16 pend0, pend1;
	u64 diff0, diff1;
	u8 bestArm;

	/* get the pending cmds for the data and mirror arms */
	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
	bestArm = (diff0 <= diff1 ? 0 : 1);

	/* Make balance count from 16 to 4 to keep driver in sync with Firmware */
	if ((bestArm == arm && pend0 > pend1 + 4) ||
	    (bestArm != arm && pend1 > pend0 + 4))
		bestArm ^= 1;

	/* Update the last accessed block on the correct pd */
	lbInfo->last_accessed_block[bestArm] = block + count - 1;
	return bestArm;
}
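
/*
 * Example of the flip above (values hypothetical): arm 0's head is
 * nearer (diff0 <= diff1) and arm 0 was used last (arm == 0), but it
 * already has 9 pending commands against 4 on arm 1; since 9 > 4 + 4,
 * bestArm flips to the mirror arm to spread the queue depth.
 */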
u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
			   struct IO_REQUEST_INFO *io_info)
{
	u8 arm, old_arm;
	u16 devHandle;

	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;

	/* get best new arm */
	arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
				   io_info->numBlocks);
	devHandle = lbInfo->raid1DevHandle[arm];
	atomic_inc(&lbInfo->scsi_pending_cmds[arm]);

	return devHandle;
}
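
/*
 * Usage sketch (an assumption, not verified in this file): the fast-path
 * read code is expected to call get_updated_dev_handle() only for LDs
 * whose loadBalanceFlag was set by mr_update_load_balance_params(), and
 * the atomic_inc() here is assumed to be balanced by an atomic_dec() of
 * scsi_pending_cmds at command completion elsewhere in the driver.
 */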