  1. /* bnx2x_init_ops.h: Broadcom Everest network driver.
  2. * Static functions needed during the initialization.
  3. * This file is "included" in bnx2x_main.c.
  4. *
  5. * Copyright (c) 2007-2009 Broadcom Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation.
  10. *
  11. * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  12. * Written by: Vladislav Zolotarov <vladz@broadcom.com>
  13. */
  14. #ifndef BNX2X_INIT_OPS_H
  15. #define BNX2X_INIT_OPS_H
  16. static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
  17. static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
  18. u32 len)
  19. {
  20. u32 i;
  21. for (i = 0; i < len; i++)
  22. REG_WR(bp, addr + i*4, data[i]);
  23. }
  24. static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
  25. u32 len)
  26. {
  27. u32 i;
  28. for (i = 0; i < len; i++)
  29. REG_WR_IND(bp, addr + i*4, data[i]);
  30. }
  31. static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
  32. {
  33. if (bp->dmae_ready)
  34. bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
  35. else
  36. bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
  37. }
  38. static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
  39. {
  40. u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
  41. u32 buf_len32 = buf_len/4;
  42. u32 i;
  43. memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
  44. for (i = 0; i < len; i += buf_len32) {
  45. u32 cur_len = min(buf_len32, len - i);
  46. bnx2x_write_big_buf(bp, addr + i*4, cur_len);
  47. }
  48. }
  49. static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
  50. u32 len64)
  51. {
  52. u32 buf_len32 = FW_BUF_SIZE/4;
  53. u32 len = len64*2;
  54. u64 data64 = 0;
  55. u32 i;
  56. /* 64 bit value is in a blob: first low DWORD, then high DWORD */
  57. data64 = HILO_U64((*(data + 1)), (*data));
  58. len64 = min((u32)(FW_BUF_SIZE/8), len64);
  59. for (i = 0; i < len64; i++) {
  60. u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
  61. *pdata = data64;
  62. }
  63. for (i = 0; i < len; i += buf_len32) {
  64. u32 cur_len = min(buf_len32, len - i);
  65. bnx2x_write_big_buf(bp, addr + i*4, cur_len);
  66. }
  67. }
  68. /*********************************************************
  69. There are different blobs for each PRAM section.
  70. In addition, each blob write operation is divided into a few operations
  71. in order to decrease the amount of phys. contiguous buffer needed.
  72. Thus, when we select a blob the address may be with some offset
  73. from the beginning of PRAM section.
  74. The same holds for the INT_TABLE sections.
  75. **********************************************************/
  76. #define IF_IS_INT_TABLE_ADDR(base, addr) \
  77. if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
  78. #define IF_IS_PRAM_ADDR(base, addr) \
  79. if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
/* Map a target address to the firmware blob that backs it.  Each SEM
 * (T/C/U/X) has its own interrupt-table and PRAM blob; the IF_IS_* macros
 * above expand to "if (range-check)", so this body is one long
 * if / else-if chain.  If @addr matches no known range, @data is returned
 * unchanged (callers may pass NULL and rely on a match).
 */
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
			data = INIT_CSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
			data = INIT_USEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
			data = INIT_XSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
			data = INIT_TSEM_PRAM_DATA(bp);
	else
		IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
			data = INIT_CSEM_PRAM_DATA(bp);
	else
		IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
			data = INIT_USEM_PRAM_DATA(bp);
	else
		IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
			data = INIT_XSEM_PRAM_DATA(bp);

	return data;
}
  107. static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
  108. {
  109. if (bp->dmae_ready)
  110. bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
  111. else
  112. bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
  113. }
  114. static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
  115. u32 len)
  116. {
  117. data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
  118. if (bp->dmae_ready)
  119. VIRT_WR_DMAE_LEN(bp, data, addr, len);
  120. else
  121. bnx2x_init_ind_wr(bp, addr, data, len);
  122. }
  123. static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
  124. {
  125. const u8 *data = NULL;
  126. int rc;
  127. u32 i;
  128. data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
  129. rc = bnx2x_gunzip(bp, data, len);
  130. if (rc)
  131. return;
  132. /* gunzip_outlen is in dwords */
  133. len = GUNZIP_OUTLEN(bp);
  134. for (i = 0; i < len; i++)
  135. ((u32 *)GUNZIP_BUF(bp))[i] =
  136. cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
  137. bnx2x_write_big_buf_wb(bp, addr, len);
  138. }
/* Replay the init-ops recorded for (@block, @stage): every operation between
 * the STAGE_START and STAGE_END offsets of the per-block ops table is decoded
 * and dispatched to the matching write helper.
 */
static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
{
	u16 op_start =
		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
	u16 op_end =
		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
	union init_op *op;
	int hw_wr;
	u32 i, op_type, addr, len;
	const u32 *data, *data_base;

	/* If empty block */
	if (op_start == op_end)
		return;

	/* pick which platform-specific "write" opcode applies here */
	if (CHIP_REV_IS_FPGA(bp))
		hw_wr = OP_WR_FPGA;
	else if (CHIP_REV_IS_EMUL(bp))
		hw_wr = OP_WR_EMUL;
	else
		hw_wr = OP_WR_ASIC;

	data_base = INIT_DATA(bp);

	for (i = op_start; i < op_end; i++) {

		op = (union init_op *)&(INIT_OPS(bp)[i]);

		/* the str_wr view is used to decode the common fields of
		 * every op variant (op code, offset, length, data offset) */
		op_type = op->str_wr.op;
		addr = op->str_wr.offset;
		len = op->str_wr.data_len;
		data = data_base + op->str_wr.data_off;

		/* HW/EMUL specific: a platform write op becomes a plain
		 * OP_WR when it matches this platform, otherwise it falls
		 * through to the default (skipped) */
		if ((op_type > OP_WB) && (op_type == hw_wr))
			op_type = OP_WR;

		switch (op_type) {
		case OP_RD:
			/* read (value discarded - register side effect) */
			REG_RD(bp, addr);
			break;
		case OP_WR:
			REG_WR(bp, addr, op->write.val);
			break;
		case OP_SW:
			/* string (array) write */
			bnx2x_init_str_wr(bp, addr, data, len);
			break;
		case OP_WB:
			/* wide-bus write, DMAE when available */
			bnx2x_init_wr_wb(bp, addr, data, len);
			break;
		case OP_SI:
			/* indirect write */
			bnx2x_init_ind_wr(bp, addr, data, len);
			break;
		case OP_ZR:
			/* zero a region */
			bnx2x_init_fill(bp, addr, 0, op->zero.len);
			break;
		case OP_ZP:
			/* decompress a zipped blob, then write it */
			bnx2x_init_wr_zp(bp, addr, len,
					 op->str_wr.data_off);
			break;
		case OP_WR_64:
			bnx2x_init_wr_64(bp, addr, data, len);
			break;
		default:
			/* happens whenever an op is of a diff HW */
			break;
		}
	}
}
  200. /****************************************************************************
  201. * PXP Arbiter
  202. ****************************************************************************/
  203. /*
  204. * This code configures the PCI read/write arbiter
  205. * which implements a weighted round robin
  206. * between the virtual queues in the chip.
  207. *
  208. * The values were derived for each PCI max payload and max request size.
  209. * since max payload and max request size are only known at run time,
  210. * this is done as a separate init stage.
  211. */
  212. #define NUM_WR_Q 13
  213. #define NUM_RD_Q 29
  214. #define MAX_RD_ORD 3
  215. #define MAX_WR_ORD 2
/* configuration for one arbiter queue - the three values are written to the
 * queue's PXP2 ..._BW_L / ..._BW_ADD / ..._BW_UB(OUND) registers (see the
 * address tables below) */
struct arb_line {
	int l;		/* L value */
	int add;	/* ADD value */
	int ubound;	/* upper bound */
};
/* derived configuration for each read queue for each max request size;
 * rows are queues 1..NUM_RD_Q, columns index the read order 0..MAX_RD_ORD */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4},   {4, 8, 4},    {4, 8, 4},    {4, 8, 4} },
	{ {4, 3, 3},   {4, 3, 3},    {4, 3, 3},    {4, 3, 3} },
	{ {8, 3, 6},   {16, 3, 11},  {16, 3, 11},  {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
/* 10 */{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 64, 6},  {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
/* 20 */{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	/* the last row is packed into PXP2_REG_PSWRQ_BW_WR, not into
	 * per-queue registers - see bnx2x_init_pxp_arb() */
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};
/* derived configuration for each write queue for each max request size;
 * rows are queues 1..NUM_WR_Q, columns index the write order 0..MAX_WR_ORD */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3},   {4, 6, 3},    {4, 6, 3} },
	{ {4, 2, 3},   {4, 2, 3},    {4, 2, 3} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
/* 10 */{ {8, 9, 6},   {16, 9, 11},  {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6},   {16, 9, 11},  {16, 9, 11} },
	/* the last row is packed into PXP2_REG_PSWRQ_BW_RD, not into
	 * per-queue registers - see bnx2x_init_pxp_arb() */
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};
/* register addresses for read queues - one L/ADD/UBOUND triple per queue.
 * Some queues use dedicated RQ_BW_RD_* registers, others share packed
 * PSWRQ_BW_* registers.  Note the table jumps from queue 20 straight to
 * queue 22 (no BW registers listed for queue 21). */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	/* queue 21 intentionally absent */
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};
/* register addresses for write queues - one L/ADD/UBOUND triple per queue.
 * Most write queues share the packed PSWRQ_BW_* registers with the read
 * queues (hence the read-modify-write in bnx2x_init_pxp_arb()); only
 * queues 29 and 30 have dedicated RQ_BW_WR_* registers. */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};
/* Program the PXP read/write arbiter (weighted round robin between the
 * chip's virtual queues).  @r_order/@w_order select a column in the data
 * tables above; they are derived from the PCI max read request / max
 * payload size, which are only known at run time.
 */
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
{
	u32 val, i;

	/* clamp the orders to the range the tables were derived for */
	if (r_order > MAX_RD_ORD) {
		DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
		   r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
		   w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(bp)) {
		/* NOTE(review): the message says "1" but the code forces
		 * order 0 - looks like a stale log string; confirm which
		 * value is intended */
		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
		w_order = 0;
	}
	DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);

	/* read queues with per-queue L/ADD/UBOUND registers */
	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(bp, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(bp, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	for (i = 0; i < NUM_WR_Q-1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
			/* queues 29/30 have dedicated write registers */
			REG_WR(bp, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);
			REG_WR(bp, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);
			REG_WR(bp, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {
			/* registers shared with the read side: write fields
			 * sit in the upper bits, so read-modify-write */
			val = REG_RD(bp, write_arb_addr[i].l);
			REG_WR(bp, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));
			val = REG_RD(bp, write_arb_addr[i].add);
			REG_WR(bp, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));
			val = REG_RD(bp, write_arb_addr[i].ubound);
			REG_WR(bp, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	/* the last queue of each table is packed into a single register.
	 * NOTE(review): the *write* table data goes to PSWRQ_BW_RD and the
	 * *read* table data to PSWRQ_BW_WR - confirm against the register
	 * spec whether this cross-wiring is intentional */
	val = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);

	val = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);

	/* MBS registers take the raw order values (presumably "max burst
	 * size" - TODO confirm against the register spec) */
	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);

	if (r_order == MAX_RD_ORD)
		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	/* threshold scales with the write order */
	REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (CHIP_IS_E1H(bp)) {
		/*    MPS     w_order     optimal TH      presently TH
		 *    128        0            0               2
		 *    256        1            1               3
		 *    >=512      2            2               3
		 */
		val = ((w_order == 0) ? 2 : 3);
		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
	}
}
  437. #endif /* BNX2X_INIT_OPS_H */