  1. /* bnx2x_init_ops.h: Broadcom Everest network driver.
  2. * Static functions needed during the initialization.
  3. * This file is "included" in bnx2x_main.c.
  4. *
  5. * Copyright (c) 2007-2013 Broadcom Corporation
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation.
  10. *
  11. * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  12. * Written by: Vladislav Zolotarov <vladz@broadcom.com>
  13. */
#ifndef BNX2X_INIT_OPS_H
#define BNX2X_INIT_OPS_H

/* Fallback stubs: the real definitions are normally provided by the
 * including translation unit (bnx2x_main.c).  The no-op defaults below
 * keep this header usable by tools/builds that do not supply them.
 */
#ifndef BP_ILT
#define BP_ILT(bp)	NULL
#endif
#ifndef BP_FUNC
#define BP_FUNC(bp)	0
#endif
#ifndef BP_PORT
#define BP_PORT(bp)	0
#endif
#ifndef BNX2X_ILT_FREE
#define BNX2X_ILT_FREE(x, y, sz)
#endif
#ifndef BNX2X_ILT_ZALLOC
#define BNX2X_ILT_ZALLOC(x, y, sz)
#endif
/* NOTE(review): this fallback is the identity, not a log2 — presumably the
 * real ILOG2 comes from the includer; verify before relying on it here. */
#ifndef ILOG2
#define ILOG2(x)	x
#endif
/* Forward declarations — implemented in the including file (bnx2x_main.c). */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
				      dma_addr_t phys_addr, u32 addr,
				      u32 len);
  39. static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
  40. const u32 *data, u32 len)
  41. {
  42. u32 i;
  43. for (i = 0; i < len; i++)
  44. REG_WR(bp, addr + i*4, data[i]);
  45. }
  46. static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
  47. const u32 *data, u32 len)
  48. {
  49. u32 i;
  50. for (i = 0; i < len; i++)
  51. bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
  52. }
/* Flush @len DWORDs of the shared GUNZIP buffer to device address @addr.
 *
 * Picks the fastest safe transport: DMAE when it is up, otherwise indirect
 * writes on E1 when @wb (wide-bus) is set, otherwise plain string writes.
 */
static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
				u8 wb)
{
	if (bp->dmae_ready)
		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);

	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
	else if (wb && CHIP_IS_E1(bp))
		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
  65. static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
  66. u32 len, u8 wb)
  67. {
  68. u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
  69. u32 buf_len32 = buf_len/4;
  70. u32 i;
  71. memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
  72. for (i = 0; i < len; i += buf_len32) {
  73. u32 cur_len = min(buf_len32, len - i);
  74. bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
  75. }
  76. }
/* Like bnx2x_write_big_buf() but always treats the data as wide-bus:
 * on E1 (when DMAE is down) it unconditionally uses indirect writes.
 */
static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
{
	if (bp->dmae_ready)
		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);

	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
	else if (CHIP_IS_E1(bp))
		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
/* Replicate one 64-bit value @len64 times into device memory at @addr.
 *
 * @data points at the value as two DWORDs (low first, then high — see the
 * blob layout comment below).  The value is splatted across the GUNZIP
 * staging buffer, then flushed in FW_BUF_SIZE chunks.
 *
 * NOTE(review): len64 is clamped to the staging-buffer capacity
 * (FW_BUF_SIZE/8) but @len is computed from the pre-clamp value, so the
 * flush loop rewrites the same staged pattern over the full requested
 * range — presumably intentional; confirm against firmware init tables.
 */
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
			     const u32 *data, u32 len64)
{
	u32 buf_len32 = FW_BUF_SIZE/4;		/* staging size in DWORDs */
	u32 len = len64*2;			/* total length in DWORDs */
	u64 data64 = 0;
	u32 i;

	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
	data64 = HILO_U64((*(data + 1)), (*data));

	len64 = min((u32)(FW_BUF_SIZE/8), len64);

	/* Splat the value across the staging buffer. */
	for (i = 0; i < len64; i++) {
		u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;

		*pdata = data64;
	}

	/* Flush the staged pattern chunk by chunk. */
	for (i = 0; i < len; i += buf_len32) {
		u32 cur_len = min(buf_len32, len - i);

		bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
	}
}
  107. /*********************************************************
  108. There are different blobs for each PRAM section.
  109. In addition, each blob write operation is divided into a few operations
  110. in order to decrease the amount of phys. contiguous buffer needed.
  111. Thus, when we select a blob the address may be with some offset
  112. from the beginning of PRAM section.
  113. The same holds for the INT_TABLE sections.
  114. **********************************************************/
/* These macros expand to a bare `if`, so bnx2x_sel_blob() below is one
 * long if/else-if chain despite its unusual appearance. */
#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))

/* Map a device address to the firmware blob that covers it: one of the
 * four SEM interrupt tables (1KB windows) or PRAM sections (256KB
 * windows).  Returns @data unchanged if @addr matches no window.
 */
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
				const u8 *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
			data = INIT_CSEM_INT_TABLE_DATA(bp);
		else
			IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
				data = INIT_USEM_INT_TABLE_DATA(bp);
			else
				IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
					data = INIT_XSEM_INT_TABLE_DATA(bp);
				else
					IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
						data = INIT_TSEM_PRAM_DATA(bp);
					else
						IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
							data = INIT_CSEM_PRAM_DATA(bp);
						else
							IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
								data = INIT_USEM_PRAM_DATA(bp);
							else
								IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
									data = INIT_XSEM_PRAM_DATA(bp);

	return data;
}
/* Wide-bus write of @len DWORDs from @data to @addr, choosing DMAE when
 * available, otherwise indirect (E1) or string writes — same transport
 * selection rationale as bnx2x_write_big_buf().
 */
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
			     const u32 *data, u32 len)
{
	if (bp->dmae_ready)
		VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);

	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
	else if (CHIP_IS_E1(bp))
		bnx2x_init_ind_wr(bp, addr, data, len);

	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		bnx2x_init_str_wr(bp, addr, data, len);
}
  159. static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
  160. u32 val_hi)
  161. {
  162. u32 wb_write[2];
  163. wb_write[0] = val_lo;
  164. wb_write[1] = val_hi;
  165. REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
  166. }
/* Decompress a zipped firmware blob section and write it to @addr.
 *
 * @len is the compressed length in bytes; @blob_off is the DWORD offset
 * into the blob selected by bnx2x_sel_blob().  On gunzip failure the
 * write is silently skipped (best-effort, matching the other init ops).
 */
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
			     u32 blob_off)
{
	const u8 *data = NULL;
	int rc;
	u32 i;

	data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;

	rc = bnx2x_gunzip(bp, data, len);
	if (rc)
		return;

	/* gunzip_outlen is in dwords */
	len = GUNZIP_OUTLEN(bp);
	/* Byte-swap the decompressed data in place before the wide write. */
	for (i = 0; i < len; i++)
		((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
				cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);

	bnx2x_write_big_buf_wb(bp, addr, len);
}
/* Execute the firmware init-op program for one (block, stage) pair.
 *
 * Looks up the op range for @block/@stage in the init-ops offset table
 * and interprets each op: register reads/writes, string/wide/zipped
 * array writes, fills, 64-bit replication, and conditional skips based
 * on the run-time mode flags.
 */
static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
{
	u16 op_start =
		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
						   STAGE_START)];
	u16 op_end =
		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
						   STAGE_END)];
	const union init_op *op;
	u32 op_idx, op_type, addr, len;
	const u32 *data, *data_base;

	/* If empty block */
	if (op_start == op_end)
		return;

	data_base = INIT_DATA(bp);

	for (op_idx = op_start; op_idx < op_end; op_idx++) {

		op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
		/* Get generic data */
		op_type = op->raw.op;
		addr = op->raw.offset;
		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
		 * OP_WR64 (we assume that op_arr_write and op_write have the
		 * same structure).
		 */
		len = op->arr_wr.data_len;
		data = data_base + op->arr_wr.data_off;

		switch (op_type) {
		case OP_RD:
			/* Read-only access; value is discarded (side
			 * effects in the device, e.g. clear-on-read). */
			REG_RD(bp, addr);
			break;
		case OP_WR:
			REG_WR(bp, addr, op->write.val);
			break;
		case OP_SW:
			bnx2x_init_str_wr(bp, addr, data, len);
			break;
		case OP_WB:
			bnx2x_init_wr_wb(bp, addr, data, len);
			break;
		case OP_ZR:
			bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
			break;
		case OP_WB_ZR:
			bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
			break;
		case OP_ZP:
			bnx2x_init_wr_zp(bp, addr, len,
					 op->arr_wr.data_off);
			break;
		case OP_WR_64:
			bnx2x_init_wr_64(bp, addr, data, len);
			break;
		case OP_IF_MODE_AND:
			/* if any of the flags doesn't match, skip the
			 * conditional block.
			 */
			if ((INIT_MODE_FLAGS(bp) &
				op->if_mode.mode_bit_map) !=
				op->if_mode.mode_bit_map)
				op_idx += op->if_mode.cmd_offset;
			break;
		case OP_IF_MODE_OR:
			/* if all the flags don't match, skip the conditional
			 * block.
			 */
			if ((INIT_MODE_FLAGS(bp) &
				op->if_mode.mode_bit_map) == 0)
				op_idx += op->if_mode.cmd_offset;
			break;
		default:
			/* Should never get here! */
			break;
		}
	}
}
  259. /****************************************************************************
  260. * PXP Arbiter
  261. ****************************************************************************/
  262. /*
  263. * This code configures the PCI read/write arbiter
  264. * which implements a weighted round robin
  265. * between the virtual queues in the chip.
  266. *
  267. * The values were derived for each PCI max payload and max request size.
  268. * since max payload and max request size are only known at run time,
  269. * this is done as a separate init stage.
  270. */
#define NUM_WR_Q	13	/* number of write arbiter queues */
#define NUM_RD_Q	29	/* number of read arbiter queues */
#define MAX_RD_ORD	3	/* max read order index (request size) */
#define MAX_WR_ORD	2	/* max write order index (payload size) */

/* configuration for one arbiter queue */
struct arb_line {
	int l;		/* weight (L) */
	int add;	/* credit add value */
	int ubound;	/* credit upper bound */
};
/* derived configuration for each read queue for each max request size;
 * rows are queues 1..NUM_RD_Q, columns index the read order (0..MAX_RD_ORD)
 */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4},   {4, 8, 4},    {4, 8, 4},    {4, 8, 4}    },
	{ {4, 3, 3},   {4, 3, 3},    {4, 3, 3},    {4, 3, 3}    },
	{ {8, 3, 6},   {16, 3, 11},  {16, 3, 11},  {16, 3, 11}  },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41}  },
/* 10 */{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 64, 6},  {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
/* 20 */{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21}  },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};
/* derived configuration for each write queue for each max payload size;
 * rows are queues 1..NUM_WR_Q, columns index the write order (0..MAX_WR_ORD)
 */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3},   {4, 6, 3},    {4, 6, 3}   },
	{ {4, 2, 3},   {4, 2, 3},    {4, 2, 3}   },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
/* 10 */{ {8, 9, 6},   {16, 9, 11},  {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6},   {16, 9, 11},  {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};
/* register addresses for read queues; the last queue (index NUM_RD_Q-1)
 * is programmed separately via PXP2_REG_PSWRQ_BW_* in bnx2x_init_pxp_arb(),
 * hence NUM_RD_Q-1 entries here
 */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};
/* register addresses for write queues; the last queue (index NUM_WR_Q-1)
 * is programmed separately in bnx2x_init_pxp_arb(), hence NUM_WR_Q-1
 * entries here
 */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};
/* Program the PXP read/write arbiter for the given PCIe read order
 * (max request size index, 0..MAX_RD_ORD) and write order (max payload
 * size index, 0..MAX_WR_ORD), using the derived tables above.
 */
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
			       int w_order)
{
	u32 val, i;

	/* Clamp out-of-range orders to the largest supported value. */
	if (r_order > MAX_RD_ORD) {
		DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
		   r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
		   w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(bp)) {
		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
		w_order = 0;
	}
	DP(NETIF_MSG_HW, "read order %d  write order %d\n", r_order, w_order);

	/* Program L/ADD/UBOUND for every read queue from the tables. */
	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(bp, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(bp, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	for (i = 0; i < NUM_WR_Q-1; i++) {
		/* Queues 29/30 own their registers; the rest share fields
		 * with the read side and must be read-modify-written at
		 * the documented bit offsets. */
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
			REG_WR(bp, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);

			REG_WR(bp, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);

			REG_WR(bp, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {

			val = REG_RD(bp, write_arb_addr[i].l);
			REG_WR(bp, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));

			val = REG_RD(bp, write_arb_addr[i].add);
			REG_WR(bp, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));

			val = REG_RD(bp, write_arb_addr[i].ubound);
			REG_WR(bp, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	/* Last queues of each direction are packed into single registers. */
	val =  write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);

	val =  read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);

	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	/* USDM doorbell-prod threshold scales with payload per chip family. */
	if (CHIP_IS_E3(bp))
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
	else
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (!CHIP_IS_E1(bp)) {
		/*    MPS      w_order     optimal TH      presently TH
		 *    128         0             0               2
		 *    256         1             1               3
		 *    >=512       2             2               3
		 */
		/* DMAE is special */
		if (!CHIP_IS_E1H(bp)) {
			/* E2 can use optimal TH */
			val = w_order;
			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
		} else {
			val = ((w_order == 0) ? 2 : 3);
			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
		}

		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
	}

	/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST		0x2980
	val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
	val &= 0xFF;
	if (val <= 0x20)
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
  515. /****************************************************************************
  516. * ILT management
  517. ****************************************************************************/
  518. /*
  519. * This codes hides the low level HW interaction for ILT management and
  520. * configuration. The API consists of a shadow ILT table which is set by the
  521. * driver and a set of routines to use it to configure the HW.
  522. *
  523. */
/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* the phys address is shifted right 12 bits and has an added
 * 1=valid bit added to the 53rd bit
 * then since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* Pack a first/last line pair into one range register value. */
#define ILT_RANGE(f, l)		(((l) << 10) | f)
  536. static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
  537. struct ilt_line *line, u32 size, u8 memop)
  538. {
  539. if (memop == ILT_MEMOP_FREE) {
  540. BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
  541. return 0;
  542. }
  543. BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
  544. if (!line->page)
  545. return -1;
  546. line->size = size;
  547. return 0;
  548. }
  549. static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
  550. u8 memop)
  551. {
  552. int i, rc;
  553. struct bnx2x_ilt *ilt = BP_ILT(bp);
  554. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  555. if (!ilt || !ilt->lines)
  556. return -1;
  557. if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
  558. return 0;
  559. for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
  560. rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
  561. ilt_cli->page_size, memop);
  562. }
  563. return rc;
  564. }
  565. static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
  566. {
  567. int rc = 0;
  568. if (CONFIGURE_NIC_MODE(bp))
  569. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
  570. if (!rc)
  571. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
  572. return rc;
  573. }
  574. static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
  575. {
  576. int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
  577. if (!rc)
  578. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
  579. if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
  580. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
  581. return rc;
  582. }
  583. static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
  584. dma_addr_t page_mapping)
  585. {
  586. u32 reg;
  587. if (CHIP_IS_E1(bp))
  588. reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
  589. else
  590. reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
  591. bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
  592. }
/* Program or clear the HW entry for ILT line @idx of @ilt, depending on
 * @initop (INITOP_INIT/SET write the line's mapping, INITOP_CLEAR writes
 * a null mapping).
 */
static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
				   struct bnx2x_ilt *ilt, int idx, u8 initop)
{
	dma_addr_t null_mapping;
	int abs_idx = ilt->start_line + idx;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
		/* fall through */
	case INITOP_SET:
		bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
		break;
	case INITOP_CLEAR:
		null_mapping = 0;
		bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
		break;
	}
}
/* Program the first/last ILT line boundary registers for one client.
 *
 * On E1 the range is packed into a single per-function register
 * (ILT_RANGE); on later chips first and last lines go to separate
 * registers.  (Note: "boundry" in the name is a historical typo kept
 * for compatibility with callers.)
 */
static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
				      struct ilt_client_info *ilt_cli,
				      u32 ilt_start, u8 initop)
{
	u32 start_reg = 0;
	u32 end_reg = 0;

	/* The boundary is either SET or INIT,
	   CLEAR => SET and for now SET ~~ INIT */

	/* find the appropriate regs */
	if (CHIP_IS_E1(bp)) {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
			break;
		}
		REG_WR(bp, start_reg + BP_FUNC(bp)*4,
		       ILT_RANGE((ilt_start + ilt_cli->start),
				 (ilt_start + ilt_cli->end)));
	} else {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
			break;
		}
		REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
		REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
	}
}
/* Initialize (or clear) all ILT lines of one client plus its boundary
 * registers, unless the client is flagged to skip init.
 */
static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
					 struct bnx2x_ilt *ilt,
					 struct ilt_client_info *ilt_cli,
					 u8 initop)
{
	int i;

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
		bnx2x_ilt_line_init_op(bp, ilt, i, initop);

	/* init/clear the ILT boundries */
	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
  673. static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
  674. struct ilt_client_info *ilt_cli, u8 initop)
  675. {
  676. struct bnx2x_ilt *ilt = BP_ILT(bp);
  677. bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
  678. }
  679. static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
  680. int cli_num, u8 initop)
  681. {
  682. struct bnx2x_ilt *ilt = BP_ILT(bp);
  683. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  684. bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
  685. }
/* Initialize the CNIC-side ILT clients (SRC only in NIC mode, plus TM). */
static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
{
	if (CONFIGURE_NIC_MODE(bp))
		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
}
/* Initialize the L2-side ILT clients (CDU, QM, and SRC when CNIC is
 * supported but NIC mode is off) — mirrors bnx2x_ilt_mem_op().
 */
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
	if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
}
/* Program one client's ILT page-size register @psz_reg with log2 of the
 * page size in 4KB units; no-op on INITOP_CLEAR or when the client skips
 * init.
 */
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
				      u32 psz_reg, u8 initop)
{
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
		/* fall through */
	case INITOP_SET:
		REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
		break;
	case INITOP_CLEAR:
		break;
	}
}
/*
 * called during init common stage: ILT clients should be initialized
 * prior to calling this function
 */
/* Program the page-size register of every ILT client (CDU, QM, SRC, TM). */
static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
				  PXP2_REG_RQ_CDU_P_SIZE, initop);
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
				  PXP2_REG_RQ_QM_P_SIZE, initop);
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
				  PXP2_REG_RQ_SRC_P_SIZE, initop);
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
				  PXP2_REG_RQ_TM_P_SIZE, initop);
}
  731. /****************************************************************************
  732. * QM initializations
  733. ****************************************************************************/
#define QM_QUEUES_PER_FUNC	16	/* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT	31	/* below this, QM init is skipped */
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
/* Program the per-port QM connection count register (in units of 16
 * connections, minus one) when the CID count warrants QM init.
 */
static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
				    u8 initop)
{
	int port = BP_PORT(bp);

	if (QM_INIT(qm_cid_count)) {
		switch (initop) {
		case INITOP_INIT:
			/* set in the init-value array */
			/* fall through */
		case INITOP_SET:
			REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
			       qm_cid_count/16 - 1);
			break;
		case INITOP_CLEAR:
			break;
		}
	}
}
/* Program the QM base-address table at @base_reg (one base per queue,
 * 4 function-slots of QM_QUEUES_PER_FUNC queues) and zero the matching
 * 64-bit pointer-table entries at @reg.
 */
static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
				   u32 base_reg, u32 reg)
{
	int i;
	u32 wb_data[2] = {0, 0};	/* zero pointer-table entry */

	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
		REG_WR(bp, base_reg + i*4,
		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
		bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2);
	}
}
/* called during init common stage */
/* Set up the QM pointer tables (primary, plus the E1H extension) when
 * the CID count warrants QM init; INITOP_CLEAR is a no-op.
 */
static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
				    u8 initop)
{
	if (!QM_INIT(qm_cid_count))
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
		/* fall through */
	case INITOP_SET:
		bnx2x_qm_set_ptr_table(bp, qm_cid_count,
				       QM_REG_BASEADDR, QM_REG_PTRTBL);
		if (CHIP_IS_E1H(bp))
			bnx2x_qm_set_ptr_table(bp, qm_cid_count,
					       QM_REG_BASEADDR_EXT_A,
					       QM_REG_PTRTBL_EXT_A);
		break;
	case INITOP_CLEAR:
		break;
	}
}
  787. /****************************************************************************
  788. * SRC initializations
  789. ****************************************************************************/
  790. /* called during init func stage */
/* Initialize the searcher (SRC) T2 table: link each entry to the DMA
 * address of the next one, then tell the hardware the free count and
 * the first/last free entry addresses for this port.
 */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
			      dma_addr_t t2_mapping, int src_cid_count)
{
	int i;
	int port = BP_PORT(bp);

	/* Initialize T2 */
	for (i = 0; i < src_cid_count-1; i++)
		t2[i].next = (u64)(t2_mapping +
			     (i+1)*sizeof(struct src_ent));

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);

	bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(t2_mapping), U64_HI(t2_mapping));

	bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)t2_mapping +
			   (src_cid_count-1) * sizeof(struct src_ent)),
		    U64_HI((u64)t2_mapping +
			   (src_cid_count-1) * sizeof(struct src_ent)));
}
  810. #endif /* BNX2X_INIT_OPS_H */