/* cnic_if.h: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 */
  10. #ifndef CNIC_IF_H
  11. #define CNIC_IF_H
  12. #define CNIC_MODULE_VERSION "2.1.3"
  13. #define CNIC_MODULE_RELDATE "June 24, 2010"
  14. #define CNIC_ULP_RDMA 0
  15. #define CNIC_ULP_ISCSI 1
  16. #define CNIC_ULP_L4 2
  17. #define MAX_CNIC_ULP_TYPE_EXT 2
  18. #define MAX_CNIC_ULP_TYPE 3
  19. struct kwqe {
  20. u32 kwqe_op_flag;
  21. #define KWQE_OPCODE_MASK 0x00ff0000
  22. #define KWQE_OPCODE_SHIFT 16
  23. #define KWQE_FLAGS_LAYER_SHIFT 28
  24. #define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
  25. u32 kwqe_info0;
  26. u32 kwqe_info1;
  27. u32 kwqe_info2;
  28. u32 kwqe_info3;
  29. u32 kwqe_info4;
  30. u32 kwqe_info5;
  31. u32 kwqe_info6;
  32. };
  33. struct kwqe_16 {
  34. u32 kwqe_info0;
  35. u32 kwqe_info1;
  36. u32 kwqe_info2;
  37. u32 kwqe_info3;
  38. };
  39. struct kcqe {
  40. u32 kcqe_info0;
  41. u32 kcqe_info1;
  42. u32 kcqe_info2;
  43. u32 kcqe_info3;
  44. u32 kcqe_info4;
  45. u32 kcqe_info5;
  46. u32 kcqe_info6;
  47. u32 kcqe_op_flag;
  48. #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
  49. #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
  50. #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
  51. #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
  52. #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
  53. #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
  54. #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
  55. #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
  56. #define KCQE_FLAGS_NEXT (1<<31)
  57. #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
  58. #define KCQE_FLAGS_OPCODE_SHIFT (16)
  59. #define KCQE_OPCODE(op) \
  60. (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
  61. };
  62. #define MAX_CNIC_CTL_DATA 64
  63. #define MAX_DRV_CTL_DATA 64
  64. #define CNIC_CTL_STOP_CMD 1
  65. #define CNIC_CTL_START_CMD 2
  66. #define CNIC_CTL_COMPLETION_CMD 3
  67. #define DRV_CTL_IO_WR_CMD 0x101
  68. #define DRV_CTL_IO_RD_CMD 0x102
  69. #define DRV_CTL_CTX_WR_CMD 0x103
  70. #define DRV_CTL_CTXTBL_WR_CMD 0x104
  71. #define DRV_CTL_COMPLETION_CMD 0x105
  72. #define DRV_CTL_START_L2_CMD 0x106
  73. #define DRV_CTL_STOP_L2_CMD 0x107
  74. struct cnic_ctl_completion {
  75. u32 cid;
  76. };
  77. struct drv_ctl_completion {
  78. u32 comp_count;
  79. };
  80. struct cnic_ctl_info {
  81. int cmd;
  82. union {
  83. struct cnic_ctl_completion comp;
  84. char bytes[MAX_CNIC_CTL_DATA];
  85. } data;
  86. };
  87. struct drv_ctl_io {
  88. u32 cid_addr;
  89. u32 offset;
  90. u32 data;
  91. dma_addr_t dma_addr;
  92. };
  93. struct drv_ctl_l2_ring {
  94. u32 client_id;
  95. u32 cid;
  96. };
  97. struct drv_ctl_info {
  98. int cmd;
  99. union {
  100. struct drv_ctl_completion comp;
  101. struct drv_ctl_io io;
  102. struct drv_ctl_l2_ring ring;
  103. char bytes[MAX_DRV_CTL_DATA];
  104. } data;
  105. };
  106. struct cnic_ops {
  107. struct module *cnic_owner;
  108. /* Calls to these functions are protected by RCU. When
  109. * unregistering, we wait for any calls to complete before
  110. * continuing.
  111. */
  112. int (*cnic_handler)(void *, void *);
  113. int (*cnic_ctl)(void *, struct cnic_ctl_info *);
  114. };
  115. #define MAX_CNIC_VEC 8
  116. struct cnic_irq {
  117. unsigned int vector;
  118. void *status_blk;
  119. u32 status_blk_num;
  120. u32 status_blk_num2;
  121. u32 irq_flags;
  122. #define CNIC_IRQ_FL_MSIX 0x00000001
  123. };
  124. struct cnic_eth_dev {
  125. struct module *drv_owner;
  126. u32 drv_state;
  127. #define CNIC_DRV_STATE_REGD 0x00000001
  128. #define CNIC_DRV_STATE_USING_MSIX 0x00000002
  129. u32 chip_id;
  130. u32 max_kwqe_pending;
  131. struct pci_dev *pdev;
  132. void __iomem *io_base;
  133. void __iomem *io_base2;
  134. void *iro_arr;
  135. u32 ctx_tbl_offset;
  136. u32 ctx_tbl_len;
  137. int ctx_blk_size;
  138. u32 starting_cid;
  139. u32 max_iscsi_conn;
  140. u32 max_fcoe_conn;
  141. u32 max_rdma_conn;
  142. u32 reserved0[2];
  143. int num_irq;
  144. struct cnic_irq irq_arr[MAX_CNIC_VEC];
  145. int (*drv_register_cnic)(struct net_device *,
  146. struct cnic_ops *, void *);
  147. int (*drv_unregister_cnic)(struct net_device *);
  148. int (*drv_submit_kwqes_32)(struct net_device *,
  149. struct kwqe *[], u32);
  150. int (*drv_submit_kwqes_16)(struct net_device *,
  151. struct kwqe_16 *[], u32);
  152. int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
  153. unsigned long reserved1[2];
  154. };
  155. struct cnic_sockaddr {
  156. union {
  157. struct sockaddr_in v4;
  158. struct sockaddr_in6 v6;
  159. } local;
  160. union {
  161. struct sockaddr_in v4;
  162. struct sockaddr_in6 v6;
  163. } remote;
  164. };
  165. struct cnic_sock {
  166. struct cnic_dev *dev;
  167. void *context;
  168. u32 src_ip[4];
  169. u32 dst_ip[4];
  170. u16 src_port;
  171. u16 dst_port;
  172. u16 vlan_id;
  173. unsigned char old_ha[6];
  174. unsigned char ha[6];
  175. u32 mtu;
  176. u32 cid;
  177. u32 l5_cid;
  178. u32 pg_cid;
  179. int ulp_type;
  180. u32 ka_timeout;
  181. u32 ka_interval;
  182. u8 ka_max_probe_count;
  183. u8 tos;
  184. u8 ttl;
  185. u8 snd_seq_scale;
  186. u32 rcv_buf;
  187. u32 snd_buf;
  188. u32 seed;
  189. unsigned long tcp_flags;
  190. #define SK_TCP_NO_DELAY_ACK 0x1
  191. #define SK_TCP_KEEP_ALIVE 0x2
  192. #define SK_TCP_NAGLE 0x4
  193. #define SK_TCP_TIMESTAMP 0x8
  194. #define SK_TCP_SACK 0x10
  195. #define SK_TCP_SEG_SCALING 0x20
  196. unsigned long flags;
  197. #define SK_F_INUSE 0
  198. #define SK_F_OFFLD_COMPLETE 1
  199. #define SK_F_OFFLD_SCHED 2
  200. #define SK_F_PG_OFFLD_COMPLETE 3
  201. #define SK_F_CONNECT_START 4
  202. #define SK_F_IPV6 5
  203. #define SK_F_CLOSING 7
  204. atomic_t ref_count;
  205. u32 state;
  206. struct kwqe kwqe1;
  207. struct kwqe kwqe2;
  208. struct kwqe kwqe3;
  209. };
  210. struct cnic_dev {
  211. struct net_device *netdev;
  212. struct pci_dev *pcidev;
  213. void __iomem *regview;
  214. struct list_head list;
  215. int (*register_device)(struct cnic_dev *dev, int ulp_type,
  216. void *ulp_ctx);
  217. int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
  218. int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
  219. u32 num_wqes);
  220. int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
  221. u32 num_wqes);
  222. int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
  223. void *);
  224. int (*cm_destroy)(struct cnic_sock *);
  225. int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
  226. int (*cm_abort)(struct cnic_sock *);
  227. int (*cm_close)(struct cnic_sock *);
  228. struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
  229. int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
  230. char *data, u16 data_size);
  231. unsigned long flags;
  232. #define CNIC_F_CNIC_UP 1
  233. #define CNIC_F_BNX2_CLASS 3
  234. #define CNIC_F_BNX2X_CLASS 4
  235. atomic_t ref_count;
  236. u8 mac_addr[6];
  237. int max_iscsi_conn;
  238. int max_fcoe_conn;
  239. int max_rdma_conn;
  240. void *cnic_priv;
  241. };
  242. #define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
  243. #define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
  244. #define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
  245. #define CNIC_RD(dev, off) readl(dev->regview + off)
  246. #define CNIC_RD16(dev, off) readw(dev->regview + off)
  247. struct cnic_ulp_ops {
  248. /* Calls to these functions are protected by RCU. When
  249. * unregistering, we wait for any calls to complete before
  250. * continuing.
  251. */
  252. void (*cnic_init)(struct cnic_dev *dev);
  253. void (*cnic_exit)(struct cnic_dev *dev);
  254. void (*cnic_start)(void *ulp_ctx);
  255. void (*cnic_stop)(void *ulp_ctx);
  256. void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
  257. u32 num_cqes);
  258. void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
  259. void (*cm_connect_complete)(struct cnic_sock *);
  260. void (*cm_close_complete)(struct cnic_sock *);
  261. void (*cm_abort_complete)(struct cnic_sock *);
  262. void (*cm_remote_close)(struct cnic_sock *);
  263. void (*cm_remote_abort)(struct cnic_sock *);
  264. void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
  265. char *data, u16 data_size);
  266. struct module *owner;
  267. atomic_t ref_count;
  268. };
  269. extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
  270. extern int cnic_unregister_driver(int ulp_type);
  271. extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
  272. extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
  273. #endif