/*
 * mv643xx_eth.h - private definitions for the Marvell MV643XX
 * gigabit ethernet driver (mv643xx_eth).
 */
#ifndef __MV643XX_ETH_H__
#define __MV643XX_ETH_H__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include <linux/mv643xx.h>	/* platform register/window definitions */

#include <asm/dma-mapping.h>
/*
 * Compile-time feature selection for the driver.
 *
 * Checksum offload for Tx works for most packets, but
 * fails if previous packet sent did not use hw csum
 */
#define MV643XX_CHECKSUM_OFFLOAD_TX	/* enable hw Tx checksum generation */
#define MV643XX_NAPI			/* use NAPI polling for Rx */
#define MV643XX_TX_FAST_REFILL		/* opportunistic Tx ring cleanup */
#undef MV643XX_COAL			/* Rx interrupt coalescing disabled */
/*
 * Number of RX / TX descriptors on RX / TX rings.
 * Note that allocating RX descriptors is done by allocating the RX
 * ring AND a preallocated RX buffers (skb's) for each descriptor.
 * The TX descriptors only allocates the TX descriptors ring,
 * with no pre allocated TX buffers (skb's are allocated by higher layers.
 */

/* Default TX ring size is 1000 descriptors */
#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000

/* Default RX ring size is 400 descriptors */
#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400

#define MV643XX_TX_COAL 100		/* Tx coalescing threshold */
#ifdef MV643XX_COAL
#define MV643XX_RX_COAL 100		/* Rx coalescing threshold */
#endif

/*
 * Worst-case number of Tx descriptors one skb can consume: one per
 * page fragment plus one for the linear header when hw csum (and thus
 * scatter/gather) is enabled, otherwise a single descriptor.
 */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB 1
#endif
/* Extra bytes around the payload that an Rx buffer must accommodate */
#define ETH_VLAN_HLEN 4			/* 802.1q tag length */
#define ETH_FCS_LEN 4			/* frame check sequence length */
#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
			 ETH_VLAN_HLEN + ETH_FCS_LEN)
/* Rx skb size: MTU + all headers/trailers, rounded for DMA alignment */
#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())

#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
/*
 * Interrupt cause / mask bit definitions. Rx bits live in the main
 * cause register; Tx, PHY and state bits live in the extended one
 * (reached through ETH_INT_CAUSE_EXT in the main register).
 */
#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
#define ETH_INT_CAUSE_EXT 0x00000002	/* summary bit for extended cause */
#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY 0x00010000
#define ETH_INT_CAUSE_STATE 0x00100000
#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
				ETH_INT_CAUSE_STATE)

#define ETH_INT_MASK_ALL 0x00000000
#define ETH_INT_MASK_ALL_EXT 0x00000000

/* PHY access busy-wait parameters */
#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS 10

/* Buffer offset from buffer pointer */
#define RX_BUF_OFFSET 0x2
/* Gigabit Ethernet Unit Global Registers */

/*
 * MIB Counters register definitions: byte offsets of each hardware
 * statistics counter within the per-port MIB counter block. These map
 * one-to-one onto the fields of struct mv643xx_mib_counters below.
 * The *_LOW/*_HIGH pairs form 64-bit octet counters.
 */
#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
#define ETH_MIB_FRAMES_64_OCTETS 0x20
#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
#define ETH_MIB_GOOD_FRAMES_SENT 0x40
#define ETH_MIB_EXCESSIVE_COLLISION 0x44
#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
#define ETH_MIB_FC_SENT 0x54
#define ETH_MIB_GOOD_FC_RECEIVED 0x58
#define ETH_MIB_BAD_FC_RECEIVED 0x5c
#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
#define ETH_MIB_OVERSIZE_RECEIVED 0x68
#define ETH_MIB_JABBER_RECEIVED 0x6c
#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
#define ETH_MIB_BAD_CRC_EVENT 0x74
#define ETH_MIB_COLLISION 0x78
#define ETH_MIB_LATE_COLLISION 0x7c
/* Port serial status reg (PSR) */
#define ETH_INTERFACE_PCM 0x00000001
#define ETH_LINK_IS_UP 0x00000002
#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
#define ETH_GMII_SPEED_1000 0x00000010
#define ETH_MII_SPEED_100 0x00000020
#define ETH_TX_IN_PROGRESS 0x00000080
#define ETH_BYPASS_ACTIVE 0x00000100
#define ETH_PORT_AT_PARTITION_STATE 0x00000200
#define ETH_PORT_TX_FIFO_EMPTY 0x00000400

/*
 * SMI reg (MDIO management interface).
 * NOTE(review): the comments on these four macros were shifted by one
 * line in the original header; realigned here to match the bits they
 * annotate — confirm against the MV643XX datasheet SMI register layout.
 */
#define ETH_SMI_BUSY 0x10000000		/* operation is in progress */
#define ETH_SMI_READ_VALID 0x08000000	/* read data valid (completion of read) */
#define ETH_SMI_OPCODE_WRITE 0		/* opcode: 0 - write */
#define ETH_SMI_OPCODE_READ 0x04000000	/* opcode: 1 - read */
/* Interrupt Cause Register Bit Definitions */

/*
 * SDMA command status fields macros: bit definitions for the cmd_sts
 * word of the Rx/Tx descriptors defined below.
 */

/* Tx & Rx descriptors status */
#define ETH_ERROR_SUMMARY 0x00000001

/* Tx & Rx descriptors command */
#define ETH_BUFFER_OWNED_BY_DMA 0x80000000

/* Tx descriptors status (error code when ETH_ERROR_SUMMARY is set) */
#define ETH_LC_ERROR 0			/* late collision */
#define ETH_UR_ERROR 0x00000002		/* underrun */
#define ETH_RL_ERROR 0x00000004		/* retransmit limit */
#define ETH_LLC_SNAP_FORMAT 0x00000200

/* Rx descriptors status */
#define ETH_OVERRUN_ERROR 0x00000002
#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
#define ETH_RESOURCE_ERROR 0x00000006
#define ETH_VLAN_TAGGED 0x00080000
#define ETH_BPDU_FRAME 0x00100000
#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
#define ETH_OTHER_FRAME_TYPE 0x00400000
#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
#define ETH_FRAME_TYPE_IP_V_4 0x01000000
#define ETH_FRAME_HEADER_OK 0x02000000
#define ETH_RX_LAST_DESC 0x04000000
#define ETH_RX_FIRST_DESC 0x08000000
#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
#define ETH_RX_ENABLE_INTERRUPT 0x20000000
#define ETH_LAYER_4_CHECKSUM_OK 0x40000000

/* Rx descriptors byte count */
#define ETH_FRAME_FRAGMENTED 0x00000004

/* Tx descriptors command */
#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
#define ETH_FRAME_SET_TO_VLAN 0x00008000
#define ETH_UDP_FRAME 0x00010000
#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
#define ETH_ZERO_PADDING 0x00080000
#define ETH_TX_LAST_DESC 0x00100000
#define ETH_TX_FIRST_DESC 0x00200000
#define ETH_GEN_CRC 0x00400000
#define ETH_TX_ENABLE_INTERRUPT 0x00800000
#define ETH_AUTO_MODE 0x40000000

/* IP header length is written into the Tx command at this bit offset */
#define ETH_TX_IHL_SHIFT 11
/* typedefs */

/*
 * Return status codes for the low-level port data-flow helpers
 * (eth_port_receive(), eth_rx_return_buff(), ...).
 */
typedef enum _eth_func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} ETH_FUNC_RET_STATUS;
/*
 * Possible DMA targets for descriptor/buffer memory.
 * NOTE(review): not referenced elsewhere in this header — presumably
 * used by the .c for address-window setup; confirm against the driver.
 */
typedef enum _eth_target {
	ETH_TARGET_DRAM,
	ETH_TARGET_DEVICE,
	ETH_TARGET_CBS,
	ETH_TARGET_PCI0,
	ETH_TARGET_PCI1
} ETH_TARGET;
/* These are for big-endian machines. Little endian needs different
 * definitions.
 *
 * The field order differs per endianness so that the in-memory layout
 * seen by the SDMA engine is identical either way; do not reorder.
 */
#if defined(__BIG_ENDIAN)
struct eth_rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct eth_tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct eth_rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct eth_tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
/* Unified struct for Rx and Tx operations. The user is not required to */
/* be familiar with either the Tx or the Rx descriptor layout.		*/
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count	*/
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum	*/
	unsigned int cmd_sts;		/* Descriptor command status	*/
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer	*/
	struct sk_buff *return_info;	/* User resource return information */
};
/* Ethernet port specific information */

/*
 * Software copy of the hardware MIB statistics counters; each field
 * mirrors one ETH_MIB_* register above. The 64-bit octet counters
 * combine the corresponding *_LOW/*_HIGH register pair.
 */
struct mv643xx_mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};
/* Per-port driver private state. */
struct mv643xx_private {
	int port_num;			/* User Ethernet port number	*/

	u32 rx_sram_addr;		/* Base address of rx sram area	*/
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area	*/
	u32 tx_sram_size;		/* Size of tx sram area		*/

	int rx_resource_err;		/* Rx ring resource error flag	*/

	/* Tx/Rx rings management indexes fields. For driver use */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

#ifdef MV643XX_TX_FAST_REFILL
	u32 tx_clean_threshold;
#endif

	struct eth_rx_desc *p_rx_desc_area;	/* Rx descriptor ring, CPU view */
	dma_addr_t rx_desc_dma;			/* ... and its DMA address	*/
	int rx_desc_area_size;
	struct sk_buff **rx_skb;		/* skb attached to each Rx desc */

	struct eth_tx_desc *p_tx_desc_area;	/* Tx descriptor ring, CPU view */
	dma_addr_t tx_desc_dma;			/* ... and its DMA address	*/
	int tx_desc_area_size;
	struct sk_buff **tx_skb;		/* skb attached to each Tx desc */

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	struct mv643xx_mib_counters mib_counters;
	spinlock_t lock;	/* NOTE(review): scope of protection not visible
				 * here — confirm against the .c file */

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can be caused when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;		/* Rx interrupt coalescing setting */
	u32 tx_int_coal;		/* Tx interrupt coalescing setting */
	struct mii_if_info mii;		/* generic MII (PHY) state */
};
/*
 * Forward declarations of the low-level port helpers.
 * NOTE(review): these are `static`, so this header is presumably meant
 * to be included only by the single driver .c file — confirm before
 * including it elsewhere.
 */

/* Port operation control routines */
static void eth_port_init(struct mv643xx_private *mp);
static void eth_port_reset(unsigned int eth_port_num);
static void eth_port_start(struct net_device *dev);

/* PHY and MIB routines */
static void ethernet_phy_reset(unsigned int eth_port_num);
static void eth_port_write_smi_reg(unsigned int eth_port_num,
				   unsigned int phy_reg, unsigned int value);
static void eth_port_read_smi_reg(unsigned int eth_port_num,
				  unsigned int phy_reg, unsigned int *value);
static void eth_clear_mib_counters(unsigned int eth_port_num);

/* Port data flow control routines */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
					    struct pkt_info *p_pkt_info);
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
					      struct pkt_info *p_pkt_info);

#endif /* __MV643XX_ETH_H__ */