qeth_tso.c 8.0 KB
  1. /*
  2. * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
  3. *
  4. * qeth TCP Segmentation Offload support.
  5. *
  6. * Copyright 2004 IBM Corporation
  7. *
  8. * Author(s): Frank Pavlic <pavlic@de.ibm.com>
  9. *
  10. * $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
  11. *
  12. */
  13. #include <linux/skbuff.h>
  14. #include <linux/tcp.h>
  15. #include <linux/ip.h>
  16. #include <linux/ipv6.h>
  17. #include <net/ip6_checksum.h>
  18. #include "qeth.h"
  19. #include "qeth_mpc.h"
  20. #include "qeth_tso.h"
  21. /**
  22. * skb already partially prepared
  23. * classic qdio header in skb->data
  24. * */
  25. static inline struct qeth_hdr_tso *
  26. qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
  27. {
  28. int rc = 0;
  29. QETH_DBF_TEXT(trace, 5, "tsoprsk");
  30. rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
  31. if (rc)
  32. return NULL;
  33. return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
  34. }
  35. /**
  36. * fill header for a TSO packet
  37. */
  38. static inline void
  39. qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
  40. {
  41. struct qeth_hdr_tso *hdr;
  42. struct tcphdr *tcph;
  43. struct iphdr *iph;
  44. QETH_DBF_TEXT(trace, 5, "tsofhdr");
  45. hdr = (struct qeth_hdr_tso *) skb->data;
  46. iph = skb->nh.iph;
  47. tcph = skb->h.th;
  48. /*fix header to TSO values ...*/
  49. hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
  50. /*set values which are fix for the first approach ...*/
  51. hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
  52. hdr->ext.imb_hdr_no = 1;
  53. hdr->ext.hdr_type = 1;
  54. hdr->ext.hdr_version = 1;
  55. hdr->ext.hdr_len = 28;
  56. /*insert non-fix values */
  57. hdr->ext.mss = skb_shinfo(skb)->tso_size;
  58. hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
  59. hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
  60. sizeof(struct qeth_hdr_tso));
  61. }
  62. /**
  63. * change some header values as requested by hardware
  64. */
  65. static inline void
  66. qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
  67. {
  68. struct iphdr *iph;
  69. struct ipv6hdr *ip6h;
  70. struct tcphdr *tcph;
  71. iph = skb->nh.iph;
  72. ip6h = skb->nh.ipv6h;
  73. tcph = skb->h.th;
  74. tcph->check = 0;
  75. if (skb->protocol == ETH_P_IPV6) {
  76. ip6h->payload_len = 0;
  77. tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
  78. 0, IPPROTO_TCP, 0);
  79. return;
  80. }
  81. /*OSA want us to set these values ...*/
  82. tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  83. 0, IPPROTO_TCP, 0);
  84. iph->tot_len = 0;
  85. iph->check = 0;
  86. }
  87. static inline struct qeth_hdr_tso *
  88. qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
  89. int ipv, int cast_type)
  90. {
  91. struct qeth_hdr_tso *hdr;
  92. int rc = 0;
  93. QETH_DBF_TEXT(trace, 5, "tsoprep");
  94. /*get headroom for tso qdio header */
  95. hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
  96. if (hdr == NULL) {
  97. QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
  98. return NULL;
  99. }
  100. memset(hdr, 0, sizeof(struct qeth_hdr_tso));
  101. /*fill first 32 bytes of qdio header as used
  102. *FIXME: TSO has two struct members
  103. * with different names but same size
  104. * */
  105. qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
  106. qeth_tso_fill_header(card, skb);
  107. qeth_tso_set_tcpip_header(card, skb);
  108. return hdr;
  109. }
  110. static inline int
  111. qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
  112. {
  113. struct qeth_qdio_out_buffer *buffer;
  114. int flush_cnt = 0;
  115. QETH_DBF_TEXT(trace, 5, "tsobuf");
  116. /* force to non-packing*/
  117. if (queue->do_pack)
  118. queue->do_pack = 0;
  119. buffer = &queue->bufs[queue->next_buf_to_fill];
  120. /* get a new buffer if current is already in use*/
  121. if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
  122. (buffer->next_element_to_fill > 0)) {
  123. atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
  124. queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
  125. QDIO_MAX_BUFFERS_PER_Q;
  126. flush_cnt++;
  127. }
  128. return flush_cnt;
  129. }
  130. static inline void
  131. __qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
  132. struct sk_buff *skb)
  133. {
  134. struct skb_frag_struct *frag;
  135. struct qdio_buffer *buffer;
  136. int fragno, cnt, element;
  137. unsigned long addr;
  138. QETH_DBF_TEXT(trace, 6, "tsfilfrg");
  139. /*initialize variables ...*/
  140. fragno = skb_shinfo(skb)->nr_frags;
  141. buffer = buf->buffer;
  142. element = buf->next_element_to_fill;
  143. /*fill buffer elements .....*/
  144. for (cnt = 0; cnt < fragno; cnt++) {
  145. frag = &skb_shinfo(skb)->frags[cnt];
  146. addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
  147. frag->page_offset;
  148. buffer->element[element].addr = (char *)addr;
  149. buffer->element[element].length = frag->size;
  150. if (cnt < (fragno - 1))
  151. buffer->element[element].flags =
  152. SBAL_FLAGS_MIDDLE_FRAG;
  153. else
  154. buffer->element[element].flags =
  155. SBAL_FLAGS_LAST_FRAG;
  156. element++;
  157. }
  158. buf->next_element_to_fill = element;
  159. }
/**
 * Map a fully prepared TSO skb into the qdio output buffer @buf.
 *
 * The first buffer element carries only the header block (qdio TSO
 * header plus the datagram IP/TCP header, as recorded in
 * ext.dg_hdr_len). The payload follows either as page fragments
 * (delegated to __qeth_tso_fill_buffer_frag) or, for a linear skb,
 * as chunks split on page boundaries.
 *
 * Takes an extra reference on @skb and queues it on the buffer's
 * skb_list so it can be released on TX completion; finally marks the
 * buffer PRIMED. Always returns 1 (one buffer filled).
 */
static inline int
qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
		     struct sk_buff *skb)
{
	int length, length_here, element;
	int hdr_len;
	struct qdio_buffer *buffer;
	struct qeth_hdr_tso *hdr;
	char *data;

	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
	/* increment user count and queue skb for completion handling */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);
	/* initialize all variables... */
	buffer = buf->buffer;
	hdr = (struct qeth_hdr_tso *)skb->data;
	/* headers = qdio TSO header + IP/TCP header lengths */
	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
	data = skb->data + hdr_len;
	length = skb->len - hdr_len;
	element = buf->next_element_to_fill;
	/* fill first buffer entry only with header information */
	buffer->element[element].addr = skb->data;
	buffer->element[element].length = hdr_len;
	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
	buf->next_element_to_fill++;
	/* nonlinear skb: payload lives in page fragments */
	if (skb_shinfo(skb)->nr_frags > 0) {
		__qeth_tso_fill_buffer_frag(buf, skb);
		goto out;
	}
	/* linear skb: fill entries, splitting the payload on page
	 * boundaries (element was advanced past the header entry above) */
	element++;
	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;
		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		if (!length)
			buffer->element[element].flags =
				SBAL_FLAGS_LAST_FRAG;
		else
			buffer->element[element].flags =
				SBAL_FLAGS_MIDDLE_FRAG;
		data += length_here;
		element++;
	}
	/* record fill level and set the buffer to primed ... */
	buf->next_element_to_fill = element;
out:
	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
	return 1;
}
/**
 * Send one TSO skb on @queue: prepare the TSO headers, spin-lock the
 * queue, force non-packing mode, fill exactly one output buffer and
 * flush whatever became ready.
 *
 * Returns 0 on success, -ENOMEM when no headroom could be reserved,
 * -EINVAL when the skb does not fit into a single SBAL.
 *
 * NOTE(review): on the two "card too busy" paths below the skb is
 * counted as tx_dropped, yet the function still falls through to the
 * tx_packets/tx_bytes accounting and returns 0, so the caller cannot
 * tell the packet was never queued (and nobody frees it here).
 * Presumably the caller owns the skb on return 0 — confirm against
 * the callers before changing this.
 */
int
qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
		     struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
	int flush_cnt = 0;
	struct qeth_hdr_tso *hdr;
	struct qeth_qdio_out_buffer *buffer;
	int start_index;

	QETH_DBF_TEXT(trace, 3, "tsosend");
	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
		return -ENOMEM;
	/* check if skb fits in one SBAL ... */
	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
		return -EINVAL;
	/* spin until we own the queue lock, then force non-packing */
	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
				       QETH_OUT_Q_LOCKED,
				       &queue->state));
	start_index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/* check if card is too busy ... */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
		card->stats.tx_dropped++;
		goto out;
	}
	/* force to non-packing and switch to a fresh SBAL if needed */
	flush_cnt += qeth_tso_get_queue_buffer(queue);
	buffer = &queue->bufs[queue->next_buf_to_fill];
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		card->stats.tx_dropped++;
		goto out;
	}
	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
				  QDIO_MAX_BUFFERS_PER_Q;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	if (flush_cnt)
		qeth_flush_buffers(queue, 0, start_index, flush_cnt);
	/* do some statistics */
	card->stats.tx_packets++;
	card->stats.tx_bytes += skb->len;
	return 0;
}
  257. }