/*
 *  drivers/s390/net/qeth_core_offl.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ip6_checksum.h>

#include "qeth_core.h"
#include "qeth_core_mpc.h"
#include "qeth_core_offl.h"
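
/*
 * EDDP (enhanced device driver packing) performs TCP segmentation in
 * the driver: a large gso skb is split into MSS-sized segments whose
 * headers and payload are copied into preallocated context pages,
 * which are then mapped onto qdio buffer elements.
 *
 * A rough caller sketch (illustrative only; the real transmit-path
 * callers live elsewhere in the qeth core):
 *
 *	ctx = qeth_eddp_create_context(card, skb, hdr,
 *				       skb->sk->sk_protocol);
 *	if (ctx == NULL)
 *		return -EINVAL;
 *	if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0)
 *		return -EBUSY;
 *	flush_cnt = qeth_eddp_fill_buffer(queue, ctx,
 *					  queue->next_buf_to_fill);
 *	qeth_eddp_put_context(ctx);
 */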
int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
	struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}

static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}
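
/*
 * Context lifetime is reference counted: every qdio output buffer that
 * carries elements of a context takes a reference (see
 * qeth_eddp_buf_ref_context() below) and drops it on buffer completion
 * via qeth_eddp_buf_release_contexts(); the final put frees the pages.
 */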
static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
EXPORT_SYMBOL_GPL(qeth_eddp_put_context);

void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(TRACE, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
	struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
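
/*
 * Map the context's elements onto qdio buffers starting at @index.
 * Returns the number of buffers primed for flushing, or -EBUSY if not
 * even the first element could be placed.
 */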
int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
	struct qeth_eddp_context *ctx, int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked
			 * for available elements in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				QETH_DBF_MESSAGE(2, "could only partially "
					"fill eddp buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			if (ctx->elements[element].length != 0) {
				buffer->element[buf->next_element_to_fill].
					addr = ctx->elements[element].addr;
				buffer->element[buf->next_element_to_fill].
					length = ctx->elements[element].length;
				buffer->element[buf->next_element_to_fill].
					flags = ctx->elements[element].flags;
				buf->next_element_to_fill++;
			}
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
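
/*
 * Write the qeth, MAC/VLAN (layer 2 only), network and transport
 * headers of one segment into the context pages and describe them with
 * a single element. If the complete segment would not fit into the
 * remainder of the current page, it starts on the next page instead.
 */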
static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
	struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does complete packet fit in current page ? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (?) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
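
/*
 * Copy @len payload bytes from the skb (linear data or page frags) to
 * @dst, folding the copied bytes into *@hcsum as we go. eddp->frag == -1
 * means we are still in skb->data; it then walks skb_shinfo()->frags[].
 */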
static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
	int len, __wsum *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
						 dst, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len) -
						eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->frags[
					eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)((page_to_pfn(frag->page) <<
					PAGE_SHIFT) + frag->page_offset +
					eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
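
/*
 * Scatter one segment's payload across the context pages; elements
 * that cross a page boundary get SBAL first/middle/last fragment flags
 * so they are sent as one frame. Once all data is copied, the
 * accumulated checksum is folded into the TCP header previously placed
 * by qeth_eddp_create_segment_hdrs().
 */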
static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
	struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
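
/*
 * Seed the TCP checksum with the IPv4 pseudo header (addresses,
 * protocol, tcp length = thl + data_len) and add the TCP header
 * itself; the payload is accumulated later, while it is copied, in
 * qeth_eddp_copy_data_tcp().
 */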
static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
	int data_len)
{
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial(&eddp->th, eddp->thl, phcsum);
}

static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
	int data_len)
{
	__be32 proto;
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial(&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial(&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial(&proto, sizeof(u32), phcsum);
	return phcsum;
}
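
/*
 * Allocate a scratch descriptor carrying private copies of the
 * original qeth, network and transport headers; the copies are patched
 * per segment (lengths, IPv4 id, TCP seq) before being written out.
 */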
static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
	u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}
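
/*
 * Main segmentation loop: carve the payload into gso_size chunks and,
 * for each segment, update the qeth/IP/TCP header copies (frame
 * length, IPv4 id and checksum, TCP seq; FIN/PSH only on the last
 * segment) before emitting header and data elements into the context.
 */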
static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
	struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	__wsum hcsum;

	QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
	}
	tcph = tcp_hdr(eddp->skb);
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl;
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == htons(ETH_P_IP)) {
			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
						       eddp->thl);
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = htons(data_len +
							   eddp->thl);
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == htons(ETH_P_IP))
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == htons(ETH_P_IP))
			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
					   data_len);
	}
}
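
/*
 * Build the eddp descriptor from the skb's real headers (including MAC
 * and VLAN data in layer 2 mode), clear FIN/PSH so that intermediate
 * segments do not carry them, and run the segmentation loop above.
 */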
static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
	struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(TRACE, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == htons(ETH_P_IP))
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  ip_hdrlen(skb),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));
	else
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  sizeof(struct ipv6hdr),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));

	if (eddp == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb_set_mac_header(skb, sizeof(struct qeth_hdr));
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = skb->protocol;
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
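
/*
 * Size the context. As an illustration (assuming 4 KB pages): with
 * gso_size 1460 and hdr_len 66, one page holds two segments, so
 * elements_per_skb is 1 and num_pages is (gso_segs + 1) / 2 + 1; with
 * a gso_size of 9000, each segment needs three elements and three
 * pages are reserved per segment.
 */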
static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
	struct sk_buff *skb, int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}

static struct qeth_eddp_context *qeth_eddp_create_context_generic(
	struct qeth_card *card, struct sk_buff *skb, int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(TRACE, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; it is incremented again while the context is
	 * filled, to reflect the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}

static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
	struct qeth_card *card, struct sk_buff *skb,
	struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(TRACE, 5, "creddpct");
	if (skb->protocol == htons(ETH_P_IP))
		ctx = qeth_eddp_create_context_generic(card, skb,
			(sizeof(struct qeth_hdr) +
			 ip_hdrlen(skb) +
			 tcp_hdrlen(skb)));
	else if (skb->protocol == htons(ETH_P_IPV6))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			tcp_hdrlen(skb));
	else
		QETH_DBF_TEXT(TRACE, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}

struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
	struct sk_buff *skb, struct qeth_hdr *qhdr,
	unsigned char sk_protocol)
{
	QETH_DBF_TEXT(TRACE, 5, "creddpc");
	switch (sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
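
/*
 * Prepare the qeth TSO header for hardware segmentation: fill in the
 * fixed extension-header fields, the MSS and header lengths, and the
 * pseudo header checksum seed that the adapter expects in tcph->check.
 */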
void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
	struct sk_buff *skb)
{
	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
	struct tcphdr *tcph = tcp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct ipv6hdr *ip6h = ipv6_hdr(skb);

	QETH_DBF_TEXT(TRACE, 5, "tsofhdr");
	/* fix header to TSO values ... */
	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
	/* set values which are fixed for the first approach ... */
	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
	hdr->ext.imb_hdr_no = 1;
	hdr->ext.hdr_type = 1;
	hdr->ext.hdr_version = 1;
	hdr->ext.hdr_len = 28;
	/* insert non-fixed values */
	hdr->ext.mss = skb_shinfo(skb)->gso_size;
	hdr->ext.dg_hdr_len = (__u16)(iph->ihl * 4 + tcph->doff * 4);
	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
				       sizeof(struct qeth_hdr_tso));
	tcph->check = 0;
	if (skb->protocol == htons(ETH_P_IPV6)) {
		ip6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					       0, IPPROTO_TCP, 0);
	} else {
		/* OSA wants us to set these values ... */
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
		iph->tot_len = 0;
		iph->check = 0;
	}
}
EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
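
/*
 * Software checksum fallback for the transmit path: compute the
 * complete TCP or UDP checksum (IPv4 or IPv6) over transport header
 * and payload when it cannot be offloaded for this frame.
 */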
void qeth_tx_csum(struct sk_buff *skb)
{
	int tlen;

	if (skb->protocol == htons(ETH_P_IP)) {
		tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_tcpudp_magic(
					ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					tlen, ip_hdr(skb)->protocol,
					skb_checksum(skb,
						skb_transport_offset(skb),
						tlen, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_tcpudp_magic(
					ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					tlen, ip_hdr(skb)->protocol,
					skb_checksum(skb,
						skb_transport_offset(skb),
						tlen, 0));
			break;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_ipv6_magic(
					&ipv6_hdr(skb)->saddr,
					&ipv6_hdr(skb)->daddr,
					ipv6_hdr(skb)->payload_len,
					ipv6_hdr(skb)->nexthdr,
					skb_checksum(skb,
						skb_transport_offset(skb),
						ipv6_hdr(skb)->payload_len,
						0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_ipv6_magic(
					&ipv6_hdr(skb)->saddr,
					&ipv6_hdr(skb)->daddr,
					ipv6_hdr(skb)->payload_len,
					ipv6_hdr(skb)->nexthdr,
					skb_checksum(skb,
						skb_transport_offset(skb),
						ipv6_hdr(skb)->payload_len,
						0));
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(qeth_tx_csum);