/*
 *  drivers/s390/net/qeth_core_offl.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ip6_checksum.h>

#include "qeth_core.h"
#include "qeth_core_mpc.h"
#include "qeth_core_offl.h"
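
/*
 * Check whether enough empty qdio output buffers are available to hold
 * all elements of @ctx, starting at the queue's next_buf_to_fill slot.
 * Returns the number of buffers needed, or -EBUSY if a required buffer
 * is not empty.
 */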
int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
	struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
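
/*
 * Release all resources of an eddp context: the data pages, the element
 * array and the context structure itself. Called once the last reference
 * is dropped via qeth_eddp_put_context().
 */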
static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}

static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
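
/*
 * Walk the buffer's ctx_list, dropping one context reference per entry
 * and freeing the reference nodes themselves.
 */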
void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(TRACE, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
	struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
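
/*
 * Map the prepared elements of @ctx into qdio buffers, starting at
 * @index. Buffers that fill up (or, in non-packing mode, the final
 * buffer) are set to PRIMED so they get flushed; each buffer that
 * carries part of the context takes a reference on it. Returns the
 * number of buffers to flush, or -EBUSY if the first buffer is not
 * empty.
 */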
int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
	struct qeth_eddp_context *ctx, int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen, since we checked
			 * for available buffers in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			if (ctx->elements[element].length != 0) {
				buffer->element[buf->next_element_to_fill].
					addr = ctx->elements[element].addr;
				buffer->element[buf->next_element_to_fill].
					length = ctx->elements[element].length;
				buffer->element[buf->next_element_to_fill].
					flags = ctx->elements[element].flags;
				buf->next_element_to_fill++;
			}
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
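
/*
 * Copy the qeth/qdio header plus the (optional) MAC and VLAN headers,
 * the network header and the transport header of one segment into the
 * context pages, starting a new page if the whole packet would not fit
 * into the current one. Records where the network and transport headers
 * live in the context so they can be patched later.
 */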
static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
	struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (?) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
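
/*
 * Copy @len bytes of TCP payload from the skb (linear data and/or page
 * frags) to @dst, folding the copied bytes into the running checksum
 * @hcsum and advancing the skb/frag offsets in @eddp.
 */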
static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
	int len, __wsum *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
						 dst, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len)
						- eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->frags[
					eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)((page_to_pfn(frag->page) <<
					PAGE_SHIFT) + frag->page_offset +
					eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
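
/*
 * Copy one segment's payload into the context pages, creating a new
 * element whenever a page boundary is crossed and marking the elements
 * as first/middle/last fragment. Finally folds the accumulated checksum
 * into the TCP header previously placed in the context.
 */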
static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
	struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
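
/*
 * Prime the checksum for an IPv4 TCP segment: clear the check field,
 * compute the pseudo-header checksum and fold in the TCP header. The
 * payload is added later while it is copied.
 */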
static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
	int data_len)
{
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
	int data_len)
{
	__be32 proto;
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}
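
/*
 * Allocate a qeth_eddp_data scratch area and store copies of the qeth,
 * network and transport headers that serve as templates for every
 * segment. Returns NULL on allocation failure.
 */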
static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
	u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}
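
/*
 * Core TSO segmentation loop: for each gso_size-sized chunk of payload,
 * update the qdio, IP and TCP template headers (length fields, IPv4 id,
 * sequence number, FIN/PSH on the last segment), compute the checksum
 * seed and emit headers plus payload into the context.
 */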
static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
	struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	__wsum hcsum;

	QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
	}
	tcph = tcp_hdr(eddp->skb);
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl;
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == htons(ETH_P_IP)) {
			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
						       eddp->thl);
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = htons(data_len +
							   eddp->thl);
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == htons(ETH_P_IP))
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == htons(ETH_P_IP))
			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
					   data_len);
	}
}
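
/*
 * Set up the eddp template data for a TCP skb (IPv4 or IPv6, including
 * the layer-2 MAC/VLAN case) and run the segmentation loop above.
 * Returns 0 on success or -ENOMEM.
 */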
static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
	struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(TRACE, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == htons(ETH_P_IP))
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  ip_hdrlen(skb),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));
	else
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  sizeof(struct ipv6hdr),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));

	if (eddp == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb_set_mac_header(skb, sizeof(struct qeth_hdr));
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = skb->protocol;
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
	}
	/* these flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
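
/*
 * Work out how many pages and buffer elements the segmented skb will
 * occupy: either several segments share a page, or one segment spans
 * several elements.
 */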
static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
	struct sk_buff *skb, int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}
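
/*
 * Allocate an eddp context with enough zeroed pages and element slots
 * for the segments of @skb, where each segment carries @hdr_len bytes
 * of headers. Returns NULL if the skb would need more elements per
 * segment than a buffer can hold or if an allocation fails.
 */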
static struct qeth_eddp_context *qeth_eddp_create_context_generic(
	struct qeth_card *card, struct sk_buff *skb, int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(TRACE, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; it is incremented again while the context is
	 * filled, to reflect the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}
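
/*
 * TCP-specific context creation: size the context for qeth + IP + TCP
 * headers per segment, fill it and hand it back with an initial
 * refcount of 1.
 */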
static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
	struct qeth_card *card, struct sk_buff *skb,
	struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(TRACE, 5, "creddpct");
	if (skb->protocol == htons(ETH_P_IP))
		ctx = qeth_eddp_create_context_generic(card, skb,
			(sizeof(struct qeth_hdr) +
			 ip_hdrlen(skb) +
			 tcp_hdrlen(skb)));
	else if (skb->protocol == htons(ETH_P_IPV6))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			tcp_hdrlen(skb));
	else
		QETH_DBF_TEXT(TRACE, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}
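
/*
 * Entry point for EDDP context creation; currently only sk_protocol ==
 * IPPROTO_TCP is supported.
 */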
struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
	struct sk_buff *skb, struct qeth_hdr *qhdr,
	unsigned char sk_protocol)
{
	QETH_DBF_TEXT(TRACE, 5, "creddpc");
	switch (sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
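
/*
 * Convert the qeth header into a TSO header: mark it as such, fill in
 * the extension header including MSS and lengths, and precompute the
 * pseudo-header checksum the adapter expects in the TCP check field.
 */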
void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
	struct sk_buff *skb)
{
	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
	struct tcphdr *tcph = tcp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct ipv6hdr *ip6h = ipv6_hdr(skb);

	QETH_DBF_TEXT(TRACE, 5, "tsofhdr");
	/* fix header to TSO values ... */
	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
	/* set values which are fix for the first approach ... */
	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
	hdr->ext.imb_hdr_no = 1;
	hdr->ext.hdr_type = 1;
	hdr->ext.hdr_version = 1;
	hdr->ext.hdr_len = 28;
	/* insert non-fix values */
	hdr->ext.mss = skb_shinfo(skb)->gso_size;
	hdr->ext.dg_hdr_len = (__u16)(iph->ihl * 4 + tcph->doff * 4);
	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
				       sizeof(struct qeth_hdr_tso));
	tcph->check = 0;
	if (skb->protocol == htons(ETH_P_IPV6)) {
		ip6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					       0, IPPROTO_TCP, 0);
	} else {
		/* OSA wants us to set these values ... */
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
		iph->tot_len = 0;
		iph->check = 0;
	}
}
EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
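
/*
 * Software checksum fallback for outgoing TCP/UDP packets over IPv4 or
 * IPv6: compute the full transport checksum including the pseudo header
 * and store it in the packet.
 */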
void qeth_tx_csum(struct sk_buff *skb)
{
	int tlen;

	if (skb->protocol == htons(ETH_P_IP)) {
		tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_tcpudp_magic(
				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				tlen, ip_hdr(skb)->protocol,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_tcpudp_magic(
				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				tlen, ip_hdr(skb)->protocol,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tlen = ntohs(ipv6_hdr(skb)->payload_len);
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_ipv6_magic(
				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				tlen, ipv6_hdr(skb)->nexthdr,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_ipv6_magic(
				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				tlen, ipv6_hdr(skb)->nexthdr,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(qeth_tx_csum);