qeth_eddp.c

/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
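
/*
 * Check whether enough empty QDIO output buffers are available to hold
 * all elements of the given EDDP context, starting at the queue's next
 * buffer to fill.  Returns the number of buffers needed, or -EBUSY if a
 * non-empty buffer is hit before all elements could be placed.
 */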
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
		    QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
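
/*
 * Release all pages and the element array of a context.  Only called
 * once the reference count has dropped to zero.
 */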
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}

static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
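
/*
 * Drop all context references held by an output buffer; each put may
 * free its context once the last reference is gone.
 */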
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}
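
/*
 * Attach a context to a buffer's ctx_list and take a reference, so the
 * context stays alive until the buffer has been processed.
 */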
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
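
/*
 * Copy the elements of a prepared context into the QDIO output buffers
 * of a queue, starting at the given buffer index.  Buffers that become
 * full are set to PRIMED so they will be flushed.  Returns the number
 * of buffers to flush, or -EBUSY if the first buffer is not empty.
 */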
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked
			 * for available elements in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into the current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
		     buf->next_element_to_fill) < ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
		    QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
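
/*
 * Write the qeth, MAC/VLAN, network and transport headers for one TCP
 * segment into the context's page pool and start a new buffer element
 * describing them.  If the complete packet would not fit into the
 * current page, the headers are placed at the start of the next page.
 */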
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (layer2 mode only) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
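
/*
 * Copy len bytes of TCP payload from the original skb (linear data and
 * page fragments) to dst, updating the running checksum hcsum as the
 * data is copied.  eddp->frag == -1 means we are still in skb->data.
 */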
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len) -
						eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
					frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				/* relies on the 1:1 kernel mapping of s390,
				 * where the frame's physical address is also
				 * a valid kernel virtual address */
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT) +
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
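
/*
 * Copy the payload of one TCP segment into the context pages, creating
 * an additional buffer element whenever a page boundary is crossed and
 * setting the SBAL fragment flags accordingly.  Finally the folded
 * checksum is written into the TCP header previously placed in the
 * context by qeth_eddp_create_segment_hdrs().
 */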
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
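
/*
 * Compute the IPv4 pseudo header checksum for one segment and fold in
 * the TCP header; the payload is added later while it is copied.
 */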
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
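
/*
 * IPv6 variant: the pseudo header checksum covers source and
 * destination address, the upper-layer length and the next-header
 * value, and the TCP header is folded in as in the IPv4 variant.
 */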
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	/* the IPv6 pseudo header also covers the upper-layer length */
	proto = htonl(eddp->thl + data_len);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
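
/*
 * Allocate a qeth_eddp_data helper and snapshot the qeth, network and
 * transport headers of the original skb; these copies are updated per
 * segment and written into the context.
 */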
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}
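
/*
 * Segmentation loop: cut the super-sized skb into gso_size chunks,
 * patch the qeth, IP and TCP headers for each segment (length fields,
 * IP checksum and ID, TCP sequence number, FIN/PSH on the last
 * segment) and write headers and data into the context.
 */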
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
	}
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr (s390 is big-endian, so the length, id and
		 * seq fields can be updated without byte swapping) */
		if (eddp->skb->protocol == htons(ETH_P_IP)) {
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == htons(ETH_P_IP))
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == htons(ETH_P_IP))
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}
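
/*
 * Set up the qeth_eddp_data helper from the original skb (headers,
 * plus MAC/VLAN information in layer2 mode), then run the segmentation
 * loop to fill the context.  Returns 0 on success or -ENOMEM.
 */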
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == htons(ETH_P_IP))
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
						  skb->nh.iph->ihl*4,
						  (u8 *)skb->h.th,
						  skb->h.th->doff*4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
						  sizeof(struct ipv6hdr),
						  (u8 *)skb->h.th,
						  skb->h.th->doff*4);
	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = skb->protocol; /* already net order */
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
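
/*
 * Estimate how many pages and buffer elements the context needs:
 * either several segments fit into one page, or one segment spans
 * several pages.  One spare segment is added to the gso_segs count.
 */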
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}
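
/*
 * Allocate an EDDP context: the context structure itself, the page
 * pool the segments are written into, and the element array describing
 * the filled data.  Returns NULL if any allocation fails or if a
 * single segment would need more elements than a buffer can hold.
 */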
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}
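
/*
 * TCP entry point: compute the per-segment header length for IPv4 or
 * IPv6, allocate the context and fill it.  The caller receives a
 * context with refcnt 1, or NULL on failure.
 */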
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == htons(ETH_P_IP))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
			skb->h.th->doff*4);
	else if (skb->protocol == htons(ETH_P_IPV6))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff*4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}
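
/*
 * Public entry point used by the qeth transmit path: dispatch on the
 * socket protocol.  Only TCP is supported; anything else yields NULL.
 */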
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}