/*
 *
 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.12 $)
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 *    $Revision: 1.12 $  $Date: 2005/04/01 21:40:40 $
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

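/*
 * Check whether the output queue can take all elements of the given
 * context, starting at queue->next_buf_to_fill.  Returns the number of
 * buffers that would be consumed, or -EBUSY if one of the required
 * buffers is not empty.
 */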
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}

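/*
 * Release all memory held by a context: the data pages, the page
 * array, the element array and the context structure itself.
 */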
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        if (ctx->elements != NULL)
                kfree(ctx->elements);
        kfree(ctx);
}

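/*
 * A context may be referenced by several output buffers; it is only
 * freed once the last reference has been dropped.
 */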
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}

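/*
 * Drop all context references attached to an output buffer and free the
 * reference nodes.
 */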
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

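/*
 * Attach a reference to the given context to an output buffer.  Takes a
 * reference on the context; returns 0 on success or -ENOMEM if the
 * reference node cannot be allocated.
 */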
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}

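/*
 * Map the elements of a prepared context into the QDIO output buffers of
 * the queue, starting at the given buffer index.  Whole segments are
 * filled into a buffer; when the next segment no longer fits, the current
 * buffer is set to PRIMED and filling continues in the following buffer.
 * Every buffer that receives data gets a reference to the context.
 * Returns the number of buffers that were set to PRIMED (and thus need to
 * be flushed), or -EBUSY if the first buffer is not empty.
 */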
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context
                         */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into the current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into the buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
#ifdef CONFIG_QETH_PERF_STATS
                queue->card->perf_stats.skbs_sent_pack++;
#endif
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}

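/*
 * Copy the headers for one segment (qeth header, optionally MAC header
 * and VLAN tag, then network and transport header) into the context
 * pages and describe them in a single buffer element.  If the complete
 * header would not fit into the current page, it is placed at the start
 * of the next page.  The positions of the network and transport headers
 * inside the context are recorded (nh_in_ctx/th_in_ctx) so that the TCP
 * checksum can be patched in later.
 */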
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int hdr_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        hdr_len = eddp->nhl + eddp->thl;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                hdr_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                hdr_len += VLAN_HLEN;
        /* does the complete header fit into the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)) {
                /* no -> go to the start of the next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header (?) */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}

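/*
 * Copy len bytes of TCP payload from the skb (linear data first, then
 * the page fragments) to dst, updating the running checksum and the
 * current position within the skb.
 */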
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        u32 *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = qeth_get_skb_data_len(eddp->skb)
                                                - eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT)+
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}

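/*
 * Copy data_len bytes of payload for one segment into the context pages,
 * continuing the checksum started over the TCP header.  The data is split
 * across pages into separate buffer elements flagged as first, middle and
 * last fragment; finally the folded checksum is written into the TCP
 * header that was previously copied into the context.
 */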
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  u32 hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

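/*
 * Zero the TCP checksum field and return the checksum over the IPv4
 * pseudo header and the TCP header; the caller continues this checksum
 * over the segment payload.
 */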
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

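/*
 * Zero the TCP checksum field and return the partial checksum over the
 * IPv6 source and destination addresses and the protocol number, as the
 * starting value for the TCP checksum of this segment.
 */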
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 proto;
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}

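/*
 * Allocate a qeth_eddp_data working area and copy the qeth, network and
 * transport headers of the original skb into it; these copies serve as
 * templates that are adjusted for every generated segment.
 */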
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                memset(eddp, 0, sizeof(struct qeth_eddp_data));
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}

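/*
 * Walk through the skb payload in chunks of at most tso_size bytes and
 * emit one segment per chunk into the context: the qeth, IP and TCP
 * header templates are updated for the segment (lengths, IP checksum,
 * FIN/PSH only on the last segment, IP id and TCP sequence number for
 * the following one), the per-segment TCP checksum is started, and the
 * headers and payload are copied into the context pages.
 */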
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        u32 hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        tcph = eddp->skb->h.th;
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl -
                                                     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == ETH_P_IP) {
                        eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
                                                 eddp->thl;
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                                eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == ETH_P_IP)
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for the next round */
                if (eddp->skb->protocol == ETH_P_IP)
                        eddp->nh.ip4.h.id++;
                eddp->th.tcp.h.seq += data_len;
        }
}

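/*
 * Build the header templates for TCP segmentation from the original skb
 * (IPv4 or IPv6, optionally with MAC header and VLAN tag in layer-2
 * mode), clear FIN/PSH in the template so they only appear on the last
 * segment, and fill the context.  Returns 0 on success or -ENOMEM.
 */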
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == ETH_P_IP)
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
                                skb->nh.iph->ihl*4,
                                (u8 *)skb->h.th, skb->h.th->doff*4);
        else
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
                                sizeof(struct ipv6hdr),
                                (u8 *)skb->h.th, skb->h.th->doff*4);

        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = __constant_htons(skb->protocol);
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the following flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill the context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}

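/*
 * Compute how many pages and buffer elements the context needs for the
 * segments of the skb.  If several complete segments (payload plus all
 * headers) fit into one page, each segment occupies a single element;
 * otherwise each segment spans several elements/pages.  As an
 * illustrative example with assumed numbers (PAGE_SIZE 4096, tso_size
 * 1400, hdr_len 86, tso_segs 10): two segments fit per page, so
 * elements_per_skb = 1, num_pages = (10 + 1) / 2 + 1 = 6 and
 * num_elements = 11.
 */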
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
                                     PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->tso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->tso_segs + 1);
}

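/*
 * Allocate and initialize an EDDP context for the given skb: calculate
 * the required sizes, allocate the page array, the data pages and the
 * element array.  Returns NULL if the per-segment element count exceeds
 * a single buffer or if any allocation fails.
 */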
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate the pages */
        ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        memset(ctx, 0, sizeof(struct qeth_eddp_context));
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kmalloc(ctx->num_elements *
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        memset(ctx->elements, 0,
               ctx->num_elements * sizeof(struct qeth_eddp_element));
        /* reset num_elements; it is incremented again while the context is
         * filled, to reflect the number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}

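/*
 * Create an EDDP context sized for TCP segmentation of the skb (IPv4 or
 * IPv6), fill it with the segments and return it with an initial
 * reference count of 1; returns NULL on failure.
 */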
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == ETH_P_IP)
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
                        skb->h.th->doff*4);
        else if (skb->protocol == ETH_P_IPV6)
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        skb->h.th->doff*4);
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}

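/*
 * Entry point for the qeth core: create an EDDP context for an outgoing
 * skb.  Only TCP is supported; for other protocols NULL is returned.
 *
 * A rough sketch of how the core is expected to drive this interface
 * (simplified for illustration, not copied from qeth_main.c):
 *
 *	ctx = qeth_eddp_create_context(card, skb, hdr);
 *	if (ctx == NULL)
 *		... fall back to non-EDDP transmission ...
 *	if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0)
 *		... queue busy, try again later ...
 *	flush_cnt = qeth_eddp_fill_buffer(queue, ctx,
 *					  queue->next_buf_to_fill);
 *	... flush the primed buffers ...
 *	qeth_eddp_put_context(ctx);	/+ drop the creator's reference +/
 */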
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        switch (skb->sk->sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}