drivers/net/wireless/wl12xx/tx.c
/*
 * This file is part of wl12xx
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * Contact: Kalle Valo <kalle.valo@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "wl12xx.h"
#include "reg.h"
#include "spi.h"
#include "tx.h"
#include "ps.h"

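/*
 * Check whether the firmware TX double buffer has room for another
 * frame by comparing our data_in counter against the firmware's
 * data_out counter, taking counter wrap-around into account.
 */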
static bool wl12xx_tx_double_buffer_busy(struct wl12xx *wl, u32 data_out_count)
{
        int used, data_in_count;

        data_in_count = wl->data_in_count;

        if (data_in_count < data_out_count)
                /* data_in_count has wrapped */
                data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

        used = data_in_count - data_out_count;

        WARN_ON(used < 0);
        WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);

        if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
                return true;
        else
                return false;
}

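/*
 * Read the TX status word from the firmware and return -EBUSY if the
 * TX packet double buffer is currently full.
 */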
static int wl12xx_tx_path_status(struct wl12xx *wl)
{
        u32 status, addr, data_out_count;
        bool busy;

        addr = wl->data_path->tx_control_addr;
        status = wl12xx_mem_read32(wl, addr);
        data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
        busy = wl12xx_tx_double_buffer_busy(wl, data_out_count);

        if (busy)
                return -EBUSY;

        return 0;
}

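/*
 * Reserve a free slot in the tx_frames array for this skb and return
 * its index, which is used as the frame id towards the firmware.
 * Returns -EBUSY if all slots are in use.
 */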
static int wl12xx_tx_id(struct wl12xx *wl, struct sk_buff *skb)
{
        int i;

        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                if (wl->tx_frames[i] == NULL) {
                        wl->tx_frames[i] = skb;
                        return i;
                }

        return -EBUSY;
}

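/*
 * Fill the control field of the HW TX descriptor: rate policy, ack
 * policy, TX completion request and the QoS flag derived from the
 * 802.11 frame control field.
 */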
static void wl12xx_tx_control(struct tx_double_buffer_desc *tx_hdr,
                              struct ieee80211_tx_info *control, u16 fc)
{
        *(u16 *)&tx_hdr->control = 0;

        tx_hdr->control.rate_policy = 0;

        /* 802.11 packets */
        tx_hdr->control.packet_type = 0;

        if (control->flags & IEEE80211_TX_CTL_NO_ACK)
                tx_hdr->control.ack_policy = 1;

        tx_hdr->control.tx_complete = 1;

        if ((fc & IEEE80211_FTYPE_DATA) &&
            ((fc & IEEE80211_STYPE_QOS_DATA) ||
             (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
                tx_hdr->control.qos = 1;
}

/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH      16
#define MAX_MPDU_SECURITY_LENGTH      16
#define WLAN_QOS_HDR_LEN              26
#define MAX_MPDU_HEADER_AND_SECURITY  (MAX_MPDU_SECURITY_LENGTH + \
                                       WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE                 252
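/*
 * Estimate how many HW memory blocks the frame will occupy, taking
 * the fragmentation threshold and per-fragment header and security
 * overhead into account, and store the result in the descriptor.
 */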
static void wl12xx_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
        u16 payload_len, frag_threshold, mem_blocks;
        u16 num_mpdus, mem_blocks_per_frag;

        frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
        tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

        payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH;

        if (payload_len > frag_threshold) {
                mem_blocks_per_frag =
                        ((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
                         HW_BLOCK_SIZE) + 1;
                num_mpdus = payload_len / frag_threshold;
                mem_blocks = num_mpdus * mem_blocks_per_frag;
                payload_len -= num_mpdus * frag_threshold;
                num_mpdus++;

        } else {
                mem_blocks_per_frag = 0;
                mem_blocks = 0;
                num_mpdus = 1;
        }

        mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

        if (num_mpdus > 1)
                mem_blocks += min(num_mpdus, mem_blocks_per_frag);

        tx_hdr->num_mem_blocks = mem_blocks;
}

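/*
 * Prepend the HW TX descriptor to the skb and fill it in: length,
 * rate, expiry time, frame id, queue, control bits and memory block
 * count.
 */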
static int wl12xx_tx_fill_hdr(struct wl12xx *wl, struct sk_buff *skb,
                              struct ieee80211_tx_info *control)
{
        struct tx_double_buffer_desc *tx_hdr;
        struct ieee80211_rate *rate;
        int id;
        u16 fc;

        if (!skb)
                return -EINVAL;

        id = wl12xx_tx_id(wl, skb);
        if (id < 0)
                return id;

        fc = *(u16 *)skb->data;
        tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
                                                           sizeof(*tx_hdr));

        tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
        rate = ieee80211_get_tx_rate(wl->hw, control);
        tx_hdr->rate = cpu_to_le16(rate->hw_value);
        tx_hdr->expiry_time = cpu_to_le32(1 << 16);
        tx_hdr->id = id;

        /* FIXME: how to get the correct queue id? */
        tx_hdr->xmit_queue = 0;

        wl12xx_tx_control(tx_hdr, control, fc);
        wl12xx_tx_frag_block_num(tx_hdr);

        return 0;
}

/* We copy the packet to the target */
static int wl12xx_tx_send_packet(struct wl12xx *wl, struct sk_buff *skb,
                                 struct ieee80211_tx_info *control)
{
        struct tx_double_buffer_desc *tx_hdr;
        int len;
        u32 addr;

        if (!skb)
                return -EINVAL;

        tx_hdr = (struct tx_double_buffer_desc *) skb->data;

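        /*
         * For TKIP, leave room for the IV between the 802.11 header
         * and the payload by moving the descriptor and header forward
         * by WL12XX_TKIP_IV_SPACE bytes.
         */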
        if (control->control.hw_key &&
            control->control.hw_key->alg == ALG_TKIP) {
                int hdrlen;
                u16 fc;
                u8 *pos;

                fc = *(u16 *)(skb->data + sizeof(*tx_hdr));
                tx_hdr->length += WL12XX_TKIP_IV_SPACE;

                hdrlen = ieee80211_hdrlen(fc);

                pos = skb_push(skb, WL12XX_TKIP_IV_SPACE);
                memmove(pos, pos + WL12XX_TKIP_IV_SPACE,
                        sizeof(*tx_hdr) + hdrlen);
        }

        /* Revisit. This is a workaround for getting non-aligned packets.
           This happens at least with EAPOL packets from the user space.
           Our DMA requires packets to be aligned on a 4-byte boundary.
        */
        if (unlikely((long)skb->data & 0x03)) {
                int offset = (4 - (long)skb->data) & 0x03;
                wl12xx_debug(DEBUG_TX, "skb offset %d", offset);

                /* check whether the current skb can be used */
                if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
                        unsigned char *src = skb->data;

                        /* align the buffer on a 4-byte boundary */
                        skb_reserve(skb, offset);
                        memmove(skb->data, src, skb->len);
                } else {
                        wl12xx_info("No handler, fixme!");
                        return -EINVAL;
                }
        }

        /* Our skb->data at this point includes the HW header */
        len = WL12XX_TX_ALIGN(skb->len);

        if (wl->data_in_count & 0x1)
                addr = wl->data_path->tx_packet_ring_addr +
                        wl->data_path->tx_packet_ring_chunk_size;
        else
                addr = wl->data_path->tx_packet_ring_addr;

        wl12xx_spi_mem_write(wl, addr, skb->data, len);

        wl12xx_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
                     tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);

        return 0;
}

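/*
 * Tell the firmware that a new frame has been written to the TX
 * double buffer, alternating between the two trigger registers, and
 * advance our data_in counter.
 */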
static void wl12xx_tx_trigger(struct wl12xx *wl)
{
        u32 data, addr;

        if (wl->data_in_count & 0x1) {
                addr = ACX_REG_INTERRUPT_TRIG_H;
                data = INTR_TRIG_TX_PROC1;
        } else {
                addr = ACX_REG_INTERRUPT_TRIG;
                data = INTR_TRIG_TX_PROC0;
        }

        wl12xx_reg_write32(wl, addr, data);

        /* Bumping data in */
        wl->data_in_count = (wl->data_in_count + 1) &
                TX_STATUS_DATA_OUT_COUNT_MASK;
}

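/*
 * Transmit a single frame: program the default key if needed, check
 * that the TX path has room, fill the HW descriptor, copy the frame
 * to the firmware and trigger the transmission.
 */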
/* caller must hold wl->mutex */
static int wl12xx_tx_frame(struct wl12xx *wl, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info;
        int ret = 0;
        u8 idx;

        info = IEEE80211_SKB_CB(skb);

        if (info->control.hw_key) {
                idx = info->control.hw_key->hw_key_idx;
                if (unlikely(wl->default_key != idx)) {
                        ret = wl12xx_acx_default_key(wl, idx);
                        if (ret < 0)
                                return ret;
                }
        }

        ret = wl12xx_tx_path_status(wl);
        if (ret < 0)
                return ret;

        ret = wl12xx_tx_fill_hdr(wl, skb, info);
        if (ret < 0)
                return ret;

        ret = wl12xx_tx_send_packet(wl, skb, info);
        if (ret < 0)
                return ret;

        wl12xx_tx_trigger(wl);

        return ret;
}

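/*
 * Work queue handler: drain the TX queue, waking the chip from ELP
 * for the first frame and stopping the mac80211 queues if the
 * firmware buffer fills up.
 */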
void wl12xx_tx_work(struct work_struct *work)
{
        struct wl12xx *wl = container_of(work, struct wl12xx, tx_work);
        struct sk_buff *skb;
        bool woken_up = false;
        int ret;

        mutex_lock(&wl->mutex);

        if (unlikely(wl->state == WL12XX_STATE_OFF))
                goto out;

        while ((skb = skb_dequeue(&wl->tx_queue))) {
                if (!woken_up) {
                        wl12xx_ps_elp_wakeup(wl);
                        woken_up = true;
                }

                ret = wl12xx_tx_frame(wl, skb);
                if (ret == -EBUSY) {
                        /* firmware buffer is full, stop queues */
                        wl12xx_debug(DEBUG_TX, "tx_work: fw buffer full, "
                                     "stop queues");
                        ieee80211_stop_queues(wl->hw);
                        wl->tx_queue_stopped = true;
                        skb_queue_head(&wl->tx_queue, skb);
                        goto out;
                } else if (ret < 0) {
                        dev_kfree_skb(skb);
                        goto out;
                }
        }

out:
        if (woken_up)
                wl12xx_ps_elp_sleep(wl);

        mutex_unlock(&wl->mutex);
}

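/*
 * Build a short human-readable string out of the TX status bits for
 * debugging purposes.
 */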
static const char *wl12xx_tx_parse_status(u8 status)
{
        /* 8 bit status field, one character per bit plus null */
        static char buf[9];
        int i = 0;

        memset(buf, 0, sizeof(buf));

        if (status & TX_DMA_ERROR)
                buf[i++] = 'm';
        if (status & TX_DISABLED)
                buf[i++] = 'd';
        if (status & TX_RETRY_EXCEEDED)
                buf[i++] = 'r';
        if (status & TX_TIMEOUT)
                buf[i++] = 't';
        if (status & TX_KEY_NOT_FOUND)
                buf[i++] = 'k';
        if (status & TX_ENCRYPT_FAIL)
                buf[i++] = 'e';
        if (status & TX_UNAVAILABLE_PRIORITY)
                buf[i++] = 'p';

        /* bit 0 is unused apparently */

        return buf;
}

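/*
 * Handle a single TX result from the firmware: report the status to
 * mac80211 (after stripping our private header and the TKIP IV
 * space), release the tx_frames slot and restart transmission if the
 * queues were stopped.
 */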
static void wl12xx_tx_packet_cb(struct wl12xx *wl,
                                struct tx_result *result)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int hdrlen, ret;
        u8 *frame;

        skb = wl->tx_frames[result->id];
        if (skb == NULL) {
                wl12xx_error("SKB for packet %d is NULL", result->id);
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
            (result->status == TX_SUCCESS))
                info->flags |= IEEE80211_TX_STAT_ACK;

        info->status.rates[0].count = result->ack_failures + 1;
        wl->stats.retry_count += result->ack_failures;

        /*
         * We have to remove our private TX header before pushing
         * the skb back to mac80211.
         */
        frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
        if (info->control.hw_key &&
            info->control.hw_key->alg == ALG_TKIP) {
                hdrlen = ieee80211_get_hdrlen_from_skb(skb);
                memmove(frame + WL12XX_TKIP_IV_SPACE, frame, hdrlen);
                skb_pull(skb, WL12XX_TKIP_IV_SPACE);
        }

        wl12xx_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
                     " status 0x%x (%s)",
                     result->id, skb, result->ack_failures, result->rate,
                     result->status, wl12xx_tx_parse_status(result->status));

        ieee80211_tx_status(wl->hw, skb);

        wl->tx_frames[result->id] = NULL;

        if (wl->tx_queue_stopped) {
                wl12xx_debug(DEBUG_TX, "cb: queue was stopped");

                skb = skb_dequeue(&wl->tx_queue);

                /* The skb can be NULL because tx_work might have been
                   scheduled before the queue was stopped making the
                   queue empty */

                if (skb) {
                        ret = wl12xx_tx_frame(wl, skb);
                        if (ret == -EBUSY) {
                                /* firmware buffer is still full */
                                wl12xx_debug(DEBUG_TX, "cb: fw buffer "
                                             "still full");
                                skb_queue_head(&wl->tx_queue, skb);
                                return;
                        } else if (ret < 0) {
                                dev_kfree_skb(skb);
                                return;
                        }
                }

                wl12xx_debug(DEBUG_TX, "cb: waking queues");
                ieee80211_wake_queues(wl->hw);
                wl->tx_queue_stopped = false;
        }
}

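/*
 * Read the TX result ring from the firmware, process every entry that
 * is marked done and write the cleared entries back so the firmware
 * knows they have been acknowledged.
 */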
/* Called upon reception of a TX complete interrupt */
void wl12xx_tx_complete(struct wl12xx *wl)
{
        int i, result_index, num_complete = 0;
        struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;

        if (unlikely(wl->state != WL12XX_STATE_ON))
                return;

        /* First we read the result */
        wl12xx_spi_mem_read(wl, wl->data_path->tx_complete_addr,
                            result, sizeof(result));

        result_index = wl->next_tx_complete;

        for (i = 0; i < ARRAY_SIZE(result); i++) {
                result_ptr = &result[result_index];

                if (result_ptr->done_1 == 1 &&
                    result_ptr->done_2 == 1) {
                        wl12xx_tx_packet_cb(wl, result_ptr);

                        result_ptr->done_1 = 0;
                        result_ptr->done_2 = 0;

                        result_index = (result_index + 1) &
                                (FW_TX_CMPLT_BLOCK_SIZE - 1);
                        num_complete++;
                } else {
                        break;
                }
        }

        /* Every completed frame needs to be acknowledged */
        if (num_complete) {
                /*
                 * If we've wrapped, we have to clear
                 * the results in 2 steps.
                 */
                if (result_index > wl->next_tx_complete) {
                        /* Only 1 write is needed */
                        wl12xx_spi_mem_write(wl,
                                             wl->data_path->tx_complete_addr +
                                             (wl->next_tx_complete *
                                              sizeof(struct tx_result)),
                                             &result[wl->next_tx_complete],
                                             num_complete *
                                             sizeof(struct tx_result));

                } else if (result_index < wl->next_tx_complete) {
                        /* 2 writes are needed */
                        wl12xx_spi_mem_write(wl,
                                             wl->data_path->tx_complete_addr +
                                             (wl->next_tx_complete *
                                              sizeof(struct tx_result)),
                                             &result[wl->next_tx_complete],
                                             (FW_TX_CMPLT_BLOCK_SIZE -
                                              wl->next_tx_complete) *
                                             sizeof(struct tx_result));

                        wl12xx_spi_mem_write(wl,
                                             wl->data_path->tx_complete_addr,
                                             result,
                                             (num_complete -
                                              FW_TX_CMPLT_BLOCK_SIZE +
                                              wl->next_tx_complete) *
                                             sizeof(struct tx_result));

                } else {
                        /* We have to write the whole array */
                        wl12xx_spi_mem_write(wl,
                                             wl->data_path->tx_complete_addr,
                                             result,
                                             FW_TX_CMPLT_BLOCK_SIZE *
                                             sizeof(struct tx_result));
                }
        }

        wl->next_tx_complete = result_index;
}

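/*
 * Flush all frames that are still queued or waiting for a TX result.
 * Frames for which mac80211 requested a status report are passed back
 * via ieee80211_tx_status().
 */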
/* caller must hold wl->mutex */
void wl12xx_tx_flush(struct wl12xx *wl)
{
        int i;
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;

        /* TX failure */
/*      control->flags = 0; FIXME */

        while ((skb = skb_dequeue(&wl->tx_queue))) {
                info = IEEE80211_SKB_CB(skb);

                wl12xx_debug(DEBUG_TX, "flushing skb 0x%p", skb);

                if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                        continue;

                ieee80211_tx_status(wl->hw, skb);
        }

        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                if (wl->tx_frames[i] != NULL) {
                        skb = wl->tx_frames[i];
                        info = IEEE80211_SKB_CB(skb);

                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                                continue;

                        ieee80211_tx_status(wl->hw, skb);
                        wl->tx_frames[i] = NULL;
                }
}