net/mac80211/wme.c
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* Default mapping in classifier to work with default
 * queue setup.
 */
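/* Index is the 802.1d priority (0..7); the value is the AC/hw queue,
 * where queue 0 is the highest-priority AC (VO) and 3 the lowest (BK).
 */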
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

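/* LLC/SNAP header that prefixes an IPv4 payload:
 * DSAP/SSAP 0xAA, control 0x03, zero OUI, EtherType 0x0800.
 */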
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* Given a data frame determine the 802.1p/1d tag to use. */
static unsigned int classify_1d(struct sk_buff *skb)
{
        unsigned int dscp;

        /* skb->priority values from 256->263 are magic values to
         * directly indicate a specific 802.1d priority.  This is used
         * to allow 802.1d priority to be passed directly in from VLAN
         * tags, etc.
         */
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;

        switch (skb->protocol) {
        case __constant_htons(ETH_P_IP):
                dscp = ip_hdr(skb)->tos & 0xfc;
                break;

        default:
                return 0;
        }

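        /* Only trust the three IP precedence bits: if any of the low
         * DSCP bits are set, fall back to best effort (0); otherwise
         * map the precedence straight to an 802.1d priority.
         */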
        if (dscp & 0x1c)
                return 0;
        return dscp >> 5;
}


static int wme_downgrade_ac(struct sk_buff *skb)
{
        switch (skb->priority) {
        case 6:
        case 7:
                skb->priority = 5; /* VO -> VI */
                return 0;
        case 4:
        case 5:
                skb->priority = 3; /* VI -> BE */
                return 0;
        case 0:
        case 3:
                skb->priority = 2; /* BE -> BK */
                return 0;
        default:
                return -1;
        }
}


/* Indicate which queue to use. */
static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
{
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_data(hdr->frame_control)) {
                /* management frames go on AC_VO queue, but are sent
                 * without QoS control fields */
                return 0;
        }

        if (0 /* injected */) {
                /* use AC from radiotap */
        }

        if (!ieee80211_is_data_qos(hdr->frame_control)) {
                skb->priority = 0; /* required for correct WPA/11i MIC */
                return ieee802_1d_to_ac[skb->priority];
        }

        /* use the data classifier to determine what 802.1d tag the
         * data frame has */
        skb->priority = classify_1d(skb);

        /* in case we are a client, verify that ACM (admission control)
         * is not set for this AC; if it is, keep downgrading */
        while (unlikely(local->wmm_acm & BIT(skb->priority))) {
                if (wme_downgrade_ac(skb)) {
                        /* The old code would drop the packet in this
                         * case.
                         */
                        return 0;
                }
        }

        /* look up which queue to use for frames with this 1d tag */
        return ieee802_1d_to_ac[skb->priority];
}

u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct sta_info *sta;
        u16 queue;
        u8 tid;

        queue = classify80211(skb, dev);
        if (unlikely(queue >= local->hw.queues))
                queue = local->hw.queues - 1;

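        /* Requeued frames skip the QoS header fill below; only
         * re-select the queue, steering the frame onto an active
         * aggregation queue if one is set up for this STA/TID.
         */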
        if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
                rcu_read_lock();
                sta = sta_info_get(local, hdr->addr1);
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                if (sta) {
                        struct ieee80211_hw *hw = &local->hw;
                        int ampdu_queue = sta->tid_to_tx_q[tid];

                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
                            test_bit(ampdu_queue, local->queue_pool)) {
                                queue = ampdu_queue;
                                info->flags |= IEEE80211_TX_CTL_AMPDU;
                        } else {
                                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
                        }
                }
                rcu_read_unlock();

                return queue;
        }

        /* Now we know the 1d priority, fill in the QoS header if
         * there is one.
         */
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *p = ieee80211_get_qos_ctl(hdr);
                u8 ack_policy = 0;
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                if (local->wifi_wme_noack_test)
                        ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
                                        QOS_CONTROL_ACK_POLICY_SHIFT;
                /* qos header is 2 bytes, second reserved */
                *p++ = ack_policy | tid;
                *p = 0;

                rcu_read_lock();

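                /* If this STA/TID has an aggregation queue reserved in
                 * the pool, steer the frame there and mark it A-MPDU.
                 */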
                sta = sta_info_get(local, hdr->addr1);
                if (sta) {
                        int ampdu_queue = sta->tid_to_tx_q[tid];
                        struct ieee80211_hw *hw = &local->hw;

                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
                            test_bit(ampdu_queue, local->queue_pool)) {
                                queue = ampdu_queue;
                                info->flags |= IEEE80211_TX_CTL_AMPDU;
                        } else {
                                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
                        }
                }

                rcu_read_unlock();
        }

        return queue;
}

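/* Reserve one of the hardware A-MPDU queues for the given STA/TID pair.
 * Returns 0 on success, -EPERM if no A-MPDU queues are available and
 * -EAGAIN if they are all in use.  Note that the function is currently
 * short-circuited (see the XXX below).
 */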
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                               struct sta_info *sta, u16 tid)
{
        int i;

        /* XXX: currently broken due to cb/requeue use */
        return -EPERM;

        /* prepare the filter and save it for the SW queue
         * matching the received HW queue */

        if (!local->hw.ampdu_queues)
                return -EPERM;

        /* try to get a Qdisc from the pool */
        for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
                if (!test_and_set_bit(i, local->queue_pool)) {
                        ieee80211_stop_queue(local_to_hw(local), i);
                        sta->tid_to_tx_q[tid] = i;

                        /* If there are already pending packets on this
                         * TID, we first need to drain them from the
                         * previous queue, since HT requires strict
                         * in-order delivery */
#ifdef CONFIG_MAC80211_HT_DEBUG
                        if (net_ratelimit()) {
                                DECLARE_MAC_BUF(mac);
                                printk(KERN_DEBUG "allocated aggregation queue"
                                        " %d tid %d addr %s pool=0x%lX\n",
                                        i, tid, print_mac(mac, sta->addr),
                                        local->queue_pool[0]);
                        }
#endif /* CONFIG_MAC80211_HT_DEBUG */
                        return 0;
                }

        return -EAGAIN;
}

/**
 * The caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock.
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
                                   struct sta_info *sta, u16 tid,
                                   u8 requeue)
{
        int agg_queue = sta->tid_to_tx_q[tid];
        struct ieee80211_hw *hw = &local->hw;

        /* return the qdisc to the pool */
        clear_bit(agg_queue, local->queue_pool);
        sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

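        /* Either push any pending frames back through the normal
         * queues or drop them by resetting the aggregation qdisc.
         */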
        if (requeue) {
                ieee80211_requeue(local, agg_queue);
        } else {
                struct netdev_queue *txq;
                spinlock_t *root_lock;
                struct Qdisc *q;

                txq = netdev_get_tx_queue(local->mdev, agg_queue);
                q = rcu_dereference(txq->qdisc);
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);
                qdisc_reset(q);
                spin_unlock_bh(root_lock);
        }
}

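/* Drain every frame queued on the given queue's qdisc and re-inject it
 * through ieee80211_select_queue() so it lands on the proper queue
 * again, e.g. after an aggregation queue has been torn down.
 */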
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
        struct sk_buff_head list;
        spinlock_t *root_lock;
        struct Qdisc *qdisc;
        u32 len;

        rcu_read_lock_bh();

        qdisc = rcu_dereference(txq->qdisc);
        if (!qdisc || !qdisc->dequeue)
                goto out_unlock;

        skb_queue_head_init(&list);

        root_lock = qdisc_root_lock(qdisc);
        spin_lock(root_lock);
        for (len = qdisc->q.qlen; len > 0; len--) {
                struct sk_buff *skb = qdisc->dequeue(qdisc);

                if (skb)
                        __skb_queue_tail(&list, skb);
        }
        spin_unlock(root_lock);

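        /* Re-classify each drained frame and enqueue it on the qdisc
         * of its new queue.
         */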
        for (len = list.qlen; len > 0; len--) {
                struct sk_buff *skb = __skb_dequeue(&list);
                u16 new_queue;

                BUG_ON(!skb);
                new_queue = ieee80211_select_queue(local->mdev, skb);
                skb_set_queue_mapping(skb, new_queue);

                txq = netdev_get_tx_queue(local->mdev, new_queue);

                qdisc = rcu_dereference(txq->qdisc);
                root_lock = qdisc_root_lock(qdisc);

                spin_lock(root_lock);
                qdisc_enqueue_root(skb, qdisc);
                spin_unlock(root_lock);
        }

out_unlock:
        rcu_read_unlock_bh();
}