net/mac80211/wme.c
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* Default mapping in classifier to work with default
 * queue setup.
 */
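/* Indexed by the 802.1D user priority (0-7); the value is the access
 * category queue, where 0 = AC_VO, 1 = AC_VI, 2 = AC_BE and 3 = AC_BK.
 */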
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

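/* LLC/SNAP header prefix for IPv4 (DSAP/SSAP 0xAA, control 0x3, zero OUI,
 * EtherType 0x0800); it does not appear to be referenced anywhere else in
 * this file any more.
 */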
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* Given a data frame, determine the 802.1p/1d tag to use. */
static unsigned int classify_1d(struct sk_buff *skb)
{
        unsigned int dscp;

        /* skb->priority values from 256->263 are magic values to
         * directly indicate a specific 802.1d priority.  This is used
         * to allow 802.1d priority to be passed directly in from VLAN
         * tags, etc.
         */
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                dscp = ip_hdr(skb)->tos & 0xfc;
                break;

        default:
                return 0;
        }
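        /* Keep only the top three bits of the TOS byte (the old IP
         * precedence field), which map directly onto 802.1d priorities
         * 0-7.
         */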

        return dscp >> 5;
}

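/* Lower the frame's 802.1d priority by one access category: VO -> VI,
 * VI -> BE, BE -> BK.  Used by the classifier when the chosen AC has the
 * ACM (admission control mandatory) bit set; returns -1 once the priority
 * cannot be lowered any further.
 */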
static int wme_downgrade_ac(struct sk_buff *skb)
{
        switch (skb->priority) {
        case 6:
        case 7:
                skb->priority = 5; /* VO -> VI */
                return 0;
        case 4:
        case 5:
                skb->priority = 3; /* VI -> BE */
                return 0;
        case 0:
        case 3:
                skb->priority = 2; /* BE -> BK */
                return 0;
        default:
                return -1;
        }
}


/* Indicate which queue to use.  */
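/* Returns the access-category queue index (0 = VO ... 3 = BK) for the frame
 * and, for QoS data frames, leaves the chosen 802.1d priority in
 * skb->priority so the caller can fill in the QoS control field.  If the
 * selected AC has the ACM bit set, the priority is downgraded until an
 * admissible AC is found; if none is, 0 is returned.
 */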
static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_data(hdr->frame_control)) {
                /* management frames go on AC_VO queue, but are sent
                 * without QoS control fields */
                return 0;
        }

        if (0 /* injected */) {
                /* use AC from radiotap */
        }

        if (!ieee80211_is_data_qos(hdr->frame_control)) {
                skb->priority = 0; /* required for correct WPA/11i MIC */
                return ieee802_1d_to_ac[skb->priority];
        }

        /* use the data classifier to determine what 802.1d tag the
         * data frame has */
        skb->priority = classify_1d(skb);

        /* in case we are a client, verify that ACM is not set for this AC */
        while (unlikely(local->wmm_acm & BIT(skb->priority))) {
                if (wme_downgrade_ac(skb)) {
                        /* The old code would drop the packet in this
                         * case.
                         */
                        return 0;
                }
        }

        /* look up which queue to use for frames with this 1d tag */
        return ieee802_1d_to_ac[skb->priority];
}

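/* Select the TX queue for a frame going out on the master device: classify
 * the frame, clamp the result to the number of hardware queues, redirect
 * requeued frames to their per-TID aggregation queue when one is active,
 * and fill in the QoS control field for QoS data frames.
 */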
u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        struct ieee80211_master_priv *mpriv = netdev_priv(dev);
        struct ieee80211_local *local = mpriv->local;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct sta_info *sta;
        u16 queue;
        u8 tid;

        queue = classify80211(local, skb);
        if (unlikely(queue >= local->hw.queues))
                queue = local->hw.queues - 1;

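        /* A requeued frame keeps its earlier classification; just redirect
         * it onto the station's per-TID aggregation queue if one has been
         * allocated from the pool, and set or clear the AMPDU flag
         * accordingly.
         */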
        if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
                rcu_read_lock();
                sta = sta_info_get(local, hdr->addr1);
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                if (sta) {
                        struct ieee80211_hw *hw = &local->hw;
                        int ampdu_queue = sta->tid_to_tx_q[tid];

                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
                            test_bit(ampdu_queue, local->queue_pool)) {
                                queue = ampdu_queue;
                                info->flags |= IEEE80211_TX_CTL_AMPDU;
                        } else {
                                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
                        }
                }
                rcu_read_unlock();

                return queue;
        }

        /* Now we know the 1d priority, fill in the QoS header if
         * there is one.
         */
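        /* In the QoS control field, bits 0-3 carry the TID and bits 5-6
         * the ack policy; the second byte is left zero here.
         */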
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *p = ieee80211_get_qos_ctl(hdr);
                u8 ack_policy = 0;
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                if (local->wifi_wme_noack_test)
                        ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
                                        QOS_CONTROL_ACK_POLICY_SHIFT;
                /* qos header is 2 bytes, second reserved */
                *p++ = ack_policy | tid;
                *p = 0;

                rcu_read_lock();

                sta = sta_info_get(local, hdr->addr1);
                if (sta) {
                        int ampdu_queue = sta->tid_to_tx_q[tid];
                        struct ieee80211_hw *hw = &local->hw;

                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
                            test_bit(ampdu_queue, local->queue_pool)) {
                                queue = ampdu_queue;
                                info->flags |= IEEE80211_TX_CTL_AMPDU;
                        } else {
                                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
                        }
                }

                rcu_read_unlock();
        }

        return queue;
}

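/* Reserve a hardware A-MPDU queue from the pool for this station/TID and
 * stop it.  Returns 0 on success, -EPERM if aggregation queues are
 * unavailable (or, for now, unconditionally; see the XXX below) and
 * -EAGAIN when the pool is exhausted.
 */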
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                               struct sta_info *sta, u16 tid)
{
        int i;

        /* XXX: currently broken due to cb/requeue use */
        return -EPERM;

        /* prepare the filter and save it for the SW queue
         * matching the received HW queue */

        if (!local->hw.ampdu_queues)
                return -EPERM;

        /* try to get a Qdisc from the pool */
        for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
                if (!test_and_set_bit(i, local->queue_pool)) {
                        ieee80211_stop_queue(local_to_hw(local), i);
                        sta->tid_to_tx_q[tid] = i;

                        /* If there are already packets pending for this
                         * TID, they first need to be drained from the
                         * previous queue, since HT requires strict
                         * in-order delivery. */
#ifdef CONFIG_MAC80211_HT_DEBUG
                        if (net_ratelimit()) {
                                DECLARE_MAC_BUF(mac);
                                printk(KERN_DEBUG "allocated aggregation queue"
                                        " %d tid %d addr %s pool=0x%lX\n",
                                        i, tid, print_mac(mac, sta->sta.addr),
                                        local->queue_pool[0]);
                        }
#endif /* CONFIG_MAC80211_HT_DEBUG */
                        return 0;
                }

        return -EAGAIN;
}

/**
 * The caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
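/* Return the aggregation queue used by this station/TID to the pool.  If
 * @requeue is set, frames still sitting on that queue are re-classified
 * onto regular queues via ieee80211_requeue(); otherwise the qdisc is
 * simply reset and the pending frames are dropped.
 */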
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
                                   struct sta_info *sta, u16 tid,
                                   u8 requeue)
{
        int agg_queue = sta->tid_to_tx_q[tid];
        struct ieee80211_hw *hw = &local->hw;

        /* return the qdisc to the pool */
        clear_bit(agg_queue, local->queue_pool);
        sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

        if (requeue) {
                ieee80211_requeue(local, agg_queue);
        } else {
                struct netdev_queue *txq;
                spinlock_t *root_lock;
                struct Qdisc *q;

                txq = netdev_get_tx_queue(local->mdev, agg_queue);
                q = rcu_dereference(txq->qdisc);
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);
                qdisc_reset(q);
                spin_unlock_bh(root_lock);
        }
}

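/* Pull every frame off the qdisc attached to @queue, run it through
 * ieee80211_select_queue() again and enqueue it on the queue that now
 * matches its classification, for example when an aggregation queue is
 * torn down and its pending frames have to migrate back to the normal
 * queues.
 */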
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
        struct sk_buff_head list;
        spinlock_t *root_lock;
        struct Qdisc *qdisc;
        u32 len;

        rcu_read_lock_bh();

        qdisc = rcu_dereference(txq->qdisc);
        if (!qdisc || !qdisc->dequeue)
                goto out_unlock;

        skb_queue_head_init(&list);

        root_lock = qdisc_root_lock(qdisc);
        spin_lock(root_lock);
        for (len = qdisc->q.qlen; len > 0; len--) {
                struct sk_buff *skb = qdisc->dequeue(qdisc);

                if (skb)
                        __skb_queue_tail(&list, skb);
        }
        spin_unlock(root_lock);

        for (len = list.qlen; len > 0; len--) {
                struct sk_buff *skb = __skb_dequeue(&list);
                u16 new_queue;

                BUG_ON(!skb);
                new_queue = ieee80211_select_queue(local->mdev, skb);
                skb_set_queue_mapping(skb, new_queue);

                txq = netdev_get_tx_queue(local->mdev, new_queue);

                qdisc = rcu_dereference(txq->qdisc);
                root_lock = qdisc_root_lock(qdisc);

                spin_lock(root_lock);
                qdisc_enqueue_root(skb, qdisc);
                spin_unlock(root_lock);
        }

out_unlock:
        rcu_read_unlock_bh();
}