/******************************************************************************

  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/
#include <linux/compiler.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>

#include <net/ieee80211.h>
/*

802.11 Data Frame

      ,-------------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                  `----.----'
                                                               |
       .- 'Frame data' expands, if WEP enabled, to <-----------'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | Packet    |     |
      `-----|           |-----'
            `-----.-----'
                  |
       .- 'Encrypted Packet' expands to
       |
       V
      ,-----------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304   |
      |------|------|---------|----------|------|-----------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP        |
      | DSAP | SSAP |         |          |      | Packet    |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |           |
      `-----------------------------------------------------'
Total: 8 non-data bytes
802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4  |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      `-----------------------------------------'
Total: 18 non-data bytes
In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining fragments carry only data.

If encryption is enabled, each fragment's payload size is reduced by enough
space to add the prefix and postfix (IV and ICV, totalling 8 bytes in the case
of WEP).  So 1500 bytes of payload with ieee->fts set to 500 takes 3 frames
without encryption; with WEP it takes 4 frames, since the payload of each frame
is reduced to 492 bytes.  (See the documentation-only sketch following this
comment block.)
 * |    ETHERNET HEADER            ,-<-- PAYLOAD
 * |                               |  14 bytes from skb->data
 * |  2 bytes for Type -->  ,T.    |  (sizeof ethhdr)
 * |                        |  |   |
 * |,-Dest.--. ,--Src.---.  |  |   |
 * | 6 bytes | | 6 bytes |  |  |   |
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 * |         |           |  `T' <---------- 2 bytes for Type
 * |         |           '---SNAP--' <----- 6 bytes for SNAP
 * `-IV--' <-------------------- 4 bytes for IV (WEP)
 */
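
/*
 * Documentation-only sketch of the fragment count arithmetic described in the
 * comment above.  The helper below is illustrative and not part of the driver
 * (the name example_nr_fragments is made up); it follows the simplified
 * accounting in the comment, where WEP costs a 4-byte IV prefix plus a 4-byte
 * ICV postfix per fragment.  The real code additionally subtracts the 802.11
 * header and optional FCS from each fragment.  With a 1500-byte payload and
 * fts = 500 this yields 3 fragments in the clear and 4 with WEP enabled.
 */
#if 0
static int example_nr_fragments(int payload, int fts, int wep)
{
        int per_frag = fts;             /* usable payload bytes per fragment */

        if (wep)
                per_frag -= 4 + 4;      /* IV prefix + ICV postfix */

        return (payload + per_frag - 1) / per_frag;     /* round up */
}
#endif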
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
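
/*
 * Build the 802.2 SNAP header in front of the payload: DSAP/SSAP 0xAA,
 * control 0x03 (UI), then the OUI.  IPX (0x8137) and AppleTalk (0x80f3)
 * use the 802.1H bridge-tunnel OUI; everything else uses RFC 1042
 * encapsulation.  Returns the number of bytes written (SNAP + ethertype).
 */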
static int ieee80211_copy_snap(u8 * data, u16 h_proto)
{
        struct ieee80211_snap_hdr *snap;
        u8 *oui;

        snap = (struct ieee80211_snap_hdr *)data;
        snap->dsap = 0xaa;
        snap->ssap = 0xaa;
        snap->ctrl = 0x03;

        if (h_proto == 0x8137 || h_proto == 0x80f3)
                oui = P802_1H_OUI;
        else
                oui = RFC1042_OUI;
        snap->oui[0] = oui[0];
        snap->oui[1] = oui[1];
        snap->oui[2] = oui[2];

        *(u16 *) (data + SNAP_SIZE) = htons(h_proto);

        return SNAP_SIZE + sizeof(u16);
}
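
/*
 * Encrypt a single MPDU in place via the configured crypto ops.  The crypt
 * context's refcount is held across the callback so the key cannot be torn
 * down mid-encryption; on failure the frame is counted as a TX discard.
 */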
static int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
                                      struct sk_buff *frag, int hdr_len)
{
        struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
        int res;

        if (crypt == NULL)
                return -1;

        /* To encrypt, frame format is:
         * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
        atomic_inc(&crypt->refcnt);
        res = 0;
        if (crypt->ops && crypt->ops->encrypt_mpdu)
                res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
        atomic_dec(&crypt->refcnt);
        if (res < 0) {
                printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
                       ieee->dev->name, frag->len);
                ieee->ieee_stats.tx_discards++;
                return -1;
        }

        return 0;
}
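
/* Free a TXB and every fragment skb still attached to it. */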
void ieee80211_txb_free(struct ieee80211_txb *txb)
{
        int i;

        if (unlikely(!txb))
                return;
        for (i = 0; i < txb->nr_frags; i++)
                if (txb->fragments[i])
                        dev_kfree_skb_any(txb->fragments[i]);
        kfree(txb);
}
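
/*
 * Allocate a TXB with nr_frags fragment skbs of txb_size bytes each, plus
 * 'headroom' bytes reserved at the front of every fragment for the driver.
 * Returns NULL (and frees any partial allocation) on failure.
 */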
static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
                                                 int headroom, gfp_t gfp_mask)
{
        struct ieee80211_txb *txb;
        int i;

        txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
                      gfp_mask);
        if (unlikely(!txb))
                return NULL;

        memset(txb, 0, sizeof(struct ieee80211_txb));
        txb->nr_frags = nr_frags;
        txb->frag_size = txb_size;

        for (i = 0; i < nr_frags; i++) {
                txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
                                                    gfp_mask);
                if (unlikely(!txb->fragments[i])) {
                        i--;
                        break;
                }
                skb_reserve(txb->fragments[i], headroom);
        }
        if (unlikely(i != nr_frags)) {
                while (i >= 0)
                        dev_kfree_skb_any(txb->fragments[i--]);
                kfree(txb);
                return NULL;
        }
        return txb;
}
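
/*
 * Map the IPv4 ToS/DSCP precedence bits of an outgoing packet to an 802.11e
 * TID (0-7) so QoS-capable hardware can pick the right transmit queue.
 * Non-IP traffic falls back to TID 0 (best effort).
 */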
static int ieee80211_classify(struct sk_buff *skb)
{
        struct ethhdr *eth;
        struct iphdr *ip;

        eth = (struct ethhdr *)skb->data;
        if (eth->h_proto != __constant_htons(ETH_P_IP))
                return 0;

        ip = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
        switch (ip->tos & 0xfc) {
        case 0x20: return 2;
        case 0x40: return 1;
        case 0x60: return 3;
        case 0x80: return 4;
        case 0xa0: return 5;
        case 0xc0: return 6;
        case 0xe0: return 7;
        default:   return 0;
        }
}

/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */
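/*
 * Layout note: when an RTS frame is required (frag_size above ieee->rts and
 * CFG_IEEE80211_RTS set) it occupies fragments[0] and the data fragments
 * start at index 1.  Only the first data fragment carries the SNAP header;
 * MSDU-level encryption, when enabled, is applied before fragmentation.
 */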
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ieee80211_device *ieee = netdev_priv(dev);
        struct ieee80211_txb *txb = NULL;
        struct ieee80211_hdr_3addrqos *frag_hdr;
        int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
            rts_required;
        unsigned long flags;
        struct net_device_stats *stats = &ieee->stats;
        int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
        int bytes, fc, hdr_len;
        struct sk_buff *skb_frag;
        struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
                .duration_id = 0,
                .seq_ctl = 0,
                .qos_ctl = 0
        };
        u8 dest[ETH_ALEN], src[ETH_ALEN];
        struct ieee80211_crypt_data *crypt;
        int priority = skb->priority;
        int snapped = 0;

        if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
                return NETDEV_TX_BUSY;

        spin_lock_irqsave(&ieee->lock, flags);

        /* If there is no driver handler to take the TXB, don't bother
         * creating it... */
        if (!ieee->hard_start_xmit) {
                printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
                goto success;
        }

        if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
                printk(KERN_WARNING "%s: skb too small (%d).\n",
                       ieee->dev->name, skb->len);
                goto success;
        }

        ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

        crypt = ieee->crypt[ieee->tx_keyidx];

        encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
            ieee->sec.encrypt;

        host_encrypt = ieee->host_encrypt && encrypt && crypt;
        host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
        host_build_iv = ieee->host_build_iv && encrypt && crypt;

        if (!encrypt && ieee->ieee802_1x &&
            ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
                stats->tx_dropped++;
                goto success;
        }

        /* Save source and destination addresses */
        memcpy(dest, skb->data, ETH_ALEN);
        memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);

        if (host_encrypt || host_build_iv)
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
                    IEEE80211_FCTL_PROTECTED;
        else
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

        if (ieee->iw_mode == IW_MODE_INFRA) {
                fc |= IEEE80211_FCTL_TODS;
                /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
                memcpy(header.addr1, ieee->bssid, ETH_ALEN);
                memcpy(header.addr2, src, ETH_ALEN);
                memcpy(header.addr3, dest, ETH_ALEN);
        } else if (ieee->iw_mode == IW_MODE_ADHOC) {
                /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
                memcpy(header.addr1, dest, ETH_ALEN);
                memcpy(header.addr2, src, ETH_ALEN);
                memcpy(header.addr3, ieee->bssid, ETH_ALEN);
        }
        hdr_len = IEEE80211_3ADDR_LEN;

        if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
                fc |= IEEE80211_STYPE_QOS_DATA;
                hdr_len += 2;

                skb->priority = ieee80211_classify(skb);
                header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
        }
        header.frame_ctl = cpu_to_le16(fc);

        /* Advance the SKB to the start of the payload */
        skb_pull(skb, sizeof(struct ethhdr));

        /* Determine total amount of storage required for TXB packets */
        bytes = skb->len + SNAP_SIZE + sizeof(u16);

        /* Encrypt msdu first on the whole data packet. */
        if ((host_encrypt || host_encrypt_msdu) &&
            crypt && crypt->ops && crypt->ops->encrypt_msdu) {
                int res;
                int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
                    crypt->ops->extra_msdu_postfix_len;
                struct sk_buff *skb_new = dev_alloc_skb(len);

                if (unlikely(!skb_new))
                        goto failed;

                skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
                memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
                snapped = 1;
                ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
                                    ether_type);
                memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
                res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
                if (res < 0) {
                        IEEE80211_ERROR("msdu encryption failed\n");
                        dev_kfree_skb_any(skb_new);
                        goto failed;
                }

                dev_kfree_skb_any(skb);
                skb = skb_new;
                bytes += crypt->ops->extra_msdu_prefix_len +
                    crypt->ops->extra_msdu_postfix_len;
                skb_pull(skb, hdr_len);
        }
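
        /* Host-side fragmentation is only done when this stack encrypts the
         * MPDUs or the driver asked for open (unencrypted) frames to be
         * fragmented here; otherwise the payload is handed down as a single
         * fragment and the driver does its own fragmentation. */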
        if (host_encrypt || ieee->host_open_frag) {
                /* Determine fragmentation size based on destination (multicast
                 * and broadcast are not fragmented) */
                if (is_multicast_ether_addr(dest) ||
                    is_broadcast_ether_addr(dest))
                        frag_size = MAX_FRAG_THRESHOLD;
                else
                        frag_size = ieee->fts;

                /* Determine amount of payload per fragment.  Regardless of
                 * whether this stack is providing the full 802.11 header, one
                 * will eventually be affixed to this fragment -- so we must
                 * account for it when determining the amount of payload space. */
                bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        bytes_per_frag -= IEEE80211_FCS_LEN;

                /* Each fragment may need to have room for the encryption
                 * prefix/postfix */
                if (host_encrypt)
                        bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
                            crypt->ops->extra_mpdu_postfix_len;

                /* Number of fragments is the total bytes divided by the
                 * payload per fragment */
                nr_frags = bytes / bytes_per_frag;
                bytes_last_frag = bytes % bytes_per_frag;
                if (bytes_last_frag)
                        nr_frags++;
                else
                        bytes_last_frag = bytes_per_frag;
        } else {
                nr_frags = 1;
                bytes_per_frag = bytes_last_frag = bytes;
                frag_size = bytes + IEEE80211_3ADDR_LEN;
        }

        rts_required = (frag_size > ieee->rts
                        && ieee->config & CFG_IEEE80211_RTS);
        if (rts_required)
                nr_frags++;

        /* When we allocate the TXB we allocate enough space for the reserve
         * and full fragment bytes (bytes_per_frag doesn't include prefix,
         * postfix, header, FCS, etc.) */
        txb = ieee80211_alloc_txb(nr_frags, frag_size,
                                  ieee->tx_headroom, GFP_ATOMIC);
        if (unlikely(!txb)) {
                printk(KERN_WARNING "%s: Could not allocate TXB\n",
                       ieee->dev->name);
                goto failed;
        }
        txb->encrypted = encrypt;
        if (host_encrypt)
                txb->payload_size = frag_size * (nr_frags - 1) +
                    bytes_last_frag;
        else
                txb->payload_size = bytes;

        if (rts_required) {
                skb_frag = txb->fragments[0];
                frag_hdr =
                    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);

                /* Set header frame_ctl to the RTS. */
                header.frame_ctl =
                    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
                memcpy(frag_hdr, &header, hdr_len);

                /* Restore header frame_ctl to the original data setting. */
                header.frame_ctl = cpu_to_le16(fc);

                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        skb_put(skb_frag, 4);

                txb->rts_included = 1;
                i = 1;
        } else
                i = 0;
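
        /* Fill the remaining fragments; index 0 is skipped when it already
         * holds the RTS frame built above. */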
        for (; i < nr_frags; i++) {
                skb_frag = txb->fragments[i];

                if (host_encrypt || host_build_iv)
                        skb_reserve(skb_frag,
                                    crypt->ops->extra_mpdu_prefix_len);

                frag_hdr =
                    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
                memcpy(frag_hdr, &header, hdr_len);

                /* If this is not the last fragment, then add the MOREFRAGS
                 * bit to the frame control */
                if (i != nr_frags - 1) {
                        frag_hdr->frame_ctl =
                            cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
                        bytes = bytes_per_frag;
                } else {
                        /* The last fragment takes the remaining length */
                        bytes = bytes_last_frag;
                }

                if (i == 0 && !snapped) {
                        ieee80211_copy_snap(skb_put
                                            (skb_frag, SNAP_SIZE + sizeof(u16)),
                                            ether_type);
                        bytes -= SNAP_SIZE + sizeof(u16);
                }

                memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

                /* Advance the SKB... */
                skb_pull(skb, bytes);

                /* Encryption routine will move the header forward in order
                 * to insert the IV between the header and the payload */
                if (host_encrypt)
                        ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
                else if (host_build_iv) {
                        struct ieee80211_crypt_data *crypt;

                        crypt = ieee->crypt[ieee->tx_keyidx];
                        atomic_inc(&crypt->refcnt);
                        if (crypt->ops->build_iv)
                                crypt->ops->build_iv(skb_frag, hdr_len,
                                      ieee->sec.keys[ieee->sec.active_key],
                                      ieee->sec.key_sizes[ieee->sec.active_key],
                                      crypt->priv);
                        atomic_dec(&crypt->refcnt);
                }

                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        skb_put(skb_frag, 4);
        }

      success:
        spin_unlock_irqrestore(&ieee->lock, flags);

        dev_kfree_skb_any(skb);

        if (txb) {
                int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
                if (ret == 0) {
                        stats->tx_packets++;
                        stats->tx_bytes += txb->payload_size;
                        return 0;
                }

                if (ret == NETDEV_TX_BUSY) {
                        printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
                               "driver should report queue full via "
                               "ieee_device->is_queue_full.\n",
                               ieee->dev->name);
                }

                ieee80211_txb_free(txb);
        }

        return 0;

      failed:
        spin_unlock_irqrestore(&ieee->lock, flags);
        netif_stop_queue(dev);
        stats->tx_errors++;
        return 1;
}

/* Incoming 802.11 structure is converted to a TXB,
 * a block of 802.11 fragment packets (stored as skbs) */
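/*
 * Unlike ieee80211_xmit(), this path takes a fully formed 802.11 frame from
 * the stack (for example an authentication frame during software-based
 * shared key authentication) and wraps it in a single-fragment TXB,
 * optionally encrypting the MPDU in software.
 */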
int ieee80211_tx_frame(struct ieee80211_device *ieee,
                       struct ieee80211_hdr *frame, int hdr_len, int total_len,
                       int encrypt_mpdu)
{
        struct ieee80211_txb *txb = NULL;
        unsigned long flags;
        struct net_device_stats *stats = &ieee->stats;
        struct sk_buff *skb_frag;
        int priority = -1;

        spin_lock_irqsave(&ieee->lock, flags);

        if (encrypt_mpdu && !ieee->sec.encrypt)
                encrypt_mpdu = 0;

        /* If there is no driver handler to take the TXB, don't bother
         * creating it... */
        if (!ieee->hard_start_xmit) {
                printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
                goto success;
        }

        if (unlikely(total_len < 24)) {
                printk(KERN_WARNING "%s: skb too small (%d).\n",
                       ieee->dev->name, total_len);
                goto success;
        }

        if (encrypt_mpdu)
                frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

        /* When we allocate the TXB we allocate enough space for the reserve
         * and full fragment bytes (bytes_per_frag doesn't include prefix,
         * postfix, header, FCS, etc.) */
        txb = ieee80211_alloc_txb(1, total_len, ieee->tx_headroom, GFP_ATOMIC);
        if (unlikely(!txb)) {
                printk(KERN_WARNING "%s: Could not allocate TXB\n",
                       ieee->dev->name);
                goto failed;
        }

        txb->encrypted = 0;
        txb->payload_size = total_len;

        skb_frag = txb->fragments[0];

        memcpy(skb_put(skb_frag, total_len), frame, total_len);

        if (ieee->config &
            (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                skb_put(skb_frag, 4);

        /* To avoid overcomplicating things, we do the corner-case frame
         * encryption in software. The only real situation where encryption is
         * needed here is during software-based shared key authentication. */
        if (encrypt_mpdu)
                ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);

      success:
        spin_unlock_irqrestore(&ieee->lock, flags);

        if (txb) {
                if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
                        stats->tx_packets++;
                        stats->tx_bytes += txb->payload_size;
                        return 0;
                }

                ieee80211_txb_free(txb);
        }

        return 0;

      failed:
        spin_unlock_irqrestore(&ieee->lock, flags);
        stats->tx_errors++;
        return 1;
}

EXPORT_SYMBOL(ieee80211_tx_frame);
EXPORT_SYMBOL(ieee80211_txb_free);