/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
static struct frame *
getframe(struct aoetgt *t, int tag)
{
	struct frame *f, *e;

	f = t->frames;
	e = f + t->nframes;
	for (; f < e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}
/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
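/* Example: ++t->lasttag == 0x0102 at jiffies tick 0xbeef yields tag 0x0102beef. */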
static int
newtag(struct aoetgt *t)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++t->lasttag & 0x7fff) << 16;
}
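/* aoehdr_atainit: fill in the AoE header of an ATA command for target t
 * and hand back the newly allocated host-side tag identifying the frame.
 */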
static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(t);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
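/* put_lba: scatter the 48-bit LBA across the six byte-wide lba0-lba5
 * fields of the ATA header, least significant byte first.
 */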
static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
static void
ifrotate(struct aoetgt *t)
{
	t->ifp++;
	if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
		t->ifp = t->ifs;
	if (t->ifp->nd == NULL) {
		printk(KERN_INFO "aoe: no interface to rotate to\n");
		BUG();
	}
}
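/* The per-device skb pool below holds spare packets so I/O can continue
 * while the network layer still holds references to frames' own skbs
 * (see the dataref checks here and in freeframe).
 */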
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}
/* freeframe is where we do our load balancing so it's a little hairy. */
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e, *rf;
	struct aoetgt **t;
	struct sk_buff *skb;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	t = d->tgt;
	t++;
	if (t >= &d->targets[NTARGETS] || !*t)
		t = d->targets;
	for (;;) {
		if ((*t)->nout < (*t)->maxout
		&& t != d->htgt
		&& (f = (*t)->frames) != NULL) {
			rf = NULL;
			e = f + (*t)->nframes;
			for (; f < e; f++) {
				if (f->tag != FREETAG)
					continue;
				skb = f->skb;
				if (!skb
				&& !(f->skb = skb = new_skb(ETH_ZLEN)))
					continue;
				if (atomic_read(&skb_shinfo(skb)->dataref)
					!= 1) {
					if (!rf)
						rf = f;
					continue;
				}
gotone:				skb_shinfo(skb)->nr_frags = skb->data_len = 0;
				skb_trim(skb, 0);
				d->tgt = t;
				ifrotate(*t);
				return f;
			}
			/* Work can be done, but the network layer is
			   holding our precious packets.  Try to grab
			   one from the pool. */
			f = rf;
			if (f == NULL) {	/* more paranoia */
				printk(KERN_ERR
					"aoe: freeframe: %s.\n",
					"unexpected null rf");
				d->flags |= DEVFL_KICKME;
				return NULL;
			}
			skb = skb_pool_get(d);
			if (skb) {
				skb_pool_put(d, f->skb);
				f->skb = skb;
				goto gotone;
			}
			d->flags |= DEVFL_KICKME;
		}
		if (t == d->tgt)	/* we've looped and found nada */
			break;
		t++;
		if (t >= &d->targets[NTARGETS] || !*t)
			t = d->targets;
	}
	return NULL;
}
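/* aoecmd_ata_rw: build one ATA read/write frame for the buffer in
 * d->inprocess and queue it on d->sendq; returns nonzero when a frame
 * was queued, so the aoecmd_work loop can keep calling it.
 */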
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct bio_vec *bv;
	struct aoetgt *t;
	struct sk_buff *skb;
	ulong bcnt;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	f = freeframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	buf = d->inprocess;
	bv = buf->bv;
	bcnt = t->ifp->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->bv_resid)
		bcnt = buf->bv_resid;
	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = page_address(bv->bv_page) + buf->bv_off;
	f->bcnt = bcnt;
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bv_off += bcnt;
	buf->bv_resid -= bcnt;
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv = ++bv;
		buf->bv_resid = bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
		buf->bv_off = bv->bv_offset;
	}

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb)
		__skb_queue_tail(&d->sendq, skb);
	return 1;
}
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);
}
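/* resend: retransmit frame f under a fresh tag on the target's next
 * interface, logging the retransmit through the aoe character device.
 */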
static void
resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	ifrotate(t);
	n = newtag(t);
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x "
		"s=%012llx d=%012llx nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		mac_addr(h->src),
		mac_addr(h->dst), t->nout);
	aoechr_error(buf);

	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	switch (ah->cmdstat) {
	default:
		break;
	case WIN_READ:
	case WIN_READ_EXT:
	case WIN_WRITE:
	case WIN_WRITE_EXT:
		put_lba(ah, f->lba);

		n = f->bcnt;
		if (n > DEFAULTBCNT)
			n = DEFAULTBCNT;
		ah->scnt = n >> 9;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), n);
			skb->len = sizeof *h + sizeof *ah + n;
			skb->data_len = n;
		}
	}
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	__skb_queue_tail(&d->sendq, skb);
}
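/* tsince: how many jiffies ticks have elapsed since the tag was sent;
 * the low 16 bits of every tag carry its transmit time (see newtag).
 */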
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

static struct aoeif *
addif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p;

	p = getif(t, NULL);
	if (!p)
		return NULL;
	p->nd = nd;
	p->maxbcnt = DEFAULTBCNT;
	p->lost = 0;
	p->lostjumbo = 0;

	return p;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	ulong n;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
}
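/* sthtith: move the ailing head target's outstanding frames onto the
 * current target and resend them, then strip its interfaces so it
 * drops out of rotation.
 */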
static int
sthtith(struct aoedev *d)
{
	struct frame *f, *e, *nf;
	struct sk_buff *skb;
	struct aoetgt *ht = *d->htgt;

	f = ht->frames;
	e = f + ht->nframes;
	for (; f < e; f++) {
		if (f->tag == FREETAG)
			continue;
		nf = freeframe(d);
		if (!nf)
			return 0;
		skb = nf->skb;
		*nf = *f;
		f->skb = skb;
		f->tag = FREETAG;
		nf->waited = 0;
		ht->nout--;
		(*d->tgt)->nout++;
		resend(d, *d->tgt, nf);
	}
	/* he's clean, he's useless.  take away his interfaces */
	memset(ht->ifs, 0, sizeof ht->ifs);
	d->htgt = NULL;
	return 1;
}
static inline unsigned char
ata_scnt(unsigned char *packet) {
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;

	h = (struct aoe_hdr *) packet;
	ah = (struct aoe_atahdr *) (h+1);
	return ah->scnt;
}
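/* rexmit_timer: per-device timer run every TIMERTICK.  Retransmit any
 * frame outstanding longer than ~150% of the average round-trip time,
 * fail the device after aoe_deadsecs, and shrink or grow each target's
 * window (maxout) as frames are lost or flow smoothly.
 */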
static void
rexmit_timer(ulong vp)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct aoetgt *t, **tt, **te;
	struct aoeif *ifp;
	struct frame *f, *e;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++) {
		t = *tt;
		f = t->frames;
		e = f + t->nframes;
		for (; f < e; f++) {
			if (f->tag == FREETAG
			|| tsince(f->tag) < timeout)
				continue;
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) {
				/* waited too long.  device failure. */
				aoedev_downdev(d);
				break;
			}

			if (n > HELPWAIT /* see if another target can help */
			&& (tt != d->targets || d->targets[1]))
				d->htgt = tt;

			if (t->nout == t->maxout) {
				if (t->maxout > 1)
					t->maxout--;
				t->lastwadj = jiffies;
			}

			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}

			if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512
			&& ifp && ++ifp->lostjumbo > (t->nframes << 1)
			&& ifp->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO
					"aoe: e%ld.%d: "
					"too many lost jumbo on "
					"%s:%012llx - "
					"falling back to %d frames.\n",
					d->aoemajor, d->aoeminor,
					ifp->nd->name, mac_addr(t->addr),
					DEFAULTBCNT);
				ifp->maxbcnt = 0;
			}
			resend(d, t, f);
		}

		/* window check */
		if (t->nout == t->maxout
		&& t->maxout < t->nframes
		&& (jiffies - t->lastwadj)/HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (!skb_queue_empty(&d->sendq)) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	if (d->flags & DEVFL_KICKME || d->htgt) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(&queue);
}
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct buf *buf;
loop:
	if (d->htgt && !sthtith(d))
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		d->inprocess = buf;
	}
	if (aoecmd_ata_rw(d))
		goto loop;
}
/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
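/* ataid_complete: parse an ATA IDENTIFY DEVICE response, note LBA48
 * support, and record the disk's capacity and CHS geometry.
 */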
static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %012llx e%ld.%d v%04x has %llu sectors\n",
			mac_addr(t->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}
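/* calc_rttavg folds one sample into the smoothed round-trip average:
 * rttavg += (rtt - rttavg) / 4, i.e. gain g == .25 as noted below.
 * E.g. rttavg 800 and a sample of 1000 yield a new rttavg of 850.
 * Negative samples mark late responses to already-freed tags and only
 * feed the mintimer floor.
 */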
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}
static void
diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector)
{
	unsigned long n_sect = bio->bi_size >> 9;
	const int rw = bio_data_dir(bio);
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(disk, sector);

	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_add(cpu, part, sectors[rw], n_sect);
	part_stat_add(cpu, part, io_ticks, duration);

	part_stat_unlock();
}
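/* aoecmd_ata_rsp: match an incoming ATA response to its outstanding
 * frame, copy read data, update the RTT average, and complete the bio
 * when its last frame arrives.
 */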
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct aoetgt *t;
	struct aoeif *ifp;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb_mac_header(skb);
	aoemajor = get_unaligned_be16(&hin->major);
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = get_unaligned_be32(&hin->tag);
	t = gettgt(d, hin->src);
	if (t == NULL) {
		printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n",
			d->aoemajor, d->aoeminor, mac_addr(hin->src));
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = getframe(t, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			get_unaligned_be16(&hin->major),
			hin->minor,
			get_unaligned_be32(&hin->tag),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */
			d->htgt = NULL;
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: %s.  skb->len=%d need=%ld\n",
					"runt data size in read", skb->len, n);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			ifp = getif(t, skb->dev);
			if (ifp) {
				ifp->lost = 0;
				if (n > DEFAULTBCNT)
					ifp->lostjumbo = 0;
			}
			if (f->bcnt -= n) {
				f->lba += n >> 9;
				f->bufaddr += n;
				resend(d, t, f);
				goto xmit;
			}
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, t, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				get_unaligned_be16(&hin->major),
				hin->minor);
		}
	}

	if (buf && --buf->nframesout == 0 && buf->resid == 0) {
		diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
		n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
		bio_endio(buf->bio, n);
		mempool_free(buf, d->bufpool);
	}

	f->buf = NULL;
	f->tag = FREETAG;
	t->nout--;

	aoecmd_work(d);
xmit:
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);
}
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = freeframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
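/* addtgt: register a newly discovered MAC address for this device,
 * allocating its frame table with every tag initially FREETAG.
 */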
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;
	struct frame *f, *e;

	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		printk(KERN_INFO
			"aoe: device addtgt failure; too many targets\n");
		return NULL;
	}
	t = kcalloc(1, sizeof *t, GFP_ATOMIC);
	f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
	if (!t || !f) {
		kfree(f);
		kfree(t);
		printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
		return NULL;
	}

	t->nframes = nframes;
	t->frames = f;
	e = f + nframes;
	for (; f < e; f++)
		f->tag = FREETAG;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	t->maxout = t->nframes;
	return *tt = t;
}
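/* aoecmd_cfg_rsp: handle an AoE config response: find or create the
 * device, record the target and receiving interface, size data frames
 * to the interface MTU, and kick off an ATA device identify.
 */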
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	struct aoeif *ifp;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	u16 n;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_sysminor_m(sysminor);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (!t) {
		t = addtgt(d, h->src, n);
		if (!t) {
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	ifp = getif(t, skb->dev);
	if (!ifp) {
		ifp = addif(t, skb->dev);
		if (!ifp) {
			printk(KERN_INFO
				"aoe: device addif failure; "
				"too many interfaces?\n");
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	if (ifp->maxbcnt) {
		n = ifp->nd->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != ifp->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%d: setting %d%s%s:%012llx\n",
				d->aoemajor, d->aoeminor, n,
				" byte data frames on ", ifp->nd->name,
				mac_addr(t->addr));
			ifp->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->fw_ver = be16_to_cpu(ch->fwver);

	sl = aoecmd_ata_id(d);

	spin_unlock_irqrestore(&d->lock, flags);

	if (sl) {
		struct sk_buff_head queue;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}
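/* aoecmd_cleanslate: reset the timer floor, each target's window, and
 * per-interface frame limits to defaults so discovery starts fresh.
 */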
void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct aoeif *p, *e;

	d->mintimer = MINTIMER;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		(*t)->maxout = (*t)->nframes;
		p = (*t)->ifs;
		e = p + NAOEIFS;
		for (; p < e; p++) {
			p->lostjumbo = 0;
			p->lost = 0;
			p->maxbcnt = DEFAULTBCNT;
		}
	}
}