/* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */
/*
 * Filesystem request handling methods
 */
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"	/* driver-local declarations: struct aoedev, struct frame, header layouts */
#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)
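/* The retransmit timer fires every TIMERTICK jiffies (HZ/10, i.e. every
 * 100 ms).  The smoothed round-trip estimate d->rttavg that it consumes is
 * clamped to the [MINTIMER, MAXTIMER] range: between two ticks and two
 * seconds.
 */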
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
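/* Example: to allow five minutes before failing a device, load with
 *	modprobe aoe aoe_deadsecs=300
 * or, because the parameter mode is 0644, adjust it at runtime with
 *	echo 300 > /sys/module/aoe/parameters/aoe_deadsecs
 */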
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb->nh.raw = skb->mac.raw = skb->data;
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
static struct frame *
getframe(struct aoedev *d, int tag)
{
	/* ... scan d->frames for the frame holding this tag; NULL if none ... */
}
/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}
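/* Tag layout, per the comment above:
 *	bit  31    : always 0, leaving tags with the top bit set to userland
 *	bits 30..16: low 15 bits of d->lasttag, a per-device sequence number
 *	bits 15..0 : jiffies & 0xffff, the transmit tick read back by tsince()
 */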
static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
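/* On-wire AoE header fields are big-endian.  Single-byte fields such as
 * h->minor need no conversion, which is why only major and tag pass
 * through cpu_to_be16/cpu_to_be32 above.
 */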
static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	/* ... store the LBA a byte at a time into ah->lba0 .. ah->lba5,
	 * least-significant byte first ...
	 */
}
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;	/* WIN_READ | 0x10 == WIN_WRITE */
	extbit = 0x4;		/* WIN_READ | 0x04 == WIN_READ_EXT (LBA48) */

	buf = d->inprocess;
	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
	/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
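/* The clone dance above is the transmit convention throughout this file:
 * the driver keeps its own reference (f->skb) for retransmission, while
 * the clone chained onto d->sendq_hd/d->sendq_tl is what the caller hands
 * to the network layer after dropping d->lock.
 */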
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			continue;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = (struct aoe_hdr *) skb->mac.raw;
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);	/* broadcast */
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;

		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}
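/* Everything here allocates with GFP_ATOMIC (via new_skb), which is what
 * makes this safe to call with spinlocks held; callers either queue the
 * returned list on a device sendq (aoecmd_cfg_rsp, aoecmd_work) or
 * transmit it immediately (aoecmd_cfg).
 */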
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;
	return NULL;
}
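/* A frame whose tag is FREETAG may still have its skb held by the network
 * layer; dataref == 1 means ours is the last reference, so the skb can be
 * reset and reused.  DEVFL_KICKME asks the retransmit timer to retry
 * aoecmd_work once the stack has let go of the buffers.
 */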
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}
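/* Each pass through the loop claims one free frame and loads it from the
 * buffer at the head of d->bufq (tracked in d->inprocess), so one call
 * issues as many commands as there are free frames and queued data.
 */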
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1) &&
		    d->maxbcnt != DEFAULTBCNT) {
			printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
				d->aoemajor, d->aoeminor, d->ifp->name);
			d->maxbcnt = DEFAULTBCNT;
			d->flags |= DEVFL_MAXBCNT;
		}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
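/* Retransmits double as the jumbo-frame probe: a retransmitted command is
 * shrunk back to DEFAULTBCNT (1 KiB) of data, and once the lost-jumbo count
 * passes twice the frame count the device drops to 1 KiB frames for good
 * (DEVFL_MAXBCNT keeps aoecmd_cfg_rsp from raising d->maxbcnt again).
 */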
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
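/* The 16-bit tick arithmetic wraps every 65536 jiffies, which is harmless
 * here: the per-try timeout rexmit_timer compares against is bounded by
 * 1.5 * MAXTIMER (three seconds), far below the wrap interval.
 */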
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
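/* When a tick queues anything (retransmits, or work kicked loose by
 * DEVFL_KICKME), d->rttavg is doubled on the spot: the next timeout grows
 * exponentially under sustained loss, capped at MAXTIMER.
 */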
/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
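/* ATA IDENTIFY DEVICE data, handled below, is an array of 256 little-endian
 * 16-bit words, so word W lives at byte offset W<<1; get_unaligned is used
 * because the data sits in a network buffer with no alignment guarantee.
 */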
static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu: "
				"it's already on! This shouldn't happen.\n",
				d->aoemajor, d->aoeminor);
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
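/* I.e. rttavg += (rtt - rttavg) / 4, an exponentially weighted moving
 * average with gain 1/4.  A negative rtt is the caller's signal that a
 * response arrived for a frame it could no longer match (see
 * aoecmd_ata_rsp); such samples raise d->mintimer, the floor applied to
 * normal samples, instead of feeding the average.
 */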
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb->mac.raw;
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = (struct aoe_hdr *) f->skb->mac.raw;
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* BSY, DF, DRQ, ERR: these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read. skb->len=%d\n",
					skb->len);
				/* fail frame f? just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
			/* fall through: reads and writes share the code below */
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {	/* more chunks to go */
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid. skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (unsigned char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}
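/* A response need not retire its frame: when fewer sectors were issued
 * than the frame still owes (f->bcnt), e.g. after rexmit() shrank a jumbo
 * command to DEFAULTBCNT, the handler reloads the same frame with the next
 * piece and transmits it directly rather than going back through the queue.
 */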
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}
/* Since we only call this in one place (and it only prepares one frame)
 * we just return the skb. Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;	/* IDENTIFY DEVICE returns one 512-byte sector */
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *sl;
	ulong flags, sysminor, aoemajor;
	u16 n;
	enum { MAXFRAMES = 16 };

	h = (struct aoe_hdr *) skb->mac.raw;
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
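/* Discovery, end to end: a config response pauses the device and sends an
 * ATA IDENTIFY via aoecmd_ata_id(); aoecmd_ata_rsp() hands the reply to
 * ataid_complete(), which records the size and schedules aoecmd_sleepwork()
 * to allocate or resize the gendisk once sleeping is allowed.
 */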