/* Copyright (c) 2006 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"
#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)
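/* For example (assuming HZ=250): TIMERTICK is 25 jiffies (100 ms),
 * MINTIMER is 50 jiffies (200 ms), and MAXTIMER is 500 jiffies (2 s).
 * These bound the adaptive round-trip estimate maintained by
 * calc_rttavg() below.
 */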
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
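/* Usage sketch (assuming the module loads as "aoe"): the timeout can be
 * set at load time, e.g. "modprobe aoe aoe_deadsecs=120", or adjusted at
 * runtime through /sys/module/aoe/parameters/aoe_deadsecs, since the
 * 0644 permissions above expose the parameter via sysfs.
 */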
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}
/* Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}
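/* A minimal sketch of the tag layout described above (hypothetical
 * helpers, not part of the driver): bits 30..16 carry the sequence
 * number, bits 15..0 carry the jiffies tick at transmit time, and
 * bit 31 stays clear for userland tagspace.
 *
 *	static inline int tag_seq(int tag)  { return (tag >> 16) & 0x7fff; }
 *	static inline int tag_tick(int tag) { return tag & 0xffff; }
 */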
static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
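/* Illustrative inverse of put_lba (not part of the original driver):
 * the six lba bytes form a 48-bit little-endian sector address.  Note
 * that for LBA28 commands the driver later ORs flag bits into lba3, so
 * this decoding is only meaningful before that point.
 */
static inline u64 get_lba(struct aoe_atahdr *ah)
{
	return (u64) ah->lba0 |
		(u64) ah->lba1 << 8 |
		(u64) ah->lba2 << 16 |
		(u64) ah->lba3 << 24 |
		(u64) ah->lba4 << 32 |
		(u64) ah->lba5 << 40;
}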
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
	/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
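/* Worked example of the bookkeeping above (assuming maxbcnt=1024): for
 * a 3072-byte bio vector starting at sector 100, three frames go out
 * with scnt=2 and LBAs 100, 102, and 104; bv_resid drops by 1024 per
 * frame, and the buf advances to the next bio vector once bv_resid
 * reaches zero.
 */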
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			continue;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = aoe_hdr(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}
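/* Deferred-transmit pattern used with this function: aoecmd_work()
 * (below) chains the returned list onto d->sendq_hd/d->sendq_tl while
 * holding d->lock, and the queue is handed to aoenet_xmit() only after
 * the lock is dropped, since some callers run with interrupts off.
 */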
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}
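/* The dataref test matters because the driver transmits clones (see the
 * skb_clone() calls elsewhere in this file): a dataref greater than one
 * means the network layer still holds a clone sharing this frame's
 * data, so the skb cannot be reused yet.  When every free frame is in
 * that state, DEVFL_KICKME asks rexmit_timer() to rerun aoecmd_work().
 */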
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		/* printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1))
			if (d->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
					d->aoemajor, d->aoeminor, d->ifp->name);
				d->maxbcnt = DEFAULTBCNT;
				d->flags |= DEVFL_MAXBCNT;
			}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
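/* Example of the jumbo fallback above: with nframes=16, more than 32
 * retransmitted jumbo frames drop maxbcnt to DEFAULTBCNT (1 KB
 * payloads), on the theory that an MTU mismatch somewhere on the path
 * is eating the large frames.
 */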
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
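/* Timeout arithmetic in the timer above: timeout = rttavg + rttavg/2.
 * E.g. with rttavg=100 jiffies, any frame that has waited 150 jiffies
 * or more is retransmitted, and each retransmit adds the timeout to
 * f->waited so that aoe_deadsecs can eventually fail the device.
 */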
/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
				d->aoemajor, d->aoeminor,
				"it's already on!  This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
		schedule_work(&d->work);
	}
}
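/* Hypothetical helper for the IDENTIFY word accesses above (for
 * illustration only; the driver open-codes the pattern): word w of the
 * 512-byte IDENTIFY block lives at byte offset w<<1, little-endian.
 */
static inline u16 id_word(unsigned char *id, int w)
{
	return le16_to_cpu(get_unaligned((__le16 *) &id[w << 1]));
}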
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
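/* The update is the standard exponentially weighted moving average with
 * gain 1/4: rttavg += (rtt - rttavg) / 4.  E.g. rttavg=80 and a sample
 * of 120 gives 80 + 40/4 = 90.  A negative argument (the convention in
 * aoecmd_ata_rsp() below for responses matching no outstanding frame)
 * updates the mintimer floor instead.
 */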
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = aoe_hdr(skb);
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = aoe_hdr(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read. skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
			/* fall through to the shared continuation logic */
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid. skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (unsigned char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}
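/* Note the deliberate fall-through from the READ cases to the WRITE
 * cases above: after a read's payload is copied, both paths share the
 * multi-frame continuation logic that re-tags the frame and sends the
 * next DEFAULTBCNT-sized chunk until f->bcnt is exhausted.
 */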
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
	aoenet_xmit(sl);
}
/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = aoe_hdr(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
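/* Worked example of the maxbcnt calculation above: a standard 1500-byte
 * MTU leaves 1500 - 36 = 1464 bytes after subtracting the AoE and ATA
 * header sizes, i.e. two whole 512-byte sectors, so data frames carry
 * 1024 bytes; a 9000-byte jumbo MTU yields 17 sectors, or 8704 bytes
 * per frame (further capped by the target's advertised ch->scnt).
 */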