aoecmd.c revision 459a98ed881802dee55897441bc7f77af614368e
/* Copyright (c) 2006 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"

#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)
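/*
 * Worked example, assuming HZ == 1000 (a common config): TIMERTICK is
 * 100 jiffies (~100 ms), MINTIMER 200 jiffies (~200 ms), and MAXTIMER
 * 2000 jiffies (2 s).  All of the rtt bookkeeping below is in jiffies.
 */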

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb->nh.raw = skb->data;
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n | ((++d->lasttag & 0x7fff) << 16);
}
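
/*
 * Example with hypothetical values: if jiffies == 0x0001beef and
 * d->lasttag increments to 5, newtag() returns 0x0005beef -- the low
 * 16 bits (0xbeef) are the xmit tick that tsince() subtracts from a
 * later jiffies value, and bits 16-30 hold the sequence number.
 */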

static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
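
/*
 * Example: put_lba(ah, 0x0123456789ab) stores lba0 == 0xab, lba1 ==
 * 0x89, lba2 == 0x67, lba3 == 0x45, lba4 == 0x23, lba5 == 0x01 -- the
 * 48-bit LBA split little-endian, one byte per field.
 */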

static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

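	/* WIN_READ (0x20) ORed with writebit (0x10) gives WIN_WRITE (0x30);
	 * ORing in extbit (0x04) selects the 48-bit EXT opcodes
	 * (WIN_READ_EXT 0x24, WIN_WRITE_EXT 0x34).
	 */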
	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

/* Some callers cannot sleep yet still need config packets built; they
 * call this function and transmit the resulting packets later, when
 * interrupts are on.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			continue;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = aoe_hdr(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}

static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG)
			continue;
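		/* a dataref of 1 means the network layer is done with its
		 * clones of this skb, so the frame can be safely reused
		 */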
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}

static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1) &&
				d->maxbcnt != DEFAULTBCNT) {
			printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
				d->aoemajor, d->aoeminor, d->ifp->name);
			d->maxbcnt = DEFAULTBCNT;
			d->flags |= DEVFL_MAXBCNT;
		}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
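
/*
 * Example: a frame stamped at tick 0xfff0 and checked at tick 0x0010
 * gives n == 0x0010 - 0xfff0 == -0xffe0; adding 1<<16 yields 0x20, or
 * 32 ticks, so the 16-bit tick wraparound is handled correctly.
 */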

static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;
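	/* e.g. rttavg == 40 jiffies gives timeout == 40 + 20 == 60 jiffies */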

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}

static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* words 100-103: number of lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* words 60-61: number of lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
			       d->aoemajor, d->aoeminor,
			       "it's already on!  This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}

static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
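
/*
 * That is, rttavg' = rttavg + (rtt - rttavg)/4, an exponentially
 * weighted moving average with gain 1/4.  Example: rttavg == 100 and a
 * measured rtt of 60 (within the clamps above) give rttavg' == 100 +
 * (60 - 100)/4 == 90.  A negative rtt marks a response that matched no
 * frame; its clamped magnitude also nudges d->mintimer, above.
 */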

void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = aoe_hdr(skb);
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			 aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = aoe_hdr(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read.  skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
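			/* fall through: reads share the remaining-byte
			 * accounting below with writes
			 */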
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (unsigned char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}

/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = aoe_hdr(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}
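	/*
	 * Worked example, assuming the usual 24-byte struct aoe_hdr
	 * (Ethernet header included) and 12-byte struct aoe_atahdr: an
	 * mtu of 9000 leaves (9000 - 36) / 512 == 17 whole sectors, so
	 * maxbcnt becomes 8704 bytes unless ch->scnt advertises fewer.
	 */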

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}