aoecmd.c revision b751e8b6590efdb76e1682c85bfcd5f3531ccae4
/* Copyright (c) 2006 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"

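/* timer constants, in jiffies: the retransmit timer runs every
 * TIMERTICK, and calc_rttavg below keeps the round trip estimates
 * between MINTIMER and MAXTIMER
 */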
#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

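/* allocate and initialize a zeroed skb of the given length for an
 * AoE packet; safe in atomic context, may return NULL
 */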
struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb->nh.raw = skb->mac.raw = skb->data;
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb_put(skb, len);
		memset(skb->head, 0, len);
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * and not to ask the NIC to do them
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

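/* return the frame carrying the given tag, by linear search of the
 * device's frame array, or NULL if no frame matches
 */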
static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n | ((++d->lasttag & 0x7fff) << 16);
}

static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

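/* store a 28- or 48-bit LBA into the six byte-wide lba fields,
 * least significant byte first
 */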
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

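/* build one ATA read/write frame for the buf being processed and
 * append a clone of its skb to the device send queue; called with
 * d->lock held (see aoecmd_work)
 */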
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	skb->len = sizeof *h + sizeof *ah;
	memset(h, 0, ETH_ZLEN);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		skb->len = ETH_ZLEN;
		writebit = 0;
	}

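	/* writebit and extbit turn WIN_READ into WIN_WRITE and into
	 * the LBA48 _EXT variants of both
	 */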
	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* dprintk("bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

/* Some callers cannot sleep; they can still call this function and
 * transmit the resulting packets later, when interrupts are on.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			iprintk("skb alloc failure\n");
			continue;
		}
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = (struct aoe_hdr *) skb->mac.raw;
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}

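/* find an unused frame whose skb the network layer has released
 * (dataref == 1); when every frame is free but still held, set
 * DEVFL_KICKME so rexmit_timer will call aoecmd_work again
 */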
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
/*dprintk("bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}

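/* resend frame f under a fresh tag; when too many large frames
 * appear to be getting lost, fall back to DEFAULTBCNT payloads
 */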
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1) &&
		    d->maxbcnt != DEFAULTBCNT) {
			iprintk("e%ld.%ld: too many lost jumbo frames on %s - using 1KB frames.\n",
				d->aoemajor, d->aoeminor, d->ifp->name);
			d->maxbcnt = DEFAULTBCNT;
			d->flags |= DEVFL_MAXBCNT;
		}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

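/* jiffies elapsed (mod 2^16) since the frame carrying this tag was
 * transmitted; the low 16 bits of a tag are its xmit tick (see newtag)
 */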
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

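/* per-device timer, run every TIMERTICK: retransmit frames that have
 * outlived the current timeout, fail the device after aoe_deadsecs,
 * and flush the send queue
 */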
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(void *vp)
{
	struct aoedev *d = (struct aoedev *) vp;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}

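/* parse ATA IDENTIFY DEVICE data: record capacity and geometry, then
 * schedule gendisk allocation or the resize work via d->work
 */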
static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* words 100-103: number of lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* words 60-61: number of lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		iprintk("%012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long) ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			eprintk("can't schedule work for e%lu.%lu, %s\n",
			       d->aoemajor, d->aoeminor,
			       "it's already on!  This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}

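/* fold a round trip sample into the moving average; a negative rtt
 * comes from a response that matched no live frame (see
 * aoecmd_ata_rsp) and additionally updates the minimum-timer estimate
 */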
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* rttavg += g * (rtt - rttavg) with gain g == .25, done in fixed
	 * point with a shift; cf. Congestion Avoidance and Control,
	 * Jacobson & Karels, 1988
	 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}

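/* handle an ATA response: match it to its frame, update the rtt
 * average, copy read data or send the next chunk of a multi-frame
 * command, and complete the bio when its last frame returns
 */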
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb->mac.raw;
	aoemajor = be16_to_cpu(hin->major);
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			 aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(hin->tag);
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(hin->major),
			hin->minor,
			be32_to_cpu(hin->tag),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = (struct aoe_hdr *) f->skb->mac.raw;
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* ERR, DRQ, DF, BSY: cleared on success */
		eprintk("ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				eprintk("runt data size in read.  skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
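			/* fall thru: reads share the multi-frame
			 * continuation logic below with writes
			 */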
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				iprintk("runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (char *) (ahin+1));
			break;
		default:
			iprintk("unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(hin->major),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}

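/* broadcast a config query for the given AoE address on all
 * AoE-capable interfaces
 */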
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}

/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		eprintk("can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	skb->len = ETH_ZLEN;
	memset(h, 0, ETH_ZLEN);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

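/* handle a config query response: create or update the aoedev, adopt
 * the responding MAC and interface, size data frames to the interface
 * MTU, and kick off an ATA identify if the device is idle
 */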
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = (struct aoe_hdr *) skb->mac.raw;
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(h->major);
	if (aoemajor == 0xfff) {
		eprintk("Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		iprintk("e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		iprintk("device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			iprintk("e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}