/*********************************************************************
 *
 *	vlsi_ir.c:	VLSI82C147 PCI IrDA controller driver for Linux
 *
 *	Copyright (c) 2001-2003 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *	GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program; if not, write to the Free Software
 *	Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 *	MA 02111-1307 USA
 *
 ********************************************************************/

#include <linux/module.h>

#define DRIVER_NAME		"vlsi_ir"
#define DRIVER_VERSION		"v0.5"
#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"

MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

/********************************************************/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>

#include "vlsi_ir.h"

/********************************************************/

static /* const */ char drivername[] = DRIVER_NAME;

static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
	{
		.class =	PCI_CLASS_WIRELESS_IRDA << 8,
		.class_mask =	PCI_CLASS_SUBCLASS_MASK << 8,
		.vendor =	PCI_VENDOR_ID_VLSI,
		.device =	PCI_DEVICE_ID_VLSI_82C147,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ /* all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, vlsi_irda_table);

/********************************************************/

/* clksrc: which clock source to be used
 *	0: auto - try PLL, fallback to 40MHz XCLK
 *	1: on-chip 48MHz PLL
 *	2: external 48MHz XCLK
 *	3: external 40MHz XCLK (HP OB-800)
 */

static int clksrc = 0;			/* default is 0(auto) */
module_param(clksrc, int, 0);
MODULE_PARM_DESC(clksrc, "clock input source selection");

/* ringsize: size of the tx and rx descriptor rings
 *	independent for tx and rx
 *	specify as ringsize=tx[,rx]
 *	allowed values: 4, 8, 16, 32, 64
 *	Due to the IrDA 1.x max. allowed window size=7,
 *	there should be no gain when using rings larger than 8
 */

static int ringsize[] = {8,8};		/* default is tx=8 / rx=8 */
module_param_array(ringsize, int, NULL, 0);
MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");

/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
 *	0: very short, 1.5us (exception: 6us at 2.4 kbaud)
 *	1: nominal 3/16 bittime width
 *	note: IrDA compliant peer devices should be happy regardless
 *	which one is used.
 *	Primary goal is to save some power on the sender's side - at
 *	9.6kbaud for example the short pulse width saves more than 90%
 *	of the transmitted IR power.
 */

static int sirpulse = 1;		/* default is 3/16 bittime */
module_param(sirpulse, int, 0);
MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");

/* qos_mtt_bits: encoded min-turn-time value we require the peer device
 *	to use before transmitting to us. "Type 1" (per-station)
 *	bitfield according to IrLAP definition (section 6.6.8)
 *	Don't know which transceiver is used by my OB800 - the
 *	pretty common HP HDLS-1100 requires 1 msec - so let's use this.
 */

static int qos_mtt_bits = 0x07;		/* default is 1 ms or more */
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");

/********************************************************/

static void vlsi_reg_debug(unsigned iobase, const char *s)
{
	int	i;

	printk(KERN_DEBUG "%s: ", s);
	for (i = 0; i < 0x20; i++)
		printk("%02x", (unsigned)inb((iobase+i)));
	printk("\n");
}

static void vlsi_ring_debug(struct vlsi_ring *r)
{
	struct ring_descr *rd;
	unsigned i;

	printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
	       __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
	       atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
	for (i = 0; i < r->size; i++) {
		rd = &r->rd[i];
		printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
		printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
		printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
		       __func__, (unsigned) rd_get_status(rd),
		       (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
	}
}

/********************************************************/

/* needed regardless of CONFIG_PROC_FS */
static struct proc_dir_entry *vlsi_proc_root = NULL;

#ifdef CONFIG_PROC_FS

static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
{
	unsigned iobase = pci_resource_start(pdev, 0);
	unsigned i;

	seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
		   pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
	seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
	seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
		   pdev->irq, (unsigned)pci_resource_start(pdev, 0),
		   (unsigned long long)pdev->dma_mask);
	seq_printf(seq, "hw registers: ");
	for (i = 0; i < 0x20; i++)
		seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
	seq_printf(seq, "\n");
}

static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	u8 byte;
	u16 word;
	unsigned delta1, delta2;
	struct timeval now;
	unsigned iobase = ndev->base_addr;

	seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
		   netif_device_present(ndev) ? "attached" : "detached",
		   netif_running(ndev) ? "running" : "not running",
		   netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
		   netif_queue_stopped(ndev) ? "queue stopped" : "queue running");

	if (!netif_running(ndev))
		return;

	seq_printf(seq, "\nhw-state:\n");
	pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
	seq_printf(seq, "IRMISC:%s%s%s uart%s",
		   (byte&IRMISC_IRRAIL) ? " irrail" : "",
		   (byte&IRMISC_IRPD) ? " irpd" : "",
		   (byte&IRMISC_UARTTST) ? " uarttest" : "",
		   (byte&IRMISC_UARTEN) ? "@" : " disabled\n");
	if (byte&IRMISC_UARTEN) {
		seq_printf(seq, "0x%s\n",
			   (byte&2) ? ((byte&1) ? "3e8" : "2e8")
				    : ((byte&1) ? "3f8" : "2f8"));
	}
	pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
	seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
		   (byte&CLKCTL_PD_INV) ? "powered" : "down",
		   (byte&CLKCTL_LOCK) ? " locked" : "",
		   (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
		   (byte&CLKCTL_CLKSTP) ? "stopped" : "running",
		   (byte&CLKCTL_WAKE) ? "enabled" : "disabled");
	pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
	seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);

	byte = inb(iobase+VLSI_PIO_IRINTR);
	seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
		   (byte&IRINTR_ACTEN) ? " ACTEN" : "",
		   (byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
		   (byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
		   (byte&IRINTR_OE_EN) ? " OE_EN" : "",
		   (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
		   (byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
		   (byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
		   (byte&IRINTR_OE_INT) ? " OE_INT" : "");
	word = inw(iobase+VLSI_PIO_RINGPTR);
	seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
	word = inw(iobase+VLSI_PIO_RINGBASE);
	seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
		   ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
	word = inw(iobase+VLSI_PIO_RINGSIZE);
	seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
		   RINGSIZE_TO_TXSIZE(word));

	word = inw(iobase+VLSI_PIO_IRCFG);
	seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		   (word&IRCFG_LOOP) ? " LOOP" : "",
		   (word&IRCFG_ENTX) ? " ENTX" : "",
		   (word&IRCFG_ENRX) ? " ENRX" : "",
		   (word&IRCFG_MSTR) ? " MSTR" : "",
		   (word&IRCFG_RXANY) ? " RXANY" : "",
		   (word&IRCFG_CRC16) ? " CRC16" : "",
		   (word&IRCFG_FIR) ? " FIR" : "",
		   (word&IRCFG_MIR) ? " MIR" : "",
		   (word&IRCFG_SIR) ? " SIR" : "",
		   (word&IRCFG_SIRFILT) ? " SIRFILT" : "",
		   (word&IRCFG_SIRTEST) ? " SIRTEST" : "",
		   (word&IRCFG_TXPOL) ? " TXPOL" : "",
		   (word&IRCFG_RXPOL) ? " RXPOL" : "");
	word = inw(iobase+VLSI_PIO_IRENABLE);
	seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
		   (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
		   (word&IRENABLE_CFGER) ? " CFGERR" : "",
		   (word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
		   (word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
		   (word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
		   (word&IRENABLE_ENTXST) ? " ENTXST" : "",
		   (word&IRENABLE_ENRXST) ? " ENRXST" : "",
		   (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
	word = inw(iobase+VLSI_PIO_PHYCTL);
	seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
		   (unsigned)PHYCTL_TO_BAUD(word),
		   (unsigned)PHYCTL_TO_PLSWID(word),
		   (unsigned)PHYCTL_TO_PREAMB(word));
	word = inw(iobase+VLSI_PIO_NPHYCTL);
	seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
		   (unsigned)PHYCTL_TO_BAUD(word),
		   (unsigned)PHYCTL_TO_PLSWID(word),
		   (unsigned)PHYCTL_TO_PREAMB(word));
	word = inw(iobase+VLSI_PIO_MAXPKT);
	seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
	word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
	seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);

	seq_printf(seq, "\nsw-state:\n");
	seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
		   (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
	do_gettimeofday(&now);
	if (now.tv_usec >= idev->last_rx.tv_usec) {
		delta2 = now.tv_usec - idev->last_rx.tv_usec;
		delta1 = 0;
	}
	else {
		delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
		delta1 = 1;
	}
	seq_printf(seq, "last rx: %lu.%06u sec\n",
		   now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);

	seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
		   ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
		   ndev->stats.rx_dropped);
	seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
		   ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
		   ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
	seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
		   ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
		   ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);

}

static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
{
	struct ring_descr *rd;
	unsigned i, j;
	int h, t;

	seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
		   r->size, r->mask, r->len, r->dir, r->rd[0].hw);
	h = atomic_read(&r->head) & r->mask;
	t = atomic_read(&r->tail) & r->mask;
	seq_printf(seq, "head = %d / tail = %d ", h, t);
	if (h == t)
		seq_printf(seq, "(empty)\n");
	else {
		if (((t+1)&r->mask) == h)
			seq_printf(seq, "(full)\n");
		else
			seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
		rd = &r->rd[h];
		j = (unsigned) rd_get_count(rd);
		seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
			   h, (unsigned)rd_get_status(rd), j);
		if (j > 0) {
			seq_printf(seq, " data:");
			if (j > 20)
				j = 20;
			for (i = 0; i < j; i++)
				seq_printf(seq, " %02x", (unsigned)((unsigned char *)rd->buf)[i]);
			seq_printf(seq, "\n");
		}
	}
	for (i = 0; i < r->size; i++) {
		rd = &r->rd[i];
		seq_printf(seq, "> ring descr %u: ", i);
		seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
		seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n",
			   (unsigned) rd_get_status(rd),
			   (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
	}
}

static int vlsi_seq_show(struct seq_file *seq, void *v)
{
	struct net_device *ndev = seq->private;
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	unsigned long flags;

	seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
	seq_printf(seq, "clksrc: %s\n",
		   (clksrc>=2) ?
			((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
		      : ((clksrc==1)?"48MHz PLL":"autodetect"));
	seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
		   ringsize[0], ringsize[1]);
	seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
	seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);

	spin_lock_irqsave(&idev->lock, flags);
	if (idev->pdev != NULL) {
		vlsi_proc_pdev(seq, idev->pdev);

		if (idev->pdev->current_state == 0)
			vlsi_proc_ndev(seq, ndev);
		else
			seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
				   idev->resume_ok);
		if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
			seq_printf(seq, "\n--------- RX ring -----------\n\n");
			vlsi_proc_ring(seq, idev->rx_ring);
			seq_printf(seq, "\n--------- TX ring -----------\n\n");
			vlsi_proc_ring(seq, idev->tx_ring);
		}
	}
	seq_printf(seq, "\n");
	spin_unlock_irqrestore(&idev->lock, flags);

	return 0;
}

static int vlsi_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, vlsi_seq_show, PDE(inode)->data);
}

static const struct file_operations vlsi_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = vlsi_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

#define VLSI_PROC_FOPS		(&vlsi_proc_fops)

#else /* CONFIG_PROC_FS */
#define VLSI_PROC_FOPS		NULL
#endif

/********************************************************/

static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
					 unsigned size, unsigned len, int dir)
{
	struct vlsi_ring *r;
	struct ring_descr *rd;
	unsigned i, j;
	dma_addr_t busaddr;

	if (!size || ((size-1)&size)!=0)	/* must be >0 and power of 2 */
		return NULL;

	r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
	if (!r)
		return NULL;
	memset(r, 0, sizeof(*r));

	r->pdev = pdev;
	r->dir = dir;
	r->len = len;
	r->rd = (struct ring_descr *)(r+1);
	r->mask = size - 1;
	r->size = size;
	atomic_set(&r->head, 0);
	atomic_set(&r->tail, 0);

	for (i = 0; i < size; i++) {
		rd = r->rd + i;
		memset(rd, 0, sizeof(*rd));
		rd->hw = hwmap + i;
		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
		if (rd->buf == NULL ||
		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
			if (rd->buf) {
				IRDA_ERROR("%s: failed to create PCI-MAP for %p\n",
					   __func__, rd->buf);
				kfree(rd->buf);
				rd->buf = NULL;
			}
			for (j = 0; j < i; j++) {
				rd = r->rd + j;
				busaddr = rd_get_addr(rd);
				rd_set_addr_status(rd, 0, 0);
				if (busaddr)
					pci_unmap_single(pdev, busaddr, len, dir);
				kfree(rd->buf);
				rd->buf = NULL;
			}
			kfree(r);
			return NULL;
		}
		rd_set_addr_status(rd, busaddr, 0);
		/* initially, the dma buffer is owned by the CPU */
		rd->skb = NULL;
	}
	return r;
}

static int vlsi_free_ring(struct vlsi_ring *r)
{
	struct ring_descr *rd;
	unsigned i;
	dma_addr_t busaddr;

	for (i = 0; i < r->size; i++) {
		rd = r->rd + i;
		if (rd->skb)
			dev_kfree_skb_any(rd->skb);
		busaddr = rd_get_addr(rd);
		rd_set_addr_status(rd, 0, 0);
		if (busaddr)
			pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
		kfree(rd->buf);
	}
	kfree(r);
	return 0;
}

static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
{
	char			*ringarea;
	struct ring_descr_hw	*hwmap;

	idev->virtaddr = NULL;
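	/* Layout of the consistent ring area allocated below: the rx hw
	 * descriptor array comes first, the tx array follows at
	 * hwmap + MAX_RING_DESCR; the per-descriptor data buffers are
	 * separate streaming mappings set up in vlsi_alloc_ring().
	 */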
	idev->busaddr = 0;

	ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
	if (!ringarea) {
		IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
			   __func__);
		goto out;
	}
	memset(ringarea, 0, HW_RING_AREA_SIZE);

	hwmap = (struct ring_descr_hw *)ringarea;
	idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
					XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (idev->rx_ring == NULL)
		goto out_unmap;

	hwmap += MAX_RING_DESCR;
	idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
					XFER_BUF_SIZE, PCI_DMA_TODEVICE);
	if (idev->tx_ring == NULL)
		goto out_free_rx;

	idev->virtaddr = ringarea;
	return 0;

out_free_rx:
	vlsi_free_ring(idev->rx_ring);
out_unmap:
	idev->rx_ring = idev->tx_ring = NULL;
	pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
	idev->busaddr = 0;
out:
	return -ENOMEM;
}

static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
{
	vlsi_free_ring(idev->rx_ring);
	vlsi_free_ring(idev->tx_ring);
	idev->rx_ring = idev->tx_ring = NULL;

	if (idev->busaddr)
		pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, idev->virtaddr, idev->busaddr);

	idev->virtaddr = NULL;
	idev->busaddr = 0;

	return 0;
}

/********************************************************/

static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
{
	u16		status;
	int		crclen, len = 0;
	struct sk_buff	*skb;
	int		ret = 0;
	struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
	vlsi_irda_dev_t *idev = netdev_priv(ndev);

	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
	/* dma buffer now owned by the CPU */
	status = rd_get_status(rd);
	if (status & RD_RX_ERROR) {
		if (status & RD_RX_OVER)
			ret |= VLSI_RX_OVER;
		if (status & RD_RX_LENGTH)
			ret |= VLSI_RX_LENGTH;
		if (status & RD_RX_PHYERR)
			ret |= VLSI_RX_FRAME;
		if (status & RD_RX_CRCERR)
			ret |= VLSI_RX_CRC;
		goto done;
	}

	len = rd_get_count(rd);
	crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
	len -= crclen;		/* remove trailing CRC */
	if (len <= 0) {
		IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
		ret |= VLSI_RX_DROP;
		goto done;
	}

	if (idev->mode == IFF_SIR) {	/* hw checks CRC in MIR, FIR mode */

		/* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
		 * endian-adjustment there just in place will dirty a cache line
		 * which belongs to the map and thus we must be sure it will
		 * get flushed before giving the buffer back to hardware.
		 * vlsi_fill_rx() will do this anyway - but here we rely on it.
		 */
		le16_to_cpus(rd->buf+len);
		if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
			IRDA_DEBUG(0, "%s: crc error\n", __func__);
			ret |= VLSI_RX_CRC;
			goto done;
		}
	}

	if (!rd->skb) {
		IRDA_WARNING("%s: rx packet lost\n", __func__);
		ret |= VLSI_RX_DROP;
		goto done;
	}

	skb = rd->skb;
	rd->skb = NULL;
	skb->dev = ndev;
	memcpy(skb_put(skb,len), rd->buf, len);
	skb_reset_mac_header(skb);
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);

done:
	rd_set_status(rd, 0);
	rd_set_count(rd, 0);
	/* buffer still owned by CPU */

	return (ret) ?
		-ret : len;
}

static void vlsi_fill_rx(struct vlsi_ring *r)
{
	struct ring_descr *rd;

	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
		if (rd_is_active(rd)) {
			IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
				     __func__);
			vlsi_ring_debug(r);
			break;
		}
		if (!rd->skb) {
			rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
			if (rd->skb) {
				skb_reserve(rd->skb,1);
				rd->skb->protocol = htons(ETH_P_IRDA);
			}
			else
				break;	/* probably not worth logging? */
		}
		/* give dma buffer back to busmaster */
		pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
		rd_activate(rd);
	}
}

static void vlsi_rx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct vlsi_ring *r = idev->rx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		if (rd_is_active(rd))
			break;

		ret = vlsi_process_rx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.rx_errors++;
			if (ret & VLSI_RX_DROP)
				ndev->stats.rx_dropped++;
			if (ret & VLSI_RX_OVER)
				ndev->stats.rx_over_errors++;
			if (ret & VLSI_RX_LENGTH)
				ndev->stats.rx_length_errors++;
			if (ret & VLSI_RX_FRAME)
				ndev->stats.rx_frame_errors++;
			if (ret & VLSI_RX_CRC)
				ndev->stats.rx_crc_errors++;
		}
		else if (ret > 0) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += ret;
		}
	}

	do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */

	vlsi_fill_rx(r);

	if (ring_first(r) == NULL) {
		/* we are in big trouble if this should ever happen */
		IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
		vlsi_ring_debug(r);
	}
	else
		outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
}

/* caller must have stopped the controller from busmastering */

static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
{
	struct net_device *ndev = pci_get_drvdata(idev->pdev);
	struct vlsi_ring *r = idev->rx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		ret = 0;
		if (rd_is_active(rd)) {
			rd_set_status(rd, 0);
			if (rd_get_count(rd)) {
				IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
				ret = -VLSI_RX_DROP;
			}
			rd_set_count(rd, 0);
			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
			if (rd->skb) {
				dev_kfree_skb_any(rd->skb);
				rd->skb = NULL;
			}
		}
		else
			ret = vlsi_process_rx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.rx_errors++;
			if (ret & VLSI_RX_DROP)
				ndev->stats.rx_dropped++;
			if (ret & VLSI_RX_OVER)
				ndev->stats.rx_over_errors++;
			if (ret & VLSI_RX_LENGTH)
				ndev->stats.rx_length_errors++;
			if (ret & VLSI_RX_FRAME)
				ndev->stats.rx_frame_errors++;
			if (ret & VLSI_RX_CRC)
				ndev->stats.rx_crc_errors++;
		}
		else if (ret > 0) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += ret;
		}
	}
}

/********************************************************/

static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
{
	u16		status;
	int		len;
	int		ret;

	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
	/* dma buffer now owned by the CPU */
	status = rd_get_status(rd);
	if (status & RD_TX_UNDRN)
		ret = VLSI_TX_FIFO;
	else
		ret = 0;
	rd_set_status(rd, 0);

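	/* tx-complete: the skb kept on the descriptor at xmit time gives the
	 * original frame length for the byte counters (rd_get_count() would
	 * include the SIR wrapping overhead).
	 */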
	if (rd->skb) {
		len = rd->skb->len;
		dev_kfree_skb_any(rd->skb);
		rd->skb = NULL;
	}
	else	/* tx-skb already freed? - should never happen */
		len = rd_get_count(rd);	/* incorrect for SIR! (due to wrapping) */

	rd_set_count(rd, 0);
	/* dma buffer still owned by the CPU */

	return (ret) ? -ret : len;
}

static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
{
	u16 nphyctl;
	u16 config;
	unsigned mode;
	int	ret;
	int	baudrate;
	int	fifocnt;

	baudrate = idev->new_baud;
	IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
	if (baudrate == 4000000) {
		mode = IFF_FIR;
		config = IRCFG_FIR;
		nphyctl = PHYCTL_FIR;
	}
	else if (baudrate == 1152000) {
		mode = IFF_MIR;
		config = IRCFG_MIR | IRCFG_CRC16;
		nphyctl = PHYCTL_MIR(clksrc==3);
	}
	else {
		mode = IFF_SIR;
		config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
		switch(baudrate) {
			default:
				IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
					     __func__, baudrate);
				baudrate = 9600;
				/* fallthru */
			case 2400:
			case 9600:
			case 19200:
			case 38400:
			case 57600:
			case 115200:
				nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
				break;
		}
	}
	config |= IRCFG_MSTR | IRCFG_ENRX;

	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
	if (fifocnt != 0) {
		IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
	}

	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(config, iobase+VLSI_PIO_IRCFG);
	outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
	wmb();
	outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
	mb();

	udelay(1);	/* chip applies IRCFG on next rising edge of its 8MHz clock */

	/* read back settings for validation */

	config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;

	if (mode == IFF_FIR)
		config ^= IRENABLE_FIR_ON;
	else if (mode == IFF_MIR)
		config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
	else
		config ^= IRENABLE_SIR_ON;

	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
		IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
			     (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
		ret = -1;
	}
	else {
		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
			IRDA_WARNING("%s: failed to apply baudrate %d\n",
				     __func__, baudrate);
			ret = -1;
		}
		else {
			idev->mode = mode;
			idev->baud = baudrate;
			idev->new_baud = 0;
			ret = 0;
		}
	}

	if (ret)
		vlsi_reg_debug(iobase,__func__);

	return ret;
}

static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct vlsi_ring	*r = idev->tx_ring;
	struct ring_descr *rd;
	unsigned long flags;
	unsigned iobase = ndev->base_addr;
	u8 status;
	u16 config;
	int mtt;
	int len, speed;
	struct timeval  now, ready;
	char *msg = NULL;

	speed = irda_get_next_speed(skb);
	spin_lock_irqsave(&idev->lock, flags);
	if (speed != -1  &&  speed != idev->baud) {
		netif_stop_queue(ndev);
		idev->new_baud = speed;
		status = RD_TX_CLRENTX;  /* stop tx-ring after this frame */
	}
	else
		status = 0;

	if (skb->len == 0) {
		/* handle zero packets - should be speed change */
		if (status == 0) {
			msg = "bogus zero-length packet";
			goto drop_unlock;
		}

		/* due to the completely asynch tx operation we might have
		 * IrLAP racing with the hardware here, e.g. if the controller
		 * is just sending the last packet with current speed while
		 * the LAP is already switching the speed using synchronous
		 * len=0 packet. Immediate execution would lead to hw lockup
		 * requiring a powercycle to reset. Good candidate to trigger
		 * this is the final UA:RSP packet after receiving a DISC:CMD
		 * when getting the LAP down.
		 * Note that we are not protected by the queue_stop approach
		 * because the final UA:RSP arrives _without_ request to apply
		 * new-speed-after-this-packet - hence the driver doesn't know
		 * this was the last packet and doesn't stop the queue. So the
		 * forced switch to default speed from LAP gets through as fast
		 * as only some 10 usec later while the UA:RSP is still processed
		 * by the hardware and we would get screwed.
		 */

		if (ring_first(idev->tx_ring) == NULL) {
			/* no race - tx-ring already empty */
			vlsi_set_baud(idev, iobase);
			netif_wake_queue(ndev);
		}
		else
			;
			/* keep the speed change pending like it would
			 * for any len>0 packet. tx completion interrupt
			 * will apply it when the tx ring becomes empty.
			 */
		spin_unlock_irqrestore(&idev->lock, flags);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* sanity checks - simply drop the packet */

	rd = ring_last(r);
	if (!rd) {
		msg = "ring full, but queue wasn't stopped";
		goto drop_unlock;
	}

	if (rd_is_active(rd)) {
		msg = "entry still owned by hw";
		goto drop_unlock;
	}

	if (!rd->buf) {
		msg = "tx ring entry without pci buffer";
		goto drop_unlock;
	}

	if (rd->skb) {
		msg = "ring entry with old skb still attached";
		goto drop_unlock;
	}

	/* no need for serialization or interrupt disable during mtt */
	spin_unlock_irqrestore(&idev->lock, flags);

	if ((mtt = irda_get_mtt(skb)) > 0) {
		ready.tv_usec = idev->last_rx.tv_usec + mtt;
		ready.tv_sec = idev->last_rx.tv_sec;
		if (ready.tv_usec >= 1000000) {
			ready.tv_usec -= 1000000;
			ready.tv_sec++;		/* IrLAP 1.1: mtt always < 1 sec */
		}
		for(;;) {
			do_gettimeofday(&now);
			if (now.tv_sec > ready.tv_sec ||
			    (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
				break;
			udelay(100);
			/* must not sleep here - called under netif_tx_lock! */
		}
	}

	/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
	 * after subsequent tx-completion
	 */

	if (idev->mode == IFF_SIR) {
		status |= RD_TX_DISCRC;		/* no hw-crc creation */
		len = async_wrap_skb(skb, rd->buf, r->len);

		/* Some rare worst case situation in SIR mode might lead to
		 * potential buffer overflow. The wrapper detects this, returns
		 * with a shortened frame (without FCS/EOF) but doesn't provide
		 * any error indication about the invalid packet which we are
		 * going to transmit.
		 * Therefore we log if the buffer got filled to the point where the
		 * wrapper would abort, i.e. when there are less than 5 bytes left to
		 * allow appending the FCS/EOF.
		 */

		if (len >= r->len-5)
			IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
				     __func__);
	}
	else {
		/* hw deals with MIR/FIR mode wrapping */
		status |= RD_TX_PULSE;		/* send 2 us highspeed indication pulse */
		len = skb->len;
		if (len > r->len) {
			msg = "frame exceeds tx buffer length";
			goto drop;
		}
		else
			skb_copy_from_linear_data(skb, rd->buf, len);
	}

	rd->skb = skb;			/* remember skb for tx-complete stats */

	rd_set_count(rd, len);
	rd_set_status(rd, status);	/* not yet active! */

	/* give dma buffer back to busmaster-hw (flush caches to make
	 * CPU-driven changes visible from the pci bus).
	 */

	pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);

/*	Switching to TX mode here races with the controller
 *	which may stop TX at any time when fetching an inactive descriptor
 *	or one with CLR_ENTX set. So we switch on TX only, if TX was not running
 *	_after_ the new descriptor was activated on the ring. This ensures
 *	we will either find TX already stopped or we can be sure, there
 *	will be a TX-complete interrupt even if the chip stopped doing
 *	TX just after we found it still running. The ISR will then find
 *	the non-empty ring and restart TX processing. The enclosing
 *	spinlock provides the correct serialization to prevent race with isr.
 */

	spin_lock_irqsave(&idev->lock,flags);

	rd_activate(rd);

	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		int fifocnt;

		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
		if (fifocnt != 0) {
			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
		}

		config = inw(iobase+VLSI_PIO_IRCFG);
		mb();
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
		wmb();
		outw(0, iobase+VLSI_PIO_PROMPT);
	}

	if (ring_put(r) == NULL) {
		netif_stop_queue(ndev);
		IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
	}
	spin_unlock_irqrestore(&idev->lock, flags);

	return NETDEV_TX_OK;

drop_unlock:
	spin_unlock_irqrestore(&idev->lock, flags);
drop:
	IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
	dev_kfree_skb_any(skb);
	ndev->stats.tx_errors++;
	ndev->stats.tx_dropped++;
	/* Don't even think about returning NET_XMIT_DROP (=1) here!
	 * In fact any retval!=0 causes the packet scheduler to requeue the
	 * packet for later retry of transmission - which isn't exactly
	 * what we want after we've just called dev_kfree_skb_any ;-)
	 */
	return NETDEV_TX_OK;
}

static void vlsi_tx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t	*idev = netdev_priv(ndev);
	struct vlsi_ring	*r = idev->tx_ring;
	struct ring_descr	*rd;
	unsigned	iobase;
	int	ret;
	u16	config;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		if (rd_is_active(rd))
			break;

		ret = vlsi_process_tx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.tx_errors++;
			if (ret & VLSI_TX_DROP)
				ndev->stats.tx_dropped++;
			if (ret & VLSI_TX_FIFO)
				ndev->stats.tx_fifo_errors++;
		}
		else if (ret > 0) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ret;
		}
	}

	iobase = ndev->base_addr;

	if (idev->new_baud  &&  rd == NULL)	/* tx ring empty and speed change pending */
		vlsi_set_baud(idev, iobase);

	config = inw(iobase+VLSI_PIO_IRCFG);
	if (rd == NULL)			/* tx ring empty: re-enable rx */
		outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);

	else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		int fifocnt;

		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
		if (fifocnt != 0) {
			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
				   __func__, fifocnt);
		}
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
	}

	outw(0, iobase+VLSI_PIO_PROMPT);

	if (netif_queue_stopped(ndev)  &&  !idev->new_baud) {
		netif_wake_queue(ndev);
		IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
	}
}

/* caller must have stopped the controller from busmastering */

static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
{
	struct net_device *ndev = pci_get_drvdata(idev->pdev);
	struct vlsi_ring *r = idev->tx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		ret = 0;
		if (rd_is_active(rd)) {
			rd_set_status(rd, 0);
			rd_set_count(rd, 0);
			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
			if (rd->skb) {
				dev_kfree_skb_any(rd->skb);
				rd->skb = NULL;
			}
			IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
			ret = -VLSI_TX_DROP;
		}
		else
			ret = vlsi_process_tx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.tx_errors++;
			if (ret & VLSI_TX_DROP)
				ndev->stats.tx_dropped++;
			if (ret & VLSI_TX_FIFO)
				ndev->stats.tx_fifo_errors++;
		}
		else if (ret > 0) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ret;
		}
	}

}

/********************************************************/

static int vlsi_start_clock(struct pci_dev *pdev)
{
	u8	clkctl, lock;
	int	i, count;

	if (clksrc < 2) {		/* auto or PLL: try PLL */
		clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* procedure to detect PLL lock synchronisation:
		 * after 0.5 msec initial delay we expect to find 3 PLL lock
		 * indications within 10 msec for successful PLL detection.
		 */
		udelay(500);
		count = 0;
		for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
			if (lock&CLKCTL_LOCK) {
				if (++count >= 3)
					break;
			}
			udelay(50);
		}
		if (count < 3) {
			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
				IRDA_ERROR("%s: no PLL or failed to lock!\n",
					   __func__);
				clkctl = CLKCTL_CLKSTP;
				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
				return -1;
			}
			else			/* was: clksrc=0(auto) */
				clksrc = 3;	/* fallback to 40MHz XCLK (OB800) */

			IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
				   __func__, clksrc);
		}
		else
			clksrc = 1;	/* got successful PLL lock */
	}

	if (clksrc != 1) {
		/* we get here if either no PLL detected in auto-mode or
		   an external clock source was explicitly specified */

		clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
		if (clksrc == 3)
			clkctl |= CLKCTL_XCKSEL;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* no way to test for working XCLK */
	}
	else
		pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);

	/* ok, now going to connect the chip with the clock source */

	clkctl &= ~CLKCTL_CLKSTP;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	return 0;
}

static void vlsi_stop_clock(struct pci_dev *pdev)
{
	u8	clkctl;

	/* disconnect chip from clock source */
	pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
	clkctl |= CLKCTL_CLKSTP;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	/* disable all clock sources */
	clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}

/********************************************************/

/* writing all-zero to the VLSI PCI IO register area seems to prevent
 * some occasional situations where the hardware fails (symptoms are
 * what appears as stalled tx/rx state machines, i.e. everything ok for
 * receive or transmit but hw makes no progress or is unable to access
 * the bus memory locations).
 * Best place to call this is immediately after/before the internal clock
 * gets started/stopped.
 */

static inline void vlsi_clear_regs(unsigned iobase)
{
	unsigned	i;
	const unsigned	chip_io_extent = 32;

	for (i = 0; i < chip_io_extent; i += sizeof(u16))
		outw(0, iobase + i);
}

static int vlsi_init_chip(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	unsigned	iobase;
	u16 ptr;

	/* start the clock and clean the registers */

	if (vlsi_start_clock(pdev)) {
		IRDA_ERROR("%s: no valid clock source\n", __func__);
		return -1;
	}
	iobase = ndev->base_addr;
	vlsi_clear_regs(iobase);

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */

	outw(0, iobase+VLSI_PIO_IRENABLE);	/* disable IrPHY-interface */

	/* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */

	outw(0, iobase+VLSI_PIO_IRCFG);
	wmb();

	outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT);  /* max possible value=0x0fff */

	outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);

	outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
	     iobase+VLSI_PIO_RINGSIZE);

	ptr = inw(iobase+VLSI_PIO_RINGPTR);
	atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
	atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
	atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
	atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));

	vlsi_set_baud(idev, iobase);	/* idev->new_baud used as provided by caller */

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* just in case - w/c pending IRQ's */
	wmb();

	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
	 * basically every received pulse fires an ACTIVITY-INT
	 * leading to >>1000 INT's per second instead of a few tens
	 */

	outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);

	return 0;
}

static int vlsi_start_hw(vlsi_irda_dev_t *idev)
{
	struct pci_dev *pdev = idev->pdev;
	struct net_device *ndev = pci_get_drvdata(pdev);
	unsigned iobase = ndev->base_addr;
	u8 byte;

	/* we don't use the legacy UART, disable its address decoding */

	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);

	/* enable PCI busmaster access to our 16MB page */

	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
	pci_set_master(pdev);

	if (vlsi_init_chip(pdev) < 0) {
		pci_disable_device(pdev);
		return -1;
	}

	vlsi_fill_rx(idev->rx_ring);

	do_gettimeofday(&idev->last_rx);	/* first mtt may start from now on */

	outw(0, iobase+VLSI_PIO_PROMPT);	/* kick hw state machine */

	return 0;
}

static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
{
	struct pci_dev *pdev = idev->pdev;
	struct net_device *ndev = pci_get_drvdata(pdev);
	unsigned iobase = ndev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&idev->lock,flags);
	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */

	/* disable and w/c irqs */
	outb(0, iobase+VLSI_PIO_IRINTR);
	wmb();
	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
	spin_unlock_irqrestore(&idev->lock,flags);

	vlsi_unarm_tx(idev);
	vlsi_unarm_rx(idev);

	vlsi_clear_regs(iobase);
	vlsi_stop_clock(pdev);

	pci_disable_device(pdev);

	return 0;
}

/**************************************************************/

static void vlsi_tx_timeout(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);


	vlsi_reg_debug(ndev->base_addr, __func__);
	vlsi_ring_debug(idev->tx_ring);

	if (netif_running(ndev))
		netif_stop_queue(ndev);

	vlsi_stop_hw(idev);

	/* now simply restart the whole thing */

	if (!idev->new_baud)
		idev->new_baud = idev->baud;	/* keep current baudrate */

	if (vlsi_start_hw(idev))
		IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
			   __func__, pci_name(idev->pdev), ndev->name);
	else
		netif_start_queue(ndev);
}

static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	unsigned long flags;
	u16 fifocnt;
	int ret = 0;

	switch (cmd) {
		case SIOCSBANDWIDTH:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			spin_lock_irqsave(&idev->lock, flags);
			idev->new_baud = irq->ifr_baudrate;
			/* when called from userland there might be a minor race window here
			 * if the stack tries to change speed concurrently - which would be
			 * pretty strange anyway with the userland having full control...
			 */
			vlsi_set_baud(idev, ndev->base_addr);
			spin_unlock_irqrestore(&idev->lock, flags);
			break;
		case SIOCSMEDIABUSY:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			irda_device_set_media_busy(ndev, TRUE);
			break;
		case SIOCGRECEIVING:
			/* the best we can do: check whether there are any bytes in rx fifo.
			 * The trustable window (in case some data arrives just afterwards)
			 * may be as short as 1usec or so at 4Mbps.
			 */
			fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
			irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
			break;
		default:
			IRDA_WARNING("%s: notsupp - cmd=%04x\n",
				     __func__, cmd);
			ret = -EOPNOTSUPP;
	}

	return ret;
}

/********************************************************/

static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	unsigned	iobase;
	u8		irintr;
	int		boguscount = 5;
	unsigned long	flags;
	int		handled = 0;

	iobase = ndev->base_addr;
	spin_lock_irqsave(&idev->lock,flags);
	do {
		irintr = inb(iobase+VLSI_PIO_IRINTR);
		mb();
		outb(irintr, iobase+VLSI_PIO_IRINTR);	/* acknowledge asap */

		if (!(irintr&=IRINTR_INT_MASK))		/* not our INT - probably shared */
			break;

		handled = 1;

		if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
			break;		/* nothing todo if only activity */

		if (irintr&IRINTR_RPKTINT)
			vlsi_rx_interrupt(ndev);

		if (irintr&IRINTR_TPKTINT)
			vlsi_tx_interrupt(ndev);

	} while (--boguscount > 0);
	spin_unlock_irqrestore(&idev->lock,flags);

	if (boguscount <= 0)
		IRDA_MESSAGE("%s: too much work in interrupt!\n",
			     __func__);
	return IRQ_RETVAL(handled);
}

/********************************************************/

static int vlsi_open(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	int	err = -EAGAIN;
	char	hwname[32];

	if (pci_request_regions(idev->pdev, drivername)) {
		IRDA_WARNING("%s: io resource busy\n", __func__);
		goto errout;
	}
	ndev->base_addr = pci_resource_start(idev->pdev,0);
	ndev->irq = idev->pdev->irq;

	/* under some rare occasions the chip apparently comes up with
	 * IRQ's pending.
	 * We better w/c pending IRQ and disable them all
	 */

	outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);

	if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
			drivername, ndev)) {
		IRDA_WARNING("%s: couldn't get IRQ: %d\n",
			     __func__, ndev->irq);
		goto errout_io;
	}

	if ((err = vlsi_create_hwif(idev)) != 0)
		goto errout_irq;

	sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
	idev->irlap = irlap_open(ndev,&idev->qos,hwname);
	if (!idev->irlap)
		goto errout_free_ring;

	do_gettimeofday(&idev->last_rx);  /* first mtt may start from now on */

	idev->new_baud = 9600;		/* start with IrPHY using 9600(SIR) mode */

	if ((err = vlsi_start_hw(idev)) != 0)
		goto errout_close_irlap;

	netif_start_queue(ndev);

	IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);

	return 0;

errout_close_irlap:
	irlap_close(idev->irlap);
errout_free_ring:
	vlsi_destroy_hwif(idev);
errout_irq:
	free_irq(ndev->irq,ndev);
errout_io:
	pci_release_regions(idev->pdev);
errout:
	return err;
}

static int vlsi_close(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);

	netif_stop_queue(ndev);

	if (idev->irlap)
		irlap_close(idev->irlap);
	idev->irlap = NULL;

	vlsi_stop_hw(idev);

	vlsi_destroy_hwif(idev);

	free_irq(ndev->irq,ndev);

	pci_release_regions(idev->pdev);

	IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);

	return 0;
}

static const struct net_device_ops vlsi_netdev_ops = {
	.ndo_open	= vlsi_open,
	.ndo_stop	= vlsi_close,
	.ndo_start_xmit	= vlsi_hard_start_xmit,
	.ndo_do_ioctl	= vlsi_ioctl,
	.ndo_tx_timeout	= vlsi_tx_timeout,
};

static int vlsi_irda_init(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct pci_dev *pdev = idev->pdev;

	ndev->irq = pdev->irq;
	ndev->base_addr = pci_resource_start(pdev,0);

	/* PCI busmastering
	 * see include file for details why we need these 2 masks, in this order!
	 */

	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
	    pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
		IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
		return -1;
	}

	irda_init_max_qos_capabilies(&idev->qos);

	/* the VLSI82C147 does not support 576000! */

	idev->qos.baud_rate.bits = IR_2400 | IR_9600
		| IR_19200 | IR_38400 | IR_57600 | IR_115200
		| IR_1152000 | (IR_4000000 << 8);

	idev->qos.min_turn_time.bits = qos_mtt_bits;

	irda_qos_bits_to_value(&idev->qos);

	/* currently no public media definitions for IrDA */

	ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
	ndev->if_port = IF_PORT_UNKNOWN;

	ndev->netdev_ops = &vlsi_netdev_ops;
	ndev->watchdog_timeo = 500*HZ/1000;	/* max. allowed turn time for IrLAP */

	SET_NETDEV_DEV(ndev, &pdev->dev);

	return 0;
}

/**************************************************************/

static int __devinit
vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device	*ndev;
	vlsi_irda_dev_t		*idev;

	if (pci_enable_device(pdev))
		goto out;
	else
		pdev->current_state = 0; /* hw must be running now */

	IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
		     drivername, pci_name(pdev));

	if ( !pci_resource_start(pdev,0) ||
	     !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
		IRDA_ERROR("%s: bar 0 invalid\n", __func__);
		goto out_disable;
	}

	ndev = alloc_irdadev(sizeof(*idev));
	if (ndev==NULL) {
		IRDA_ERROR("%s: Unable to allocate device memory.\n",
			   __func__);
		goto out_disable;
	}

	idev = netdev_priv(ndev);

	spin_lock_init(&idev->lock);
	mutex_init(&idev->mtx);
	mutex_lock(&idev->mtx);
	idev->pdev = pdev;

	if (vlsi_irda_init(ndev) < 0)
		goto out_freedev;

	if (register_netdev(ndev) < 0) {
		IRDA_ERROR("%s: register_netdev failed\n", __func__);
		goto out_freedev;
	}

	if (vlsi_proc_root != NULL) {
		struct proc_dir_entry *ent;

		ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
				       vlsi_proc_root, VLSI_PROC_FOPS, ndev);
		if (!ent) {
			IRDA_WARNING("%s: failed to create proc entry\n",
				     __func__);
		} else {
			ent->size = 0;
		}
		idev->proc_entry = ent;
	}
	IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);

	pci_set_drvdata(pdev, ndev);
	mutex_unlock(&idev->mtx);

	return 0;

out_freedev:
	mutex_unlock(&idev->mtx);
	free_netdev(ndev);
out_disable:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
}

static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev;

	if (!ndev) {
		IRDA_ERROR("%s: lost netdevice?\n", drivername);
		return;
	}

	unregister_netdev(ndev);

	idev = netdev_priv(ndev);
	mutex_lock(&idev->mtx);
	if (idev->proc_entry) {
		remove_proc_entry(ndev->name, vlsi_proc_root);
		idev->proc_entry = NULL;
	}
	mutex_unlock(&idev->mtx);

	free_netdev(ndev);

	pci_set_drvdata(pdev, NULL);

	IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
}

#ifdef CONFIG_PM

/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs.
 * Some of the Linux PCI-PM code however depends on this, for example in
 * pci_set_power_state(). So we have to take care to perform the required
 * operations on our own (particularly reflecting the pdev->current_state)
 * otherwise we might get cheated by pci-pm.
 */


static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev;

	if (!ndev) {
		IRDA_ERROR("%s - %s: no netdevice\n",
			   __func__, pci_name(pdev));
		return 0;
	}
	idev = netdev_priv(ndev);
	mutex_lock(&idev->mtx);
	if (pdev->current_state != 0) {			/* already suspended */
		if (state.event > pdev->current_state) {	/* simply go deeper */
			pci_set_power_state(pdev, pci_choose_state(pdev, state));
			pdev->current_state = state.event;
		}
		else
			IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n",
				   __func__, pci_name(pdev), pdev->current_state, state.event);
		mutex_unlock(&idev->mtx);
		return 0;
	}

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		vlsi_stop_hw(idev);
		pci_save_state(pdev);
		if (!idev->new_baud)
			/* remember speed settings to restore on resume */
			idev->new_baud = idev->baud;
	}

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	pdev->current_state = state.event;
	idev->resume_ok = 1;
	mutex_unlock(&idev->mtx);
	return 0;
}

static int vlsi_irda_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t	*idev;

	if (!ndev) {
		IRDA_ERROR("%s - %s: no netdevice\n",
			   __func__, pci_name(pdev));
		return 0;
	}
	idev = netdev_priv(ndev);
	mutex_lock(&idev->mtx);
	if (pdev->current_state == 0) {
		mutex_unlock(&idev->mtx);
		IRDA_WARNING("%s - %s: already resumed\n",
			     __func__, pci_name(pdev));
		return 0;
	}

	pci_set_power_state(pdev, PCI_D0);
	pdev->current_state = PM_EVENT_ON;

	if (!idev->resume_ok) {
		/* should be obsolete now - but used to happen due to:
		 * - pci layer initially setting pdev->current_state = 4 (unknown)
		 * - pci layer did not walk the save_state-tree (might be APM problem)
		 *   so we could not refuse to suspend from undefined state
		 * - vlsi_irda_suspend detected invalid state and refused to save
		 *   configuration for resume - but was too late to stop suspending
		 * - vlsi_irda_resume got screwed when trying to resume from garbage
		 *
		 * now we explicitly set pdev->current_state = 0 after enabling the
		 * device and independently resume_ok should catch any garbage config.
		 */
		IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
		mutex_unlock(&idev->mtx);
		return 0;
	}

	if (netif_running(ndev)) {
		pci_restore_state(pdev);
		vlsi_start_hw(idev);
		netif_device_attach(ndev);
	}
	idev->resume_ok = 0;
	mutex_unlock(&idev->mtx);
	return 0;
}

#endif /* CONFIG_PM */

/*********************************************************/

static struct pci_driver vlsi_irda_driver = {
	.name		= drivername,
	.id_table	= vlsi_irda_table,
	.probe		= vlsi_irda_probe,
	.remove		= __devexit_p(vlsi_irda_remove),
#ifdef CONFIG_PM
	.suspend	= vlsi_irda_suspend,
	.resume		= vlsi_irda_resume,
#endif
};

#define PROC_DIR ("driver/" DRIVER_NAME)

static int __init vlsi_mod_init(void)
{
	int	i, ret;

	if (clksrc < 0  ||  clksrc > 3) {
		IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		switch(ringsize[i]) {
			case 4:
			case 8:
			case 16:
			case 32:
			case 64:
				break;
			default:
				IRDA_WARNING("%s: invalid %s ringsize %d, using default=8\n",
					     drivername, (i)?"rx":"tx", ringsize[i]);
				ringsize[i] = 8;
				break;
		}
	}

	sirpulse = !!sirpulse;

	/* proc_mkdir returns NULL if !CONFIG_PROC_FS.
	 * Failure to create the procfs entry is handled like running
	 * without procfs - it's not required for the driver to work.
	 */
	vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);

	ret = pci_register_driver(&vlsi_irda_driver);

	if (ret && vlsi_proc_root)
		remove_proc_entry(PROC_DIR, NULL);
	return ret;

}

static void __exit vlsi_mod_exit(void)
{
	pci_unregister_driver(&vlsi_irda_driver);
	if (vlsi_proc_root)
		remove_proc_entry(PROC_DIR, NULL);
}

module_init(vlsi_mod_init);
module_exit(vlsi_mod_exit);