/*
 * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License.
 *
 * The author may be reached as romieu@cogenit.fr.
 * Specific bug reports/Asian food will be welcome.
 *
 * Special thanks to the nice people at CS-Telecom for the hardware and the
 * access to the test/measure tools.
 *
 *
 * Theory of Operation
 *
 * I. Board Compatibility
 *
 * This device driver is designed for the Siemens PEB20534 4-port serial
 * controller as found on Etinc PCISYNC cards. The documentation for the
 * chipset is available at http://www.infineon.com:
 * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
 *   4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
 * - Application Hint "Management of DSCC4 on-chip FIFO resources";
 * - Errata sheet DS5 (courtesy of Michael Skerritt).
 * Jens David has built an adapter based on the same chipset. Take a look
 * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
 * driver.
 * Sample code (2 revisions) is available at Infineon.
 *
 * II. Board-specific settings
 *
 * The PCISYNC card can output a clock signal to the outside world on the
 * *first two* ports, provided you fit a quartz and a line driver and
 * remove the jumpers. The operation is described on the Etinc web site.
 * If you go DCE on these ports, don't forget to use an adequate cable.
 *
 * Sharing of the PCI interrupt line for this board is possible.
 *
 * III. Driver operation
 *
 * The rx/tx operations are based on a linked list of descriptors. The driver
 * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
 * I tried to fix it, the more it started to look like a (convoluted) software
 * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
 * this an RFC 2119 MUST.
 *
 * Tx direction
 * When the tx ring is full, the xmit routine issues a call to
 * netif_stop_queue(). The queue is supposed to be re-enabled during an ALLS
 * irq (we could use HI but as it's easy to lose events, it's unreliable).
 *
 * Rx direction
 * Received frames aren't supposed to span multiple receive areas.
 * I may implement it some day but it isn't the highest ranked item.
 *
 * IV. Notes
 * The current error (XDU, RFO) recovery code is untested.
 * So far, RDO takes its RX channel down and the right sequence to enable it
 * again is still a mystery. If RDO happens, plan a reboot. More details
 * in the code (NB: as this happens, TX still works).
 * Don't mess with the cables during operation, especially on DTE ports. I
 * don't suggest it for DCE either but at least one can get some messages
 * instead of a complete instant freeze.
 * Tests were done on Rev. 20 of the silicon. The RDO handling changes with
 * the documentation/chipset releases.
 *
 * TODO:
 * - test X.25,
 * - use polling at high irq/s,
 * - performance analysis,
 * - endianness.
 *
 * 2001/12/10	Daniela Squassoni  <daniela@cyclades.com>
 * - Contribution to support the new generic HDLC layer.
 *
 * 2002/01	Ueimor
 * - old style interface removal
 * - dscc4_release_ring fix (related to DMA mapping)
 * - hard_start_xmit fix (hint: TxSizeMax)
 * - misc crapectomy.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>

/* Version */
static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
static int debug;
static int quartz;

#ifdef CONFIG_DSCC4_PCI_RST
static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME	"dscc4"

#undef DSCC4_POLLING

/* Module parameters */

MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable/disable extra messages");
module_param(quartz, int, 0);
MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");

/* Structures */

struct thingie {
	int define;
	u32 bits;
};

struct TxFD {
	__le32 state;
	__le32 next;
	__le32 data;
	__le32 complete;
	u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
		     /* FWIW, datasheet calls that "dummy" and says that card
		      * never looks at it; neither does the driver */
};

struct RxFD {
	__le32 state1;
	__le32 next;
	__le32 data;
	__le32 state2;
	__le32 end;
};

#define DUMMY_SKB_SIZE		64
#define TX_LOW			8
#define TX_RING_SIZE		32
#define RX_RING_SIZE		32
#define TX_TOTAL_SIZE		TX_RING_SIZE*sizeof(struct TxFD)
#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct RxFD)
#define IRQ_RING_SIZE		64 /* Keep it a multiple of 32 */
#define TX_TIMEOUT		(HZ/10)
#define DSCC4_HZ_MAX		33000000
#define BRR_DIVIDER_MAX		64*0x00004000 /* Cf errata DS5 p.10 */
#define dev_per_card		4
#define SCC_REGISTERS_MAX	23 /* Cf errata DS5 p.4 */

#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)

/*
 * Given the operating range of Linux HDLC, the 2 defines below could be
 * made simpler. However they are a fine reminder of the limitations of
 * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
 */
#define TO_STATE_TX(len)	cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len)	cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len)		((((len) >> 5) + 1) << 5) /* Cf RLCR */
#define SCC_REG_START(dpriv)	(SCC_START+(dpriv->dev_id)*SCC_OFFSET)
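/*
 * Worked example (editor's illustration, not from the datasheet): RX_MAX()
 * rounds a length up to the next multiple of 32, so RX_MAX(1500) is
 * ((1500 >> 5) + 1) << 5 = 1504 and RX_MAX(1504) is bumped to 1536.
 * TO_STATE_TX() stores the frame length in bits 16..28 of the descriptor
 * state word: for a 1514 byte frame, (1514 & TxSizeMax) << 16 yields
 * 0x05ea0000, to which dscc4_start_xmit() adds FrameEnd (0x85ea0000).
 */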
struct dscc4_pci_priv {
	__le32 *iqcfg;
	int cfg_cur;
	spinlock_t lock;
	struct pci_dev *pdev;

	struct dscc4_dev_priv *root;
	dma_addr_t iqcfg_dma;
	u32 xtal_hz;
};

struct dscc4_dev_priv {
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];

	struct RxFD *rx_fd;
	struct TxFD *tx_fd;
	__le32 *iqrx;
	__le32 *iqtx;

	/* FIXME: check all the volatile are required */
	volatile u32 tx_current;
	u32 rx_current;
	u32 iqtx_current;
	u32 iqrx_current;

	volatile u32 tx_dirty;
	volatile u32 ltda;
	u32 rx_dirty;
	u32 lrda;

	dma_addr_t tx_fd_dma;
	dma_addr_t rx_fd_dma;
	dma_addr_t iqtx_dma;
	dma_addr_t iqrx_dma;

	u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */

	struct timer_list timer;

	struct dscc4_pci_priv *pci_priv;
	spinlock_t lock;

	int dev_id;
	volatile u32 flags;
	u32 timer_help;

	unsigned short encoding;
	unsigned short parity;
	struct net_device *dev;
	sync_serial_settings settings;
	void __iomem *base_addr;
	u32 __pad __attribute__ ((aligned (4)));
};

/* GLOBAL registers definitions */
#define GCMDR	0x00
#define GSTAR	0x04
#define GMODE	0x08
#define IQLENR0	0x0C
#define IQLENR1	0x10
#define IQRX0	0x14
#define IQTX0	0x24
#define IQCFG	0x3c
#define FIFOCR1	0x44
#define FIFOCR2	0x48
#define FIFOCR3	0x4c
#define FIFOCR4	0x34
#define CH0CFG	0x50
#define CH0BRDA	0x54
#define CH0BTDA	0x58
#define CH0FRDA	0x98
#define CH0FTDA	0xb0
#define CH0LRDA	0xc8
#define CH0LTDA	0xe0

/* SCC registers definitions */
#define SCC_START	0x0100
#define SCC_OFFSET	0x80
#define CMDR	0x00
#define STAR	0x04
#define CCR0	0x08
#define CCR1	0x0c
#define CCR2	0x10
#define BRR	0x2C
#define RLCR	0x40
#define IMR	0x54
#define ISR	0x58

#define GPDIR	0x0400
#define GPDATA	0x0404
#define GPIM	0x0408
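/*
 * Addressing example (editor's note): each SCC's register block is
 * SCC_OFFSET (0x80) bytes wide, starting at SCC_START (0x100). For
 * dev_id = 2, SCC_REG_START() is 0x100 + 2*0x80 = 0x200, so a write to
 * that SCC's CCR0 lands at MMIO offset 0x208.
 */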
/* Bit masks */
#define EncodingMask	0x00700000
#define CrcMask		0x00000003

#define IntRxScc0	0x10000000
#define IntTxScc0	0x01000000

#define TxPollCmd	0x00000400
#define RxActivate	0x08000000
#define MTFi		0x04000000
#define Rdr		0x00400000
#define Rdt		0x00200000
#define Idr		0x00100000
#define Idt		0x00080000
#define TxSccRes	0x01000000
#define RxSccRes	0x00010000
#define TxSizeMax	0x1fff /* Datasheet DS1 - 11.1.1.1 */
#define RxSizeMax	0x1ffc /* Datasheet DS1 - 11.1.2.1 */

#define Ccr0ClockMask	0x0000003f
#define Ccr1LoopMask	0x00000200
#define IsrMask		0x000fffff
#define BrrExpMask	0x00000f00
#define BrrMultMask	0x0000003f
#define Hold		cpu_to_le32(0x40000000)
#define SccBusy		0x10000000
#define PowerUp		0x80000000
#define Vis		0x00001000
#define FrameOk		(FrameVfr | FrameCrc)
#define FrameVfr	0x80
#define FrameRdo	0x40
#define FrameCrc	0x20
#define FrameRab	0x10
#define FrameAborted	cpu_to_le32(0x00000200)
#define FrameEnd	cpu_to_le32(0x80000000)
#define DataComplete	cpu_to_le32(0x40000000)
#define LengthCheck	0x00008000
#define SccEvt		0x02000000
#define NoAck		0x00000200
#define Action		0x00000001
#define HiDesc		cpu_to_le32(0x20000000)

/* SCC events */
#define RxEvt		0xf0000000
#define TxEvt		0x0f000000
#define Alls		0x00040000
#define Xdu		0x00010000
#define Cts		0x00004000
#define Xmr		0x00002000
#define Xpr		0x00001000
#define Rdo		0x00000080
#define Rfs		0x00000040
#define Cd		0x00000004
#define Rfo		0x00000002
#define Flex		0x00000001

/* DMA core events */
#define Cfg		0x00200000
#define Hi		0x00040000
#define Fi		0x00020000
#define Err		0x00010000
#define Arf		0x00000002
#define ArAck		0x00000001

/* State flags */
#define Ready		0x00000000
#define NeedIDR		0x00000001
#define NeedIDT		0x00000002
#define RdoSet		0x00000004
#define FakeReset	0x00000008

/* Don't mask RDO. Ever. */
#ifdef DSCC4_POLLING
#define EventsMask	0xfffeef7f
#else
#define EventsMask	0xfffa8f7a
#endif

/* Functions prototypes */
static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static netdev_tx_t dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif

static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
{
	return p->dev;
}

static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	u32 state;

	/* Cf scc_writel for concern regarding thread-safety */
	state = dpriv->scc_regs[offset >> 2];
	state &= ~mask;
	state |= value;
	dpriv->scc_regs[offset >> 2] = state;
	writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	/*
	 * Thread-UNsafe.
	 * As of 2002/02/16, there are no threads racing for access.
	 */
	dpriv->scc_regs[offset >> 2] = bits;
	writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
	return dpriv->scc_regs[offset >> 2];
}

static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* Cf errata DS5 p.4 */
	readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
	return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
}
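/*
 * Editor's note on the shadowing scheme above: SCC registers are kept in
 * scc_regs[] because reading them back from the chip is unreliable (cf
 * errata DS5 p.4 - STAR itself needs the double read of scc_readl_star()).
 * A typical read-modify-write therefore goes through the cached copy, e.g.:
 *
 *	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
 *
 * clears PowerUp and Vis in CCR0 while preserving the other bits.
 */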
static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	dpriv->ltda = dpriv->tx_fd_dma +
		      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
	writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
	/* Flush posted writes *NOW* */
	readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
}

static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
				   struct net_device *dev)
{
	dpriv->lrda = dpriv->rx_fd_dma +
		      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
	writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
}

static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
{
	return dpriv->tx_current == dpriv->tx_dirty;
}

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
					      struct net_device *dev)
{
	return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}

static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, const char *msg)
{
	int ret = 0;

	if (debug > 1) {
		if (SOURCE_ID(state) != dpriv->dev_id) {
			printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
			       dev->name, msg, SOURCE_ID(state), state);
			ret = -1;
		}
		if (state & 0x0df80c00) {
			printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
			       dev->name, msg, state);
			ret = -1;
		}
	}
	return ret;
}

static void dscc4_tx_print(struct net_device *dev,
			   struct dscc4_dev_priv *dpriv,
			   char *msg)
{
	printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
	       dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}

static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd = dpriv->tx_fd;
	struct RxFD *rx_fd = dpriv->rx_fd;
	struct sk_buff **skbuff;
	int i;

	pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);

	skbuff = dpriv->tx_skbuff;
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
					 (*skbuff)->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		tx_fd++;
	}

	skbuff = dpriv->rx_skbuff;
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
					 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		rx_fd++;
	}
}
static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
	struct RxFD *rx_fd = dpriv->rx_fd + dirty;
	const int len = RX_MAX(HDLC_MAX_MRU);
	struct sk_buff *skb;
	int ret = 0;

	skb = dev_alloc_skb(len);
	dpriv->rx_skbuff[dirty] = skb;
	if (skb) {
		skb->protocol = hdlc_type_trans(skb, dev);
		rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					  skb->data, len, PCI_DMA_FROMDEVICE));
	} else {
		rx_fd->data = 0;
		ret = -1;
	}
	return ret;
}

/*
 * IRQ/thread/whatever safe
 */
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
			      struct net_device *dev, char *msg)
{
	s8 i = 0;

	do {
		if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
			printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
			       msg, i);
			goto done;
		}
		schedule_timeout_uninterruptible(10);
		rmb();
	} while (++i > 0);
	netdev_err(dev, "%s timeout\n", msg);
done:
	return (i >= 0) ? i : -EAGAIN;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
	void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
	s16 i = 0;

	writel(Action, ioaddr + GCMDR);
	ioaddr += GSTAR;
	do {
		u32 state = readl(ioaddr);

		if (state & ArAck) {
			netdev_dbg(dev, "%s ack\n", msg);
			writel(ArAck, ioaddr);
			goto done;
		} else if (state & Arf) {
			netdev_err(dev, "%s failed\n", msg);
			writel(Arf, ioaddr);
			i = -1;
			goto done;
		}
		rmb();
	} while (++i > 0);
	netdev_err(dev, "%s timeout\n", msg);
done:
	return i;
}

static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
	int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	s8 i = 0;

	do {
		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
			break;
		smp_rmb();
		schedule_timeout_uninterruptible(10);
	} while (++i > 0);

	return (i >= 0) ? i : -EAGAIN;
}

#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweaking needed */
static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
	/* Cf errata DS5 p.6 */
	writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
	writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	writel(Action, dpriv->base_addr + GCMDR);
	spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
}
#endif

#if 0
static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	u16 i = 0;

	/* Cf errata DS5 p.7 */
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	scc_writel(0x00050000, dpriv, dev, CCR2);
	/*
	 * Must be longer than the time required to fill the fifo.
	 */
	while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
		udelay(1);
		wmb();
	}

	writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	if (dscc4_do_action(dev, "Rdt") < 0)
		netdev_err(dev, "Tx reset failed\n");
}
#endif
/* TODO: (ab)use this function to refill a completely depleted RX ring. */
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
				struct net_device *dev)
{
	struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct sk_buff *skb;
	int pkt_len;

	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
	if (!skb) {
		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
		goto refill;
	}
	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
	pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
			 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
		skb_put(skb, pkt_len);
		if (netif_running(dev))
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_rx(skb);
	} else {
		if (skb->data[pkt_len] & FrameRdo)
			dev->stats.rx_fifo_errors++;
		else if (!(skb->data[pkt_len] & FrameCrc))
			dev->stats.rx_crc_errors++;
		else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
			 (FrameVfr | FrameRab))
			dev->stats.rx_length_errors++;
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
	}
refill:
	while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
		if (try_get_rx_skb(dpriv, dev) < 0)
			break;
		dpriv->rx_dirty++;
	}
	dscc4_rx_update(dpriv, dev);
	rx_fd->state2 = 0x00000000;
	rx_fd->end = cpu_to_le32(0xbabeface);
}

static void dscc4_free1(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	for (i = 0; i < dev_per_card; i++)
		unregister_hdlc_device(dscc4_to_dev(root + i));

	for (i = 0; i < dev_per_card; i++)
		free_netdev(root[i].dev);
	kfree(root);
	kfree(ppriv);
}

static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	void __iomem *ioaddr;
	int i, rc;

	printk(KERN_DEBUG "%s", version);

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_region(pdev, 0, "registers");
	if (rc < 0) {
		pr_err("can't reserve MMIO region (regs)\n");
		goto err_disable_0;
	}
	rc = pci_request_region(pdev, 1, "LBI interface");
	if (rc < 0) {
		pr_err("can't reserve MMIO region (lbi)\n");
		goto err_free_mmio_region_1;
	}

	ioaddr = pci_ioremap_bar(pdev, 0);
	if (!ioaddr) {
		pr_err("cannot remap MMIO region %llx @ %llx\n",
		       (unsigned long long)pci_resource_len(pdev, 0),
		       (unsigned long long)pci_resource_start(pdev, 0));
		rc = -EIO;
		goto err_free_mmio_regions_2;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
	       (unsigned long long)pci_resource_start(pdev, 0),
	       (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);

	/* Cf errata DS5 p.2 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
	pci_set_master(pdev);

	rc = dscc4_found1(pdev, ioaddr);
	if (rc < 0)
		goto err_iounmap_3;

	priv = pci_get_drvdata(pdev);

	rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
	if (rc < 0) {
		pr_warn("IRQ %d busy\n", pdev->irq);
		goto err_release_4;
	}

	/* power up/little endian/dma core controlled via lrda/ltda */
	writel(0x00000001, ioaddr + GMODE);
	/* Shared interrupt queue */
	{
		u32 bits;

		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
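	/*
	 * Worked example (editor's note): with IRQ_RING_SIZE = 64, each
	 * 4-bit field above holds (64 >> 5) - 1 = 1, so the replicated
	 * value written to IQLENR0 is 0x11111111 and IQLENR1 receives
	 * 1 << 20 = 0x00100000 for the configuration queue.
	 */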
	rc = -ENOMEM;

	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_free_irq_5;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_free_iqtx_6;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_free_iqrx_7;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/* Cf application hint. Beware of hard-lock condition on threshold. */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	// FIXME: should depend on the chipset revision
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	rc = 0;
out:
	return rc;

err_free_iqrx_7:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_free_iqtx_6:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_free_irq_5:
	free_irq(pdev->irq, priv->root);
err_release_4:
	dscc4_free1(pdev);
err_iounmap_3:
	iounmap(ioaddr);
err_free_mmio_regions_2:
	pci_release_region(pdev, 1);
err_free_mmio_region_1:
	pci_release_region(pdev, 0);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}

/*
 * Let's hope the default values are decent enough to protect my
 * feet from the user's gun - Ueimor
 */
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	/* No interrupts, SCC core disabled. Let's relax */
	scc_writel(0x00000000, dpriv, dev, CCR0);

	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

	/*
	 * No address recognition/crc-CCITT/cts enabled
	 * Shared flags transmission disabled - cf errata DS5 p.11
	 * Carrier detect disabled - cf errata p.14
	 * FIXME: carrier detection/polarity may be handled more gracefully.
	 */
	scc_writel(0x02408000, dpriv, dev, CCR1);

	/* crc not forwarded - Cf errata DS5 p.11 */
	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
	// crc forwarded
	//scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
}
static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
	int ret = 0;

	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
		ret = -EOPNOTSUPP;
	else
		dpriv->pci_priv->xtal_hz = hz;

	return ret;
}

static const struct net_device_ops dscc4_ops = {
	.ndo_open       = dscc4_open,
	.ndo_stop       = dscc4_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = dscc4_ioctl,
	.ndo_tx_timeout = dscc4_tx_timeout,
};

static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i, ret = -ENOMEM;

	root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
	if (!root)
		goto err_out;

	for (i = 0; i < dev_per_card; i++) {
		root[i].dev = alloc_hdlcdev(root + i);
		if (!root[i].dev)
			goto err_free_dev;
	}

	ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv)
		goto err_free_dev;

	ppriv->root = root;
	spin_lock_init(&ppriv->lock);

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;
		struct net_device *d = dscc4_to_dev(dpriv);
		hdlc_device *hdlc = dev_to_hdlc(d);

		d->base_addr = (unsigned long)ioaddr;
		d->irq = pdev->irq;
		d->netdev_ops = &dscc4_ops;
		d->watchdog_timeo = TX_TIMEOUT;
		SET_NETDEV_DEV(d, &pdev->dev);

		dpriv->dev_id = i;
		dpriv->pci_priv = ppriv;
		dpriv->base_addr = ioaddr;
		spin_lock_init(&dpriv->lock);

		hdlc->xmit = dscc4_start_xmit;
		hdlc->attach = dscc4_hdlc_attach;

		dscc4_init_registers(dpriv, d);
		dpriv->parity = PARITY_CRC16_PR0_CCITT;
		dpriv->encoding = ENCODING_NRZ;

		ret = dscc4_init_ring(d);
		if (ret < 0)
			goto err_unregister;

		ret = register_hdlc_device(d);
		if (ret < 0) {
			pr_err("unable to register\n");
			dscc4_release_ring(dpriv);
			goto err_unregister;
		}
	}

	ret = dscc4_set_quartz(root, quartz);
	if (ret < 0)
		goto err_unregister;

	pci_set_drvdata(pdev, ppriv);
	return ret;

err_unregister:
	while (i-- > 0) {
		dscc4_release_ring(root + i);
		unregister_hdlc_device(dscc4_to_dev(root + i));
	}
	kfree(ppriv);
	i = dev_per_card;
err_free_dev:
	while (i-- > 0)
		free_netdev(root[i].dev);
	kfree(root);
err_out:
	return ret;
}

/* FIXME: get rid of the unneeded code */
static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
//	struct dscc4_pci_priv *ppriv;

	goto done;
done:
	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
	/* FIXME: something is missing there */
}

static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
	sync_serial_settings *settings = &dpriv->settings;

	if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
		struct net_device *dev = dscc4_to_dev(dpriv);

		netdev_info(dev, "loopback requires clock\n");
		return -1;
	}
	return 0;
}
#ifdef CONFIG_DSCC4_PCI_RST
/*
 * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together
 * so as to provide a way to reset the ASIC without rebooting the whole
 * machine.
 *
 * This code doesn't need to be efficient. Keep It Simple.
 */
static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
	int i;

	mutex_lock(&dscc4_mutex);
	for (i = 0; i < 16; i++)
		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

	/* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
	writel(0x001c0000, ioaddr + GMODE);
	/* Configure GPIO port as output */
	writel(0x0000ffff, ioaddr + GPDIR);
	/* Disable interrupts */
	writel(0x0000ffff, ioaddr + GPIM);

	writel(0x0000ffff, ioaddr + GPDATA);
	writel(0x00000000, ioaddr + GPDATA);

	/* Flush posted writes */
	readl(ioaddr + GSTAR);

	schedule_timeout_uninterruptible(10);

	for (i = 0; i < 16; i++)
		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
	mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev,ioaddr)	do {} while (0)
#endif /* CONFIG_DSCC4_PCI_RST */
static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv;
	int ret = -EAGAIN;

	if (dscc4_loopback_check(dpriv) < 0)
		goto err;

	if ((ret = hdlc_open(dev)))
		goto err;

	ppriv = dpriv->pci_priv;

	/*
	 * Due to various bugs, there is no way to reliably reset a
	 * specific port (manufacturer-dependent special PCI #RST wiring
	 * apart: it affects all ports). Thus the device goes into the best
	 * silent mode possible at dscc4_close() time and simply claims to
	 * be up if it's opened again. It still isn't possible to change
	 * the HDLC configuration without rebooting but at least the ports
	 * can be up/down ifconfig'ed without killing the host.
	 */
	if (dpriv->flags & FakeReset) {
		dpriv->flags &= ~FakeReset;
		scc_patchl(0, PowerUp, dpriv, dev, CCR0);
		scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
		scc_writel(EventsMask, dpriv, dev, IMR);
		netdev_info(dev, "up again\n");
		goto done;
	}

	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (scc_readl_star(dpriv, dev) & SccBusy) {
		netdev_err(dev, "busy - try later\n");
		ret = -EAGAIN;
		goto err_out;
	} else
		netdev_info(dev, "available - good\n");

	scc_writel(EventsMask, dpriv, dev, IMR);

	/* Posted write is flushed in the wait_ack loop */
	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
		goto err_disable_scc_events;

	/*
	 * I would expect XPR near CE completion (before or after?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone?
	 */
	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
		pr_err("XPR timeout\n");
		goto err_disable_scc_events;
	}

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Open");

done:
	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_disable_scc_events:
	scc_writel(0xffffffff, dpriv, dev, IMR);
	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
	hdlc_close(dev);
err:
	return ret;
}

#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* FIXME: it's gonna be easy (TM), for sure */
}
#endif /* DSCC4_POLLING */

static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
	struct TxFD *tx_fd;
	int next;

	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
	tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
						 PCI_DMA_TODEVICE));
	tx_fd->complete = 0x00000000;
	tx_fd->jiffies = jiffies;
	mb();

#ifdef DSCC4_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Xmit");
	/* To be cleaned(unsigned int)/optimized. Later, ok ? */
	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
		netif_stop_queue(dev);

	if (dscc4_tx_quiescent(dpriv, dev))
		dscc4_do_tx(dpriv, dev);

	return NETDEV_TX_OK;
}

static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
	scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
	scc_writel(0xffffffff, dpriv, dev, IMR);

	dpriv->flags |= FakeReset;

	hdlc_close(dev);

	return 0;
}

static inline int dscc4_check_clock_ability(int port)
{
	int ret = 0;

#ifdef CONFIG_DSCC4_PCISYNC
	if (port >= 2)
		ret = -1;
#endif
	return ret;
}

/*
 * DS1 p.137: "There are a total of 13 different clocking modes..."
 *                                    ^^
 * Design choices:
 * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a).
 *   Clock mode 3b _should_ work but the testing seems to make this point
 *   dubious (DIY testing requires setting CCR0 at 0x00000033).
 *   This is supposed to provide least surprise "DTE like" behavior.
 * - if line rate is specified, clocks are assumed to be locally generated.
 *   A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing
 *   between them is done automatically according to the required frequency
 *   scaling. Of course some rounding may take place.
 * - no high speed mode (40Mb/s). May be trivial to do but I don't have an
 *   appropriate external clocking device for testing.
 * - no time-slot/clock mode 5: shameless laziness.
 *
 * The clock signals wiring can be (is ?) manufacturer dependent. Good luck.
 *
 * BIG FAT WARNING: if the device isn't provided an adequate clock signal, it
 * won't pass the init sequence. For example, straight back-to-back DTE without
 * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is
 * called.
 *
 * Typos lurk in the datasheet (missing divider in clock mode 7a, figure 51
 * p.153 of DS0, for example).
 *
 * Clock mode related bits of CCR0:
 *     +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only)
 *     | +---------- SSEL: sub-mode select 0 -> a, 1 -> b
 *     | | +-------- High Speed: say 0
 *     | | | +-+-+-- Clock Mode: 0..7
 *     | | | | | |
 *    -+-+-+-+-+-+-+-+
 *    x|x|5|4|3|2|1|0| lower bits
 *
 * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b)
 *            +-+-+-+------------------ M (0..15)
 *            | | | |     +-+-+-+-+-+-- N (0..63)
 *    0 0 0 0 | | | | 0 0 | | | | | |
 * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits
 *
 */
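/*
 * Worked example (editor's illustration, assuming a 33 MHz quartz): for a
 * requested rate of 64000 bps, divider = 33000000/64000 = 515, which is
 * below BRR_DIVIDER_MAX, so clock mode 7b is selected by the function
 * below. The divider is then approximated with its 6 highest weighted
 * bits: N = 32, M = 4, i.e. BRR = 0x0420 and an effective divider of
 * 32 << 4 = 512, giving a real rate of 33000000/512 = 64453 bps that is
 * reported back to the caller through *bps.
 */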
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	int ret = -1;
	u32 brr;

	*state &= ~Ccr0ClockMask;
	if (*bps) { /* Clock generated - required for DCE */
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			goto done;
		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
			goto done;
		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
		} else
			*state |= 0x00000037; /* Clock mode 7b (BRG) */
		if (divider >> 22) {
			n = 63;
			m = 15;
		} else if (divider) {
			/* Extraction of the 6 highest weighted bits */
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		divider = n << m;
		if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
			divider <<= 4;
		*bps = xtal / divider;
	} else {
		/*
		 * External clock - DTE
		 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
		 * Nothing more to be done.
		 */
		brr = 0;
	}
	scc_writel(brr, dpriv, dev, BRR);
	ret = 0;
done:
	return ret;
}
static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	const size_t size = sizeof(dpriv->settings);
	int ret = 0;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return -EOPNOTSUPP;

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &dpriv->settings, size))
			return -EFAULT;
		break;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dpriv->flags & FakeReset) {
			netdev_info(dev, "please reset the device before this command\n");
			return -EPERM;
		}
		if (copy_from_user(&dpriv->settings, line, size))
			return -EFAULT;
		ret = dscc4_set_iface(dpriv, dev);
		break;

	default:
		ret = hdlc_ioctl(dev, ifr, cmd);
		break;
	}

	return ret;
}
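/*
 * Userspace view (editor's sketch, not part of the driver): the ioctl
 * above is exercised through the generic WAN interface, with the device
 * down, along these lines:
 *
 *	struct ifreq ifr = { 0 };
 *	sync_serial_settings set = {
 *		.clock_rate = 64000,
 *		.clock_type = CLOCK_INT,	// we generate the clock (DCE)
 *	};
 *
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *	ifr.ifr_settings.size = sizeof(set);
 *	ifr.ifr_settings.ifs_ifsu.sync = &set;
 *	ioctl(fd, SIOCWANDEV, &ifr);	// fd: any AF_INET socket
 *
 * The sethdlc utility from the generic HDLC package wraps the same call.
 */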
static int dscc4_match(const struct thingie *p, int value)
{
	int i;

	for (i = 0; p[i].define != -1; i++) {
		if (value == p[i].define)
			break;
	}
	if (p[i].define == -1)
		return -1;
	else
		return i;
}

static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	int ret = -EOPNOTSUPP;
	u32 bps, state;

	bps = settings->clock_rate;
	state = scc_readl(dpriv, CCR0);
	if (dscc4_set_clock(dev, &bps, &state) < 0)
		goto done;
	if (bps) { /* DCE */
		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
		if (settings->clock_rate != bps) {
			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
			       dev->name, settings->clock_rate, bps);
			settings->clock_rate = bps;
		}
	} else { /* DTE */
		state |= PowerUp | Vis;
		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
	}
	scc_writel(state, dpriv, dev, CCR0);
	ret = 0;
done:
	return ret;
}

static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	static const struct thingie encoding[] = {
		{ ENCODING_NRZ,		0x00000000 },
		{ ENCODING_NRZI,	0x00200000 },
		{ ENCODING_FM_MARK,	0x00400000 },
		{ ENCODING_FM_SPACE,	0x00500000 },
		{ ENCODING_MANCHESTER,	0x00600000 },
		{ -1, 0}
	};
	int i, ret = 0;

	i = dscc4_match(encoding, dpriv->encoding);
	if (i >= 0)
		scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
	else
		ret = -EOPNOTSUPP;
	return ret;
}

static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	u32 state;

	state = scc_readl(dpriv, CCR1);
	if (settings->loopback) {
		printk(KERN_DEBUG "%s: loopback\n", dev->name);
		state |= 0x00000100;
	} else {
		printk(KERN_DEBUG "%s: normal\n", dev->name);
		state &= ~0x00000100;
	}
	scc_writel(state, dpriv, dev, CCR1);
	return 0;
}

static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
			     struct net_device *dev)
{
	static const struct thingie crc[] = {
		{ PARITY_CRC16_PR0_CCITT,	0x00000010 },
		{ PARITY_CRC16_PR1_CCITT,	0x00000000 },
		{ PARITY_CRC32_PR0_CCITT,	0x00000011 },
		{ PARITY_CRC32_PR1_CCITT,	0x00000001 }
	};
	int i, ret = 0;

	i = dscc4_match(crc, dpriv->parity);
	if (i >= 0)
		scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
	else
		ret = -EOPNOTSUPP;
	return ret;
}

static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct {
		int (*action)(struct dscc4_dev_priv *, struct net_device *);
	} *p, do_setting[] = {
		{ dscc4_encoding_setting },
		{ dscc4_clock_setting },
		{ dscc4_loopback_setting },
		{ dscc4_crc_setting },
		{ NULL }
	};
	int ret = 0;

	for (p = do_setting; p->action; p++) {
		if ((ret = p->action(dpriv, dev)) < 0)
			break;
	}
	return ret;
}

static irqreturn_t dscc4_irq(int irq, void *token)
{
	struct dscc4_dev_priv *root = token;
	struct dscc4_pci_priv *priv;
	struct net_device *dev;
	void __iomem *ioaddr;
	u32 state;
	unsigned long flags;
	int i, handled = 1;

	priv = root->pci_priv;
	dev = dscc4_to_dev(root);

	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = root->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state) {
		handled = 0;
		goto out;
	}
	if (debug > 3)
		printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		netdev_err(dev, "failure (Arf). Harass the maintainer\n");
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug > 0)
			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
			netdev_err(dev, "CFG failed\n");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_RETVAL(handled);
}
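/*
 * Editor's note on the GSTAR demultiplexing above: the upper byte encodes
 * one bit per SCC - RxEvt (0xf0000000) with IntRxScc0 = 0x10000000 for
 * SCC0, TxEvt (0x0f000000) with IntTxScc0 = 0x01000000. A GSTAR value of
 * 0x01000000 therefore means "Tx interrupt queue event on SCC0"; the
 * handler nevertheless walks all four ports on any Rx/Tx event.
 */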
static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqtx[cur]);
	if (!state) {
		if (debug > 4)
			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
			       state);
		if ((debug > 1) && (loop > 1))
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
				netif_wake_queue(dev);

		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
		    !dscc4_tx_done(dpriv))
			dscc4_do_tx(dpriv, dev);
		return;
	}
	loop++;
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

	if (state_check(state, dpriv, dev, "Tx") < 0)
		return;

	if (state & SccEvt) {
		if (state & Alls) {
			struct sk_buff *skb;
			struct TxFD *tx_fd;

			if (debug > 2)
				dscc4_tx_print(dev, dpriv, "Alls");
			/*
			 * DataComplete can't be trusted for Tx completion.
			 * Cf errata DS5 p.8
			 */
			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;
			skb = dpriv->tx_skbuff[cur];
			if (skb) {
				pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
						 skb->len, PCI_DMA_TODEVICE);
				if (tx_fd->state & FrameEnd) {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += skb->len;
				}
				dev_kfree_skb_irq(skb);
				dpriv->tx_skbuff[cur] = NULL;
				++dpriv->tx_dirty;
			} else {
				if (debug > 1)
					netdev_err(dev, "Tx: NULL skb %d\n",
						   cur);
			}
			/*
			 * If the driver ends up sending crap on the wire, it
			 * will be way easier to diagnose than the (not so)
			 * random freeze induced by null sized tx frames.
			 */
			tx_fd->data = tx_fd->next;
			tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
			tx_fd->complete = 0x00000000;
			tx_fd->jiffies = 0;

			if (!(state &= ~Alls))
				goto try;
		}
		/*
		 * Transmit Data Underrun
		 */
		if (state & Xdu) {
			netdev_err(dev, "Tx Data Underrun. Ask maintainer\n");
			dpriv->flags = NeedIDT;
			/* Tx reset */
			writel(MTFi | Rdt,
			       dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
			writel(Action, dpriv->base_addr + GCMDR);
			return;
		}
		if (state & Cts) {
			netdev_info(dev, "CTS transition\n");
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		if (state & Xmr) {
			/* Frame needs to be sent again - FIXME */
			netdev_err(dev, "Tx ReTx. Ask maintainer\n");
			if (!(state &= ~Xmr)) /* DEBUG */
				goto try;
		}
		if (state & Xpr) {
			void __iomem *scc_addr;
			unsigned long ring;
			int i;

			/*
			 * - the busy condition happens (sometimes);
			 * - it doesn't seem to make the handler unreliable.
			 */
			for (i = 1; i; i <<= 1) {
				if (!(scc_readl_star(dpriv, dev) & SccBusy))
					break;
			}
			if (!i)
				netdev_info(dev, "busy in irq\n");

			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
			/* Keep this order: IDT before IDR */
			if (dpriv->flags & NeedIDT) {
				if (debug > 2)
					dscc4_tx_print(dev, dpriv, "Xpr");
				ring = dpriv->tx_fd_dma +
				       (dpriv->tx_dirty%TX_RING_SIZE)*
				       sizeof(struct TxFD);
				writel(ring, scc_addr + CH0BTDA);
				dscc4_do_tx(dpriv, dev);
				writel(MTFi | Idt, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDT") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDT;
			}
			if (dpriv->flags & NeedIDR) {
				ring = dpriv->rx_fd_dma +
				       (dpriv->rx_current%RX_RING_SIZE)*
				       sizeof(struct RxFD);
				writel(ring, scc_addr + CH0BRDA);
				dscc4_rx_update(dpriv, dev);
				writel(MTFi | Idr, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDR") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDR;
				smp_wmb();
				/* Activate receiver and misc */
				scc_writel(0x08050008, dpriv, dev, CCR2);
			}
		err_xpr:
			if (!(state &= ~Xpr))
				goto try;
		}
		if (state & Cd) {
			if (debug > 0)
				netdev_info(dev, "CD transition\n");
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
	} else { /* !SccEvt */
		if (state & Hi) {
#ifdef DSCC4_POLLING
			while (!dscc4_tx_poll(dpriv, dev));
#endif
			netdev_info(dev, "Tx Hi\n");
			state &= ~Hi;
		}
		if (state & Err) {
			netdev_info(dev, "Tx ERR\n");
			dev->stats.tx_errors++;
			state &= ~Err;
		}
	}
	goto try;
}
static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur;

try:
	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqrx[cur]);
	if (!state)
		return;
	dpriv->iqrx[cur] = 0;
	dpriv->iqrx_current++;

	if (state_check(state, dpriv, dev, "Rx") < 0)
		return;

	if (!(state & SccEvt)) {
		struct RxFD *rx_fd;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
			       state);
		state &= 0x00ffffff;
		if (state & Err) { /* Hold or reset */
			printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
			cur = dpriv->rx_current%RX_RING_SIZE;
			rx_fd = dpriv->rx_fd + cur;
			/*
			 * Presume we're not facing a DMAC receiver reset.
			 * As we use the rx size-filtering feature of the
			 * DSCC4, the beginning of a new frame is waiting in
			 * the rx fifo. I bet a Receive Data Overflow will
			 * happen most of the time but let's try and avoid it.
			 * Btw (as for RDO) if one experiences ERR whereas
			 * the system looks rather idle, there may be a
			 * problem with latency. In this case, increasing
			 * RX_RING_SIZE may help.
			 */
			//while (dpriv->rx_needs_refill) {
			while (!(rx_fd->state1 & Hold)) {
				rx_fd++;
				cur++;
				if (!(cur = cur%RX_RING_SIZE))
					rx_fd = dpriv->rx_fd;
			}
			//dpriv->rx_needs_refill--;
			try_get_rx_skb(dpriv, dev);
			if (!rx_fd->data)
				goto try;
			rx_fd->state1 &= ~Hold;
			rx_fd->state2 = 0x00000000;
			rx_fd->end = cpu_to_le32(0xbabeface);
			//}
			goto try;
		}
		if (state & Fi) {
			dscc4_rx_skb(dpriv, dev);
			goto try;
		}
		if (state & Hi) { /* HI bit */
			netdev_info(dev, "Rx Hi\n");
			state &= ~Hi;
			goto try;
		}
	} else { /* SccEvt */
		if (debug > 1) {
			//FIXME: verify that every event is handled
			static struct {
				u32 mask;
				const char *irq_name;
			} evts[] = {
				{ 0x00008000, "TIN"},
				{ 0x00000020, "RSC"},
				{ 0x00000010, "PCE"},
				{ 0x00000008, "PLLA"},
				{ 0, NULL}
			}, *evt;

			for (evt = evts; evt->irq_name; evt++) {
				if (state & evt->mask) {
					printk(KERN_DEBUG "%s: %s\n",
					       dev->name, evt->irq_name);
					if (!(state &= ~evt->mask))
						goto try;
				}
			}
		} else {
			if (!(state &= ~0x0000c03c))
				goto try;
		}
		if (state & Cts) {
			netdev_info(dev, "CTS transition\n");
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		/*
		 * Receive Data Overflow (FIXME: fscked)
		 */
		if (state & Rdo) {
			struct RxFD *rx_fd;
			void __iomem *scc_addr;
			int cur;

			//if (debug)
			//	dscc4_rx_dump(dpriv);
			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

			scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
			/*
			 * This has no effect. Why ?
			 * ORed with TxSccRes, one sees the CFG ack (for
			 * the TX part only).
			 */
			scc_writel(RxSccRes, dpriv, dev, CMDR);
			dpriv->flags |= RdoSet;

			/*
			 * Let's try and save something in the received data.
			 * rx_current must be incremented at least once to
			 * avoid HOLD in the BRDA-to-be-pointed desc.
			 */
			do {
				cur = dpriv->rx_current++%RX_RING_SIZE;
				rx_fd = dpriv->rx_fd + cur;
				if (!(rx_fd->state2 & DataComplete))
					break;
				if (rx_fd->state2 & FrameAborted) {
					dev->stats.rx_over_errors++;
					rx_fd->state1 |= Hold;
					rx_fd->state2 = 0x00000000;
					rx_fd->end = cpu_to_le32(0xbabeface);
				} else
					dscc4_rx_skb(dpriv, dev);
			} while (1);

			if (debug > 0) {
				if (dpriv->flags & RdoSet)
					printk(KERN_DEBUG
					       "%s: no RDO in Rx data\n", DRV_NAME);
			}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
			/*
			 * FIXME: must the reset be this violent ?
			 */
#warning "FIXME: CH0BRDA"
			writel(dpriv->rx_fd_dma +
			       (dpriv->rx_current%RX_RING_SIZE)*
			       sizeof(struct RxFD), scc_addr + CH0BRDA);
			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "RDR") < 0) {
				netdev_err(dev, "RDO recovery failed(RDR)\n");
				goto rdo_end;
			}
			writel(MTFi|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "IDR") < 0) {
				netdev_err(dev, "RDO recovery failed(IDR)\n");
				goto rdo_end;
			}
		rdo_end:
#endif
			scc_patchl(0, RxActivate, dpriv, dev, CCR2);
			goto try;
		}
		if (state & Cd) {
			netdev_info(dev, "CD transition\n");
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
		if (state & Flex) {
			printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
			if (!(state &= ~Flex))
				goto try;
		}
	}
}
/*
 * I had expected the following to work for the first descriptor
 * (tx_fd->state = 0xc0000000)
 * - Hold=1 (don't try and branch to the next descriptor);
 * - No=0 (I want an empty data section, i.e. size=0);
 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
 * It failed and locked solid. Thus the introduction of a dummy skb.
 * Problem is acknowledged in errata sheet DS5. Joy :o/
 */
static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(DUMMY_SKB_SIZE);
	if (skb) {
		int last = dpriv->tx_dirty%TX_RING_SIZE;
		struct TxFD *tx_fd = dpriv->tx_fd + last;

		skb->len = DUMMY_SKB_SIZE;
		skb_copy_to_linear_data(skb, version,
					strlen(version) % DUMMY_SKB_SIZE);
		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
		tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					  skb->data, DUMMY_SKB_SIZE,
					  PCI_DMA_TODEVICE));
		dpriv->tx_skbuff[last] = skb;
	}
	return skb;
}

static int dscc4_init_ring(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd;
	struct RxFD *rx_fd;
	void *ring;
	int i;

	ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
	if (!ring)
		goto err_out;
	dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

	ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
	if (!ring)
		goto err_free_dma_rx;
	dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

	memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
	dpriv->tx_dirty = 0xffffffff;
	i = dpriv->tx_current = 0;
	do {
		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
		tx_fd->complete = 0x00000000;
		/* FIXME: NULL should be ok - to be tried */
		tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
		(tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
					      (++i%TX_RING_SIZE)*sizeof(*tx_fd));
	} while (i < TX_RING_SIZE);

	if (!dscc4_init_dummy_skb(dpriv))
		goto err_free_dma_tx;

	memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
	i = dpriv->rx_dirty = dpriv->rx_current = 0;
	do {
		/* Size is set by the host; must be a multiple of 4 bytes */
		rx_fd->state1 = HiDesc;
		rx_fd->state2 = 0x00000000;
		rx_fd->end = cpu_to_le32(0xbabeface);
		rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
		// FIXME: return value is checked but the handling looks suspect
		if (try_get_rx_skb(dpriv, dev) >= 0)
			dpriv->rx_dirty++;
		(rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
					      (++i%RX_RING_SIZE)*sizeof(*rx_fd));
	} while (i < RX_RING_SIZE);

	return 0;

err_free_dma_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
	return -ENOMEM;
}
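/*
 * Ring layout note (editor's illustration): the loops above chain the
 * descriptors into a circle. With TX_RING_SIZE = 32, descriptor i's
 * 'next' field holds tx_fd_dma + ((i + 1) % 32) * sizeof(struct TxFD),
 * so descriptor 31 points back to descriptor 0 and the DMA core can
 * follow the list indefinitely under LxDA.
 */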
static void dscc4_remove_one(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	void __iomem *ioaddr;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	ioaddr = root->base_addr;

	dscc4_pci_reset(pdev, ioaddr);

	free_irq(pdev->irq, root);
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
			    ppriv->iqcfg_dma);
	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;

		dscc4_release_ring(dpriv);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}

	dscc4_free1(pdev);

	iounmap(ioaddr);

	pci_release_region(pdev, 1);
	pci_release_region(pdev, 0);

	pci_disable_device(pdev);
}

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
			     unsigned short parity)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI &&
	    encoding != ENCODING_FM_MARK &&
	    encoding != ENCODING_FM_SPACE &&
	    encoding != ENCODING_MANCHESTER)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT &&
	    parity != PARITY_CRC32_PR0_CCITT &&
	    parity != PARITY_CRC32_PR1_CCITT)
		return -EINVAL;

	dpriv->encoding = encoding;
	dpriv->parity = parity;
	return 0;
}

#ifndef MODULE
static int __init dscc4_setup(char *str)
{
	int *args[] = { &debug, &quartz, NULL }, **p = args;

	while (*p && (get_option(&str, *p) == 2))
		p++;
	return 1;
}

__setup("dscc4.setup=", dscc4_setup);
#endif

static const struct pci_device_id dscc4_pci_tbl[] = {
	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
	.name		= DRV_NAME,
	.id_table	= dscc4_pci_tbl,
	.probe		= dscc4_init_one,
	.remove		= dscc4_remove_one,
};

module_pci_driver(dscc4_driver);
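/*
 * Usage note (editor's addition): both knobs declared at the top of this
 * file can be set at load time, e.g. as a module:
 *
 *	modprobe dscc4 debug=2 quartz=33000000
 *
 * or, when built in, through the boot parameter handled by dscc4_setup()
 * above:
 *
 *	dscc4.setup=2,33000000
 */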