Lines matching refs: self

168 struct bfin_sir_self *self = netdev_priv(dev);
169 struct bfin_sir_port *port = self->sir_port;
173 return self->rx_buff.state != OUTSIDE_FRAME;
180 struct bfin_sir_self *self = netdev_priv(dev);
181 struct bfin_sir_port *port = self->sir_port;
183 if (self->tx_buff.len != 0) {
184 chr = *(self->tx_buff.data);
186 self->tx_buff.data++;
187 self->tx_buff.len--;
189 self->stats.tx_packets++;
190 self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
191 if (self->newspeed) {
192 bfin_sir_set_speed(port, self->newspeed);
193 self->speed = self->newspeed;
194 self->newspeed = 0;
205 struct bfin_sir_self *self = netdev_priv(dev);
206 struct bfin_sir_port *port = self->sir_port;
211 async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
218 struct bfin_sir_self *self = netdev_priv(dev);
219 struct bfin_sir_port *port = self->sir_port;
221 spin_lock(&self->lock);
224 spin_unlock(&self->lock);
232 struct bfin_sir_self *self = netdev_priv(dev);
233 struct bfin_sir_port *port = self->sir_port;
235 spin_lock(&self->lock);
238 spin_unlock(&self->lock);
247 struct bfin_sir_self *self = netdev_priv(dev);
248 struct bfin_sir_port *port = self->sir_port;
254 if (self->tx_buff.len == 0) {
255 self->stats.tx_packets++;
256 if (self->newspeed) {
257 bfin_sir_set_speed(port, self->newspeed);
258 self->speed = self->newspeed;
259 self->newspeed = 0;
267 blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
268 (unsigned long)(self->tx_buff.data+self->tx_buff.len));
274 (unsigned long)(self->tx_buff.data));
275 set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
283 struct bfin_sir_self *self = netdev_priv(dev);
284 struct bfin_sir_port *port = self->sir_port;
286 spin_lock(&self->lock);
291 self->stats.tx_packets++;
292 self->stats.tx_bytes += self->tx_buff.len;
293 self->tx_buff.len = 0;
294 if (self->newspeed) {
295 bfin_sir_set_speed(port, self->newspeed);
296 self->speed = self->newspeed;
297 self->newspeed = 0;
304 spin_unlock(&self->lock);
311 struct bfin_sir_self *self = netdev_priv(dev);
312 struct bfin_sir_port *port = self->sir_port;
318 async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
323 struct bfin_sir_self *self = netdev_priv(dev);
324 struct bfin_sir_port *port = self->sir_port;
328 spin_lock_irqsave(&self->lock, flags);
340 spin_unlock_irqrestore(&self->lock, flags);
346 struct bfin_sir_self *self = netdev_priv(dev);
347 struct bfin_sir_port *port = self->sir_port;
350 spin_lock(&self->lock);
363 spin_unlock(&self->lock);
457 struct bfin_sir_self *self;
464 self = netdev_priv(dev);
465 if (self->open) {
466 flush_work(&self->work);
467 bfin_sir_shutdown(self->sir_port, dev);
477 struct bfin_sir_self *self;
485 self = netdev_priv(dev);
486 port = self->sir_port;
487 if (self->open) {
488 if (self->newspeed) {
489 self->speed = self->newspeed;
490 self->newspeed = 0;
506 struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
507 struct net_device *dev = self->sir_port->dev;
508 struct bfin_sir_port *port = self->sir_port;
513 turnaround_delay(dev->last_rx, self->mtt);
528 /* bfin_sir_set_speed(port, self->speed); */
539 struct bfin_sir_self *self = netdev_priv(dev);
544 self->mtt = irda_get_mtt(skb);
546 if (speed != self->speed && speed != -1)
547 self->newspeed = speed;
549 self->tx_buff.data = self->tx_buff.head;
551 self->tx_buff.len = 0;
553 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);
555 schedule_work(&self->work);
564 struct bfin_sir_self *self = netdev_priv(dev);
565 struct bfin_sir_port *port = self->sir_port;
571 if (self->open) {
603 struct bfin_sir_self *self = netdev_priv(dev);
605 return &self->stats;
610 struct bfin_sir_self *self = netdev_priv(dev);
611 struct bfin_sir_port *port = self->sir_port;
614 self->newspeed = 0;
615 self->speed = 9600;
617 spin_lock_init(&self->lock);
625 self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
626 if (!self->irlap) {
631 INIT_WORK(&self->work, bfin_sir_send_work);
636 self->open = 1;
644 self->open = 0;
652 struct bfin_sir_self *self = netdev_priv(dev);
654 flush_work(&self->work);
655 bfin_sir_shutdown(self->sir_port, dev);
657 if (self->rxskb) {
658 dev_kfree_skb(self->rxskb);
659 self->rxskb = NULL;
663 if (self->irlap) {
664 irlap_close(self->irlap);
665 self->irlap = NULL;
669 self->open = 0;
697 struct bfin_sir_self *self;
719 dev = alloc_irdadev(sizeof(*self));
723 self = netdev_priv(dev);
724 self->dev = &pdev->dev;
725 self->sir_port = sir_port;
728 err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
731 err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
738 irda_init_max_qos_capabilies(&self->qos);
757 self->qos.baud_rate.bits &= baudrate_mask;
759 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
761 irda_qos_bits_to_value(&self->qos);
766 kfree(self->tx_buff.head);
768 kfree(self->rx_buff.head);
785 struct bfin_sir_self *self;
791 self = netdev_priv(dev);
793 kfree(self->tx_buff.head);
794 kfree(self->rx_buff.head);
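
Pieced together, the references above touch every field of the driver's private data. Below is a minimal sketch of struct bfin_sir_self reconstructed from those reference lines alone; the field order and the exact helper types (iobuff_t, struct qos_info, struct irlap_cb, struct net_device_stats) are assumptions based on the standard Linux IrDA stack, not the verbatim bfin_sir.h definition.

/* Sketch of the private area returned by netdev_priv(dev), inferred
 * from the reference lines listed above; layout and types are assumed. */
struct bfin_sir_self {
	struct bfin_sir_port	*sir_port;	/* e.g. lines 169, 181, 206 */
	spinlock_t		lock;		/* e.g. lines 221, 235, 328, 617 */
	unsigned int		open;		/* e.g. lines 465, 487, 636, 669 */
	int			speed;		/* e.g. lines 193, 258, 546, 615 */
	int			newspeed;	/* e.g. lines 191, 256, 294, 547 */
	int			mtt;		/* lines 513, 544 */

	struct sk_buff		*rxskb;		/* lines 657-659 */
	struct net_device_stats	stats;		/* e.g. lines 189, 211, 291, 605 */
	struct device		*dev;		/* line 724 */
	struct irlap_cb		*irlap;		/* lines 625-626, 663-665 */
	struct qos_info		qos;		/* e.g. lines 625, 738, 757 */

	iobuff_t		tx_buff;	/* tx path, e.g. lines 183, 267, 549 */
	iobuff_t		rx_buff;	/* rx path, e.g. lines 173, 211, 318 */

	struct work_struct	work;		/* deferred tx, e.g. lines 506, 555, 631 */
};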