Lines Matching refs:bcs

53 hscx_empty_fifo(struct BCState *bcs, int count)
56 struct IsdnCardState *cs = bcs->cs;
61 if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) {
64 WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80);
65 bcs->hw.hscx.rcvidx = 0;
68 ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
69 bcs->hw.hscx.rcvidx += count;
70 READHSCXFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
71 WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80);
73 char *t = bcs->blog;
76 bcs->hw.hscx.hscx ? 'B' : 'A', count);
78 debugl1(cs, "%s", bcs->blog);
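
The fragments at 53-78 above belong to hscx_empty_fifo(), the receive-side FIFO drain: if the pending bytes would overflow the reassembly buffer, the receiver is acknowledged and the partial frame dropped; otherwise the chunk is read in behind rcvidx and the chip is told the FIFO block has been consumed. Below is a minimal stand-alone C model of that flow. The struct layout and the two register helpers are stand-ins for the driver's WriteHSCXCMDR()/READHSCXFIFO(), whose bodies are not part of this listing, and CMDR_RMC is my mnemonic for the raw 0x80 value.

#include <string.h>

#define HSCX_BUFMAX 4096    /* reassembly buffer size, as in the listing */
#define CMDR_RMC    0x80    /* assumed mnemonic for the 0x80 CMDR write */

struct rx_hw {
    int channel;                        /* 0 = HSCX A, 1 = HSCX B */
    int rcvidx;                         /* fill level of rcvbuf */
    unsigned char rcvbuf[HSCX_BUFMAX];
};

/* Stand-ins for the driver's register accessors. */
static void write_cmdr(int channel, unsigned char val) { (void)channel; (void)val; }
static void read_fifo(int channel, unsigned char *dst, int count)
{
    memset(dst, 0, (size_t)count);      /* the real helper reads the chip FIFO */
    (void)channel;
}

void model_empty_fifo(struct rx_hw *hw, int count)
{
    unsigned char *ptr;

    if (hw->rcvidx + count > HSCX_BUFMAX) {
        /* Overflow: acknowledge and throw away what was collected so far. */
        write_cmdr(hw->channel, CMDR_RMC);
        hw->rcvidx = 0;
        return;
    }
    ptr = hw->rcvbuf + hw->rcvidx;
    hw->rcvidx += count;
    read_fifo(hw->channel, ptr, count); /* READHSCXFIFO(...) */
    write_cmdr(hw->channel, CMDR_RMC);  /* WriteHSCXCMDR(..., 0x80) */
}

The blog/debugl1 fragments at 73-78 only format a debug trace of the transfer and are left out of the model.
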
83 hscx_fill_fifo(struct BCState *bcs)
85 struct IsdnCardState *cs = bcs->cs;
93 if (!bcs->tx_skb)
95 if (bcs->tx_skb->len <= 0)
98 more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
99 if (bcs->tx_skb->len > fifo_size) {
103 count = bcs->tx_skb->len;
105 waitforXFW(cs, bcs->hw.hscx.hscx);
106 ptr = bcs->tx_skb->data;
107 skb_pull(bcs->tx_skb, count);
108 bcs->tx_cnt -= count;
109 bcs->hw.hscx.count += count;
110 WRITEHSCXFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
111 WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, more ? 0x8 : 0xa);
113 char *t = bcs->blog;
116 bcs->hw.hscx.hscx ? 'B' : 'A', count);
118 debugl1(cs, "%s", bcs->blog);
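
Lines 83-118 are the matching transmit path, hscx_fill_fifo(): nothing happens without a queued tx_skb, at most one FIFO's worth of data is copied out of the skb, and the CMDR value is 0x8 while more data follows (or in transparent mode) versus 0xa for the final chunk, which presumably also closes the frame. A sketch under those assumptions; XTF/XME are my labels for the 0x8/0x2 bits, the 32-byte FIFO depth is assumed (the driver computes fifo_size per chip), and the buffer type stands in for sk_buff and skb_pull().

#define FIFO_SIZE 32            /* assumed FIFO depth; the driver uses fifo_size */
#define CMDR_XTF  0x08          /* "transmit frame" -- the 0x8 in the listing */
#define CMDR_XME  0x02          /* "transmit message end" -- makes 0x8 into 0xa */

struct tx_buf {                 /* stand-in for struct sk_buff */
    unsigned char *data;
    int len;
};

struct tx_chan {
    struct tx_buf *tx_skb;      /* frame currently being sent */
    int tx_cnt;                 /* bytes still owed to the upper layer */
    int count;                  /* bytes of this frame already in the chip */
    int transparent;            /* bcs->mode == L1_MODE_TRANS */
    int channel;
};

static void write_fifo(int channel, const unsigned char *src, int n)
{ (void)channel; (void)src; (void)n; }
static void write_cmdr(int channel, unsigned char val)
{ (void)channel; (void)val; }

void model_fill_fifo(struct tx_chan *c)
{
    int count, more;

    if (!c->tx_skb || c->tx_skb->len <= 0)
        return;

    /* Transparent data never carries a "message end". */
    more = c->transparent ? 1 : 0;
    if (c->tx_skb->len > FIFO_SIZE) {
        more = 1;                       /* rest follows on a later interrupt */
        count = FIFO_SIZE;
    } else {
        count = c->tx_skb->len;
    }

    /* The driver calls waitforXFW() before the FIFO write. */
    write_fifo(c->channel, c->tx_skb->data, count);   /* WRITEHSCXFIFO(...) */
    c->tx_skb->data += count;           /* skb_pull(bcs->tx_skb, count) */
    c->tx_skb->len  -= count;
    c->tx_cnt -= count;
    c->count  += count;
    write_cmdr(c->channel, more ? CMDR_XTF : (CMDR_XTF | CMDR_XME));
}
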
126 struct BCState *bcs = cs->bcs + hscx;
131 if (!test_bit(BC_FLG_INIT, &bcs->Flag))
141 bcs->err_inv++;
144 if ((r & 0x40) && bcs->mode) {
147 bcs->mode);
149 bcs->err_rdo++;
156 bcs->err_crc++;
165 hscx_empty_fifo(bcs, count);
166 if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
172 memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
173 skb_queue_tail(&bcs->rqueue, skb);
177 bcs->hw.hscx.rcvidx = 0;
178 schedule_event(bcs, B_RCVBUFREADY);
181 hscx_empty_fifo(bcs, fifo_size);
182 if (bcs->mode == L1_MODE_TRANS) {
187 memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
188 skb_queue_tail(&bcs->rqueue, skb);
190 bcs->hw.hscx.rcvidx = 0;
191 schedule_event(bcs, B_RCVBUFREADY);
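
Lines 126-191 are the receive half of hscx_interrupt(): the status byte is checked first (the err_inv, err_rdo and err_crc tallies), the FIFO is drained a final time, and the assembled frame is copied into a fresh sk_buff and queued on bcs->rqueue. In HDLC mode the payload is rcvidx - 1 bytes (my reading: the chip stores a trailing status byte with the frame), while transparent mode delivers fixed fifo_size chunks. A compact stand-alone model of just the delivery step, with my own frame/queue types in place of sk_buff and skb_queue_tail().

#include <stdlib.h>
#include <string.h>

#define HSCX_BUFMAX 4096

struct rx_frame {                /* stand-in for struct sk_buff */
    struct rx_frame *next;
    int len;
    unsigned char data[];
};

struct rx_chan {
    unsigned char rcvbuf[HSCX_BUFMAX];
    int rcvidx;
    struct rx_frame *rq_head, *rq_tail;   /* stand-in for bcs->rqueue */
};

static void rq_tail_add(struct rx_chan *rx, struct rx_frame *f)
{
    f->next = NULL;
    if (rx->rq_tail)
        rx->rq_tail->next = f;
    else
        rx->rq_head = f;
    rx->rq_tail = f;
}

static void deliver(struct rx_chan *rx, int count)
{
    struct rx_frame *f = malloc(sizeof(*f) + (size_t)count);

    if (!f)
        return;        /* the driver would handle allocation failure here */
    f->len = count;
    memcpy(f->data, rx->rcvbuf, (size_t)count);  /* memcpy(skb_put(skb, count), rcvbuf, count) */
    rq_tail_add(rx, f);                          /* skb_queue_tail(&bcs->rqueue, skb) */
}

void model_rx_complete(struct rx_chan *rx, int transparent, int fifo_size)
{
    /* HDLC frames carry a trailing status byte in rcvbuf, hence the -1. */
    int count = transparent ? fifo_size : rx->rcvidx - 1;

    if (count > 0)
        deliver(rx, count);
    rx->rcvidx = 0;
    /* hscx_interrupt() then calls schedule_event(bcs, B_RCVBUFREADY). */
}
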
195 if (bcs->tx_skb) {
196 if (bcs->tx_skb->len) {
197 hscx_fill_fifo(bcs);
200 if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
201 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
203 spin_lock_irqsave(&bcs->aclock, flags);
204 bcs->ackcnt += bcs->hw.hscx.count;
205 spin_unlock_irqrestore(&bcs->aclock, flags);
206 schedule_event(bcs, B_ACKPENDING);
208 dev_kfree_skb_irq(bcs->tx_skb);
209 bcs->hw.hscx.count = 0;
210 bcs->tx_skb = NULL;
213 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
214 bcs->hw.hscx.count = 0;
215 test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
216 hscx_fill_fifo(bcs);
218 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
219 schedule_event(bcs, B_XMTBUFREADY);
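
Lines 195-219 are the transmit-complete branch of the same interrupt: if the current tx_skb still has data the FIFO is simply refilled; otherwise the sent byte count is credited to the upper layer (under bcs->aclock, and only when FLG_LLI_L1WAKEUP is set and the packet is not PACKET_NOACK), the skb is freed, and the next frame is pulled from bcs->squeue or the channel is marked idle with B_XMTBUFREADY. A stand-alone sketch of that bookkeeping; the queue, lock and event helpers below are stubs of my own, not driver API.

#include <stddef.h>

struct xmit_buf {                 /* stand-in for struct sk_buff */
    unsigned char *data;
    int len;
    int no_ack;                   /* PACKET_NOACK in the driver */
};

struct xmit_chan {
    struct xmit_buf *tx_skb;
    int count;                    /* bytes of the current frame already sent */
    int ackcnt;                   /* bytes to report upward (bcs->ackcnt) */
    int busy;                     /* BC_FLG_BUSY */
    int want_wakeup;              /* FLG_LLI_L1WAKEUP */
};

/* Stubs for the pieces the listing references but does not show. */
static void refill_fifo(struct xmit_chan *c) { (void)c; }        /* hscx_fill_fifo() */
static struct xmit_buf *next_queued(struct xmit_chan *c) { (void)c; return NULL; }  /* skb_dequeue(&bcs->squeue) */
static void free_frame(struct xmit_buf *f) { (void)f; }          /* dev_kfree_skb_irq() */
static void ack_pending(struct xmit_chan *c) { (void)c; }        /* schedule_event(bcs, B_ACKPENDING) */
static void xmit_ready(struct xmit_chan *c) { (void)c; }         /* schedule_event(bcs, B_XMTBUFREADY) */

void model_xmit_done(struct xmit_chan *c)
{
    if (c->tx_skb) {
        if (c->tx_skb->len) {
            refill_fifo(c);       /* frame not finished: send the next chunk */
            return;
        }
        /* Frame fully sent: credit the bytes to the upper layer. The
         * driver updates ackcnt under spin_lock_irqsave(&bcs->aclock). */
        if (c->want_wakeup && !c->tx_skb->no_ack) {
            c->ackcnt += c->count;
            ack_pending(c);
        }
        free_frame(c->tx_skb);
        c->count = 0;
        c->tx_skb = NULL;
    }
    if ((c->tx_skb = next_queued(c))) {
        c->count = 0;
        c->busy = 1;              /* test_and_set_bit(BC_FLG_BUSY, ...) */
        refill_fifo(c);
    } else {
        c->busy = 0;              /* test_and_clear_bit(BC_FLG_BUSY, ...) */
        xmit_ready(c);
    }
}
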
229 struct BCState *bcs;
232 bcs = cs->bcs + 1;
235 if (bcs->mode == 1)
236 hscx_fill_fifo(bcs);
239 bcs->err_tx++;
244 if (bcs->tx_skb) {
245 skb_push(bcs->tx_skb, bcs->hw.hscx.count);
246 bcs->tx_cnt += bcs->hw.hscx.count;
247 bcs->hw.hscx.count = 0;
249 WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
262 bcs = cs->bcs;
265 if (bcs->mode == L1_MODE_TRANS)
266 hscx_fill_fifo(bcs);
272 bcs->err_tx++;
274 if (bcs->tx_skb) {
275 skb_push(bcs->tx_skb, bcs->hw.hscx.count);
276 bcs->tx_cnt += bcs->hw.hscx.count;
277 bcs->hw.hscx.count = 0;
279 WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
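
The last two groups (lines 229-249 and 262-279) are hscx_int_main() handling a transmit data underrun on each channel: in transparent mode the FIFO is just refilled, otherwise err_tx is bumped, the bytes already handed to the chip are pushed back onto tx_skb so the whole frame restarts, and the transmitter is reset with CMDR 0x01. A sketch of one channel's recovery path; XRES is my label for the 0x01 value, and the skb_push() is modeled by rewinding the data pointer.

#define CMDR_XRES 0x01            /* assumed "transmitter reset" command */

struct xdu_buf {                  /* stand-in for struct sk_buff */
    unsigned char *data;
    int len;
};

struct xdu_chan {
    struct xdu_buf *tx_skb;
    int tx_cnt;
    int count;                    /* bytes already written to the FIFO */
    int transparent;              /* bcs->mode == L1_MODE_TRANS */
    int err_tx;
    int channel;
};

static void write_cmdr(int channel, unsigned char val) { (void)channel; (void)val; }
static void refill_fifo(struct xdu_chan *c) { (void)c; }   /* hscx_fill_fifo() */

void model_tx_underrun(struct xdu_chan *c)
{
    if (c->transparent) {
        /* Streaming data: no frame to restart, just keep the FIFO fed. */
        refill_fifo(c);
        return;
    }
    c->err_tx++;
    if (c->tx_skb) {
        /* Give the already-sent bytes back to the frame: skb_push() above. */
        c->tx_skb->data -= c->count;
        c->tx_skb->len  += c->count;
        c->tx_cnt += c->count;
        c->count = 0;
    }
    write_cmdr(c->channel, CMDR_XRES);   /* WriteHSCXCMDR(..., 0x01) */
}
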