Lines matching refs: i2400m (RX path of the i2400m WiMAX driver, rx.c)

154 #include "i2400m.h"
176 * Goes over the list of queued reports in i2400m->rx_reports and
179 * NOTE: refcounts on i2400m are not needed because we flush the
180 * workqueue this runs on (i2400m->work_queue) before destroying
181 * i2400m.
185 struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
186 struct device *dev = i2400m_dev(i2400m);
192 spin_lock_irqsave(&i2400m->rx_lock, flags);
193 list_splice_init(&i2400m->rx_reports, &list);
194 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
201 i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
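The report-processing work above (lines 176-201) splices the whole i2400m->rx_reports list out under rx_lock and then walks it with the lock dropped. A minimal sketch of that consumer loop, assuming struct i2400m_report_hook_args carries the skb_rx, l3l4_hdr, size and list_node members the matched lines show:

        struct i2400m_report_hook_args *args, *args_next;
        LIST_HEAD(list);
        unsigned long flags;

        /* steal the whole pending list; the lock is held only briefly */
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        list_splice_init(&i2400m->rx_reports, &list);
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);

        /* process and free each queued report outside the lock */
        list_for_each_entry_safe(args, args_next, &list, list_node) {
                i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
                kfree_skb(args->skb_rx);        /* release the skb pinned at queue time */
                list_del(&args->list_node);
                kfree(args);
        }
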
214 void i2400m_report_hook_flush(struct i2400m *i2400m)
216 struct device *dev = i2400m_dev(i2400m);
222 spin_lock_irqsave(&i2400m->rx_lock, flags);
223 list_splice_init(&i2400m->rx_reports, &list);
224 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
237 * @i2400m: device descriptor
243 void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
246 struct device *dev = i2400m_dev(i2400m);
255 spin_lock_irqsave(&i2400m->rx_lock, flags);
256 list_add_tail(&args->list_node, &i2400m->rx_reports);
257 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
259 rmb(); /* see i2400m->ready's documentation */
260 if (likely(i2400m->ready)) /* only send if up */
261 queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
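The matching producer, i2400m_report_hook_queue (lines 243-261), pins the RX skb, appends a small args node under rx_lock and only kicks the workqueue when i2400m->ready says the device is up. A sketch under the same assumptions about struct i2400m_report_hook_args; the GFP flag is a guess:

        struct i2400m_report_hook_args *args;
        unsigned long flags;

        args = kzalloc(sizeof(*args), GFP_NOIO);        /* RX-path allocation */
        if (args != NULL) {
                args->skb_rx = skb_get(skb_rx);         /* hold the skb until the work runs */
                args->l3l4_hdr = l3l4_hdr;
                args->size = size;
                spin_lock_irqsave(&i2400m->rx_lock, flags);
                list_add_tail(&args->list_node, &i2400m->rx_reports);
                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
                rmb();          /* pairs with the barrier of the i2400m->ready writer */
                if (likely(i2400m->ready))
                        queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
        }
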
273 * @i2400m: device descriptor
278 * for it in i2400m->msg_completion.
286 void i2400m_rx_ctl_ack(struct i2400m *i2400m,
289 struct device *dev = i2400m_dev(i2400m);
290 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
295 spin_lock_irqsave(&i2400m->rx_lock, flags);
296 if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
300 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
305 spin_lock_irqsave(&i2400m->rx_lock, flags);
306 if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
312 i2400m->ack_skb = ack_skb;
313 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
314 complete(&i2400m->msg_completion);
321 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
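i2400m_rx_ctl_ack (lines 286-321) hands the ack skb over only while i2400m->ack_skb still holds the ERR_PTR(-EINPROGRESS) sentinel, i.e. while somebody is actually waiting on i2400m->msg_completion. The waiting side lives in the command submission path (control.c); a rough sketch of it, with the exact ordering and error handling assumed:

        struct sk_buff *ack_skb;
        unsigned long flags;

        /* submitter: announce that an ack is expected, then send and wait */
        init_completion(&i2400m->msg_completion);
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);

        /* ... hand the command skb to the bus-specific TX path ... */

        wait_for_completion_timeout(&i2400m->msg_completion, HZ);

        spin_lock_irqsave(&i2400m->rx_lock, flags);
        ack_skb = i2400m->ack_skb;      /* the skb stored by i2400m_rx_ctl_ack() */
        i2400m->ack_skb = NULL;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        if (IS_ERR(ack_skb))
                return PTR_ERR(ack_skb);        /* no ack: timeout or dropped by RX */
        /* ... parse the ack in ack_skb, then kfree_skb(ack_skb) ... */
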
328 * @i2400m: device descriptor
358 void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
362 struct device *dev = i2400m_dev(i2400m);
366 result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
397 * 'i2400m', we make sure to flush the
398 * i2400m->work_queue, so there are no issues.
400 i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
401 if (unlikely(i2400m->trace_msg_from_user))
402 wimax_msg(&i2400m->wimax_dev, "echo",
404 result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
410 i2400m_rx_ctl_ack(i2400m, payload, size);
419 * @i2400m: device descriptor
424 * The i2400m might produce trace information (diagnostics) and we
434 void i2400m_rx_trace(struct i2400m *i2400m,
438 struct device *dev = i2400m_dev(i2400m);
439 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
443 result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
505 unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
507 return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
553 void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
557 struct device *dev = i2400m_dev(i2400m);
588 void i2400m_roq_log_add(struct i2400m *i2400m,
595 int index = __i2400m_roq_index(i2400m, roq);
611 i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
617 void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
621 int index = __i2400m_roq_index(i2400m, roq);
627 i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
637 * @i2400m: device descriptor
657 void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
660 struct device *dev = i2400m_dev(i2400m);
665 d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
666 i2400m, roq, skb, sn, nsn);
724 d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
725 i2400m, roq, skb, sn, nsn);
732 * @i2400m: device descriptor
741 unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
744 struct device *dev = i2400m_dev(i2400m);
766 i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
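__i2400m_roq_update_ws (lines 741-766) releases every queued skb that falls behind the new window start and hands it to i2400m_net_erx(). A simplified sketch; the normalization helper name (__i2400m_roq_nsn) and the stop condition are assumptions, and the per-skb metadata is taken to live in skb->cb as struct i2400m_roq_data:

        struct sk_buff *skb_itr, *tmp_itr;
        struct i2400m_roq_data *roq_data_itr;
        unsigned new_nws, nsn_itr;

        new_nws = __i2400m_roq_nsn(roq, sn);    /* new window start, normalized */
        skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
                roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
                nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
                if (nsn_itr >= new_nws)         /* still inside the new window */
                        break;                  /* queue is kept sorted by nsn */
                __skb_unlink(skb_itr, &roq->queue);
                i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);      /* up the stack */
        }
        roq->ws = sn;                           /* advance the window start */
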
779 * @i2400m: device descriptor
786 void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
788 struct device *dev = i2400m_dev(i2400m);
792 d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
793 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
801 i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
804 d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
811 * @i2400m: device descriptor
821 void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
824 struct device *dev = i2400m_dev(i2400m);
827 d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u)\n",
828 i2400m, roq, skb, lbn);
834 i2400m_roq_log_dump(i2400m, roq);
835 i2400m_reset(i2400m, I2400M_RT_WARM);
837 __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
838 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
841 d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
842 i2400m, roq, skb, lbn);
850 * @i2400m: device descriptor
855 void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
858 struct device *dev = i2400m_dev(i2400m);
861 d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
864 nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
865 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
867 d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
874 * @i2400m: device descriptor
884 void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
887 struct device *dev = i2400m_dev(i2400m);
890 d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
891 i2400m, roq, skb, sn);
907 i2400m_net_erx(i2400m, skb, roq_data->cs);
909 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
911 __i2400m_roq_update_ws(i2400m, roq, sn + 1);
912 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
915 d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
916 i2400m, roq, skb, sn);
930 struct i2400m *i2400m
931 = container_of(ref, struct i2400m, rx_roq_refcount);
933 __skb_queue_purge(&i2400m->rx_roq[itr].queue);
934 kfree(i2400m->rx_roq[0].log);
935 kfree(i2400m->rx_roq);
936 i2400m->rx_roq = NULL;
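i2400m_rx_roq_destroy (lines 930-936) is the kref release callback for the reorder-queue array; only the loop bound is missing from the matched lines. Filled in, and noting that all per-queue logs come from one single allocation (see i2400m_rx_setup below), which is why only rx_roq[0].log is freed:

        unsigned itr;
        struct i2400m *i2400m
                = container_of(ref, struct i2400m, rx_roq_refcount);

        for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)   /* one queue per connection ID */
                __skb_queue_purge(&i2400m->rx_roq[itr].queue);
        kfree(i2400m->rx_roq[0].log);   /* the logs were a single kcalloc'd block */
        kfree(i2400m->rx_roq);
        i2400m->rx_roq = NULL;
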
942 * @i2400m: device descriptor
949 * Starting in v1.4 of the i2400m's firmware, the device can send data
960 * @i2400m: device descriptor
980 void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
983 struct device *dev = i2400m_dev(i2400m);
985 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
996 d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
997 "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
1031 spin_lock_irqsave(&i2400m->rx_lock, flags);
1032 if (i2400m->rx_roq == NULL) {
1034 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1037 roq = &i2400m->rx_roq[ro_cin];
1038 kref_get(&i2400m->rx_roq_refcount);
1039 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1051 i2400m_roq_reset(i2400m, roq);
1055 i2400m_roq_queue(i2400m, roq, skb, ro_sn);
1058 i2400m_roq_update_ws(i2400m, roq, ro_sn);
1062 i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
1068 spin_lock_irqsave(&i2400m->rx_lock, flags);
1069 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
1070 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1073 i2400m_net_erx(i2400m, skb, cs);
1076 d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
1077 "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
1084 * @i2400m: device instance
1097 void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
1101 struct device *dev = i2400m_dev(i2400m);
1112 i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
1115 i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
1118 i2400m_rx_trace(i2400m, payload, pl_size);
1122 i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
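i2400m_rx_payload (lines 1097-1122) fans each payload out by type. Reconstructed as a switch, with the I2400M_PT_* names and the i2400m_pld_type() accessor taken from the driver headers and assumed here:

        switch (i2400m_pld_type(pld)) {
        case I2400M_PT_DATA:            /* pre-v1.4 firmware: plain Ethernet frame */
                i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
                break;
        case I2400M_PT_CTRL:            /* control/management L3L4 message */
                i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
                break;
        case I2400M_PT_TRACE:           /* device diagnostics */
                i2400m_rx_trace(i2400m, payload, pl_size);
                break;
        case I2400M_PT_EDATA:           /* v1.4+ firmware: extended (reorderable) data */
                i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
                break;
        default:
                dev_err(dev, "don't know how to handle payload type %u\n",
                        i2400m_pld_type(pld));
        }
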
1135 * @i2400m: device descriptor
1143 int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
1148 struct device *dev = i2400m_dev(i2400m);
1178 * @i2400m: device descriptor
1188 int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
1193 struct device *dev = i2400m_dev(i2400m);
1197 if (pl_size > i2400m->bus_pl_size_max) {
1200 pl_itr, pl_size, i2400m->bus_pl_size_max);
1224 * @i2400m: device descriptor
1244 int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1247 struct device *dev = i2400m_dev(i2400m);
1254 d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
1255 i2400m, skb, skb_len);
1258 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
1276 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
1281 i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
1288 spin_lock_irqsave(&i2400m->rx_lock, flags);
1289 i2400m->rx_pl_num += i;
1290 if (i > i2400m->rx_pl_max)
1291 i2400m->rx_pl_max = i;
1292 if (i < i2400m->rx_pl_min)
1293 i2400m->rx_pl_min = i;
1294 i2400m->rx_num++;
1295 i2400m->rx_size_acc += skb_len;
1296 if (skb_len < i2400m->rx_size_min)
1297 i2400m->rx_size_min = skb_len;
1298 if (skb_len > i2400m->rx_size_max)
1299 i2400m->rx_size_max = skb_len;
1300 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1304 d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
1305 i2400m, skb, skb_len, result);
1311 void i2400m_unknown_barker(struct i2400m *i2400m,
1314 struct device *dev = i2400m_dev(i2400m);
1340 * i2400m->rx_reorder switch before taking a decision.
1346 int i2400m_rx_setup(struct i2400m *i2400m)
1350 i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
1351 if (i2400m->rx_reorder) {
1357 i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
1358 sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
1359 if (i2400m->rx_roq == NULL)
1362 rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
1370 __i2400m_roq_init(&i2400m->rx_roq[itr]);
1371 i2400m->rx_roq[itr].log = &rd[itr];
1373 kref_init(&i2400m->rx_roq_refcount);
1378 kfree(i2400m->rx_roq);
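i2400m_rx_setup (lines 1346-1378) reads the i2400m_rx_reorder_disabled module parameter and, when reordering stays enabled, allocates one reorder queue per connection ID plus a single block for all their logs. A sketch of the allocation and its unwind; the error-label names and the log struct name are assumed:

        int result = -ENOMEM;
        unsigned itr;
        struct i2400m_roq_log *rd;

        i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
        if (i2400m->rx_reorder) {
                i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
                                         sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
                if (i2400m->rx_roq == NULL)
                        goto error_roq_alloc;
                /* one log block for all queues; freed via rx_roq[0].log */
                rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
                             GFP_KERNEL);
                if (rd == NULL)
                        goto error_roq_log_alloc;
                for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
                        __i2400m_roq_init(&i2400m->rx_roq[itr]);
                        i2400m->rx_roq[itr].log = &rd[itr];
                }
                kref_init(&i2400m->rx_roq_refcount);
        }
        return 0;

error_roq_log_alloc:
        kfree(i2400m->rx_roq);
error_roq_alloc:
        return result;
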
1385 void i2400m_rx_release(struct i2400m *i2400m)
1389 if (i2400m->rx_reorder) {
1390 spin_lock_irqsave(&i2400m->rx_lock, flags);
1391 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
1392 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1395 i2400m_report_hook_flush(i2400m);