Lines Matching refs:sp

135 struct pvr2_stream *sp = bp->stream;
138 cnt = &sp->i_count;
139 bcnt = &sp->i_bcount;
143 cnt = &sp->q_count;
144 bcnt = &sp->q_bcount;
148 cnt = &sp->r_count;
149 bcnt = &sp->r_bcount;
168 struct pvr2_stream *sp;
170 sp = bp->stream;
176 spin_lock_irqsave(&sp->list_lock,irq_flags);
178 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
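
The matches at lines 135-149 and 168-178 come from the buffer-accounting path: every buffer state keeps a paired element count and byte count on the stream (i_count/i_bcount for idle, q_count/q_bcount for queued, r_count/r_bcount for ready), and the lists are only touched under sp->list_lock. A minimal sketch of that bookkeeping pattern follows; the helper name, the switch over a pvr2_buffer_state enum, and the exact placement of the lock are assumptions reconstructed from the matched lines, not the driver source.

	/* Sketch only: reconstructed from the matched lines, with assumed names. */
	static void stream_unaccount_buffer(struct pvr2_buffer *bp)
	{
		struct pvr2_stream *sp = bp->stream;
		unsigned int *cnt, *bcnt, ccnt;
		unsigned long irq_flags;

		switch (bp->state) {	/* pvr2_buffer_state enum assumed */
		case pvr2_buffer_state_idle:
			cnt = &sp->i_count; bcnt = &sp->i_bcount; ccnt = bp->max_count;
			break;
		case pvr2_buffer_state_queued:
			cnt = &sp->q_count; bcnt = &sp->q_bcount; ccnt = bp->max_count;
			break;
		case pvr2_buffer_state_ready:
			cnt = &sp->r_count; bcnt = &sp->r_bcount; ccnt = bp->used_count;
			break;
		default:
			return;
		}
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		list_del_init(&bp->list_overhead);	/* unlink from whichever list holds it */
		(*cnt)--;				/* one fewer buffer in this state */
		*bcnt -= ccnt;				/* and its bytes leave that state's total */
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	}

The ready-state byte total tracks used_count (bytes actually filled) while idle and queued track max_count, matching the additions at lines 199, 225, and 250.
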
185 struct pvr2_stream *sp;
187 sp = bp->stream;
193 spin_lock_irqsave(&sp->list_lock,irq_flags);
194 fl = (sp->r_count == 0);
196 list_add_tail(&bp->list_overhead,&sp->ready_list);
198 (sp->r_count)++;
199 sp->r_bcount += bp->used_count;
204 sp->r_bcount,sp->r_count);
205 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
212 struct pvr2_stream *sp;
214 sp = bp->stream;
220 spin_lock_irqsave(&sp->list_lock,irq_flags);
222 list_add_tail(&bp->list_overhead,&sp->idle_list);
224 (sp->i_count)++;
225 sp->i_bcount += bp->max_count;
230 sp->i_bcount,sp->i_count);
231 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
237 struct pvr2_stream *sp;
239 sp = bp->stream;
245 spin_lock_irqsave(&sp->list_lock,irq_flags);
247 list_add_tail(&bp->list_overhead,&sp->queued_list);
249 (sp->q_count)++;
250 sp->q_bcount += bp->max_count;
255 sp->q_bcount,sp->q_count);
256 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
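
Lines 185-256 are the three state-transition helpers: the buffer is appended to ready_list, idle_list, or queued_list with list_add_tail() and the matching count/byte-count pair is bumped, all inside the same list_lock critical section. Line 194 also records whether the ready list was empty beforehand, presumably so the caller can detect the empty-to-non-empty edge. A hedged sketch of the ready-side transition (the function name is an assumption, and the real helper presumably also unlinks the buffer from its previous list first):

	/* Sketch only: the "mark buffer ready" pattern suggested by lines 185-205. */
	static int stream_mark_ready(struct pvr2_buffer *bp)
	{
		struct pvr2_stream *sp = bp->stream;
		unsigned long irq_flags;
		int was_empty;

		spin_lock_irqsave(&sp->list_lock, irq_flags);
		was_empty = (sp->r_count == 0);
		list_add_tail(&bp->list_overhead, &sp->ready_list);
		sp->r_count++;
		sp->r_bcount += bp->used_count;	/* ready accounting counts bytes actually filled */
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
		return was_empty;		/* non-zero on the empty->non-empty edge */
	}
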
267 struct pvr2_stream *sp,
274 "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
275 bp->stream = sp;
300 static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
306 if (cnt == sp->buffer_total_count) return 0;
311 sp,
312 sp->buffer_total_count,
313 cnt-sp->buffer_total_count);
318 if (cnt > sp->buffer_total_count) {
319 if (scnt > sp->buffer_slot_count) {
323 if (sp->buffer_slot_count) {
324 memcpy(nb,sp->buffers,
325 sp->buffer_slot_count * sizeof(*nb));
326 kfree(sp->buffers);
328 sp->buffers = nb;
329 sp->buffer_slot_count = scnt;
331 while (sp->buffer_total_count < cnt) {
335 ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
340 sp->buffers[sp->buffer_total_count] = bp;
341 (sp->buffer_total_count)++;
345 while (sp->buffer_total_count > cnt) {
347 bp = sp->buffers[sp->buffer_total_count - 1];
349 sp->buffers[sp->buffer_total_count - 1] = NULL;
350 (sp->buffer_total_count)--;
354 if (scnt < sp->buffer_slot_count) {
357 nb = kmemdup(sp->buffers, scnt * sizeof(*nb),
361 kfree(sp->buffers);
362 sp->buffers = nb;
363 sp->buffer_slot_count = scnt;
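
Lines 300-363 are the buffer-pool resize routine (pvr2_stream_buffer_count): growing allocates a larger slot array, copies the old pointers across, and initializes new buffers one by one; shrinking tears buffers down from the end and then trims the slot array with kmemdup(). Below is a simplified userspace analogue of that grow/shrink pattern; the struct, the names, and the one-slot-per-buffer sizing are illustrative assumptions (the driver computes a separate slot count, scnt, from the requested count).

	/* Userspace sketch of the grow/shrink pattern in lines 300-363. */
	#include <stdlib.h>
	#include <string.h>

	struct pool {
		void **slots;			/* array of buffer pointers */
		unsigned int slot_count;	/* slots allocated */
		unsigned int total_count;	/* buffers actually present */
	};

	static int pool_set_count(struct pool *p, unsigned int cnt)
	{
		if (cnt == p->total_count) return 0;

		if (cnt > p->total_count) {
			/* Grow the slot array first, then create buffers one by one. */
			if (cnt > p->slot_count) {
				void **nb = malloc(cnt * sizeof(*nb));
				if (!nb) return -1;
				if (p->slot_count)
					memcpy(nb, p->slots, p->slot_count * sizeof(*nb));
				free(p->slots);
				p->slots = nb;
				p->slot_count = cnt;
			}
			while (p->total_count < cnt) {
				void *bp = calloc(1, 64);	/* stand-in for a real buffer */
				if (!bp) return -1;
				p->slots[p->total_count++] = bp;
			}
		} else {
			/* Shrink: free buffers from the end, then trim the slot array. */
			while (p->total_count > cnt) {
				free(p->slots[p->total_count - 1]);
				p->slots[--p->total_count] = NULL;
			}
			if (cnt < p->slot_count) {
				void **nb = cnt ? malloc(cnt * sizeof(*nb)) : NULL;
				if (cnt && !nb)
					return 0;	/* allocation failed: keep the old, larger array */
				if (cnt)
					memcpy(nb, p->slots, cnt * sizeof(*nb));
				free(p->slots);
				p->slots = nb;
				p->slot_count = cnt;
			}
		}
		return 0;
	}
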
369 static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
374 if (sp->buffer_total_count == sp->buffer_target_count) return 0;
379 sp,sp->buffer_total_count,sp->buffer_target_count);
381 if (sp->buffer_total_count < sp->buffer_target_count) {
382 return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
386 while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
387 bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
392 pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
398 static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
402 while ((lp = sp->queued_list.next) != &sp->queued_list) {
413 if (sp->buffer_total_count != sp->buffer_target_count) {
414 pvr2_stream_achieve_buffer_count(sp);
418 static void pvr2_stream_init(struct pvr2_stream *sp)
420 spin_lock_init(&sp->list_lock);
421 mutex_init(&sp->mutex);
422 INIT_LIST_HEAD(&sp->queued_list);
423 INIT_LIST_HEAD(&sp->ready_list);
424 INIT_LIST_HEAD(&sp->idle_list);
427 static void pvr2_stream_done(struct pvr2_stream *sp)
429 mutex_lock(&sp->mutex); do {
430 pvr2_stream_internal_flush(sp);
431 pvr2_stream_buffer_count(sp,0);
432 } while (0); mutex_unlock(&sp->mutex);
438 struct pvr2_stream *sp;
441 sp = bp->stream;
447 spin_lock_irqsave(&sp->list_lock,irq_flags);
452 (sp->buffers_processed)++;
453 sp->bytes_processed += urb->actual_length;
455 if (sp->fail_count) {
458 " - fail count reset",sp);
459 sp->fail_count = 0;
461 } else if (sp->fail_count < sp->fail_tolerance) {
464 (sp->fail_count)++;
465 (sp->buffers_failed)++;
469 sp,urb->status,sp->fail_count);
471 (sp->buffers_failed)++;
474 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
476 if (sp && sp->callback_func) {
477 sp->callback_func(sp->callback_data);
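
Lines 438-477 are matches from the URB completion handler: a successful transfer advances buffers_processed/bytes_processed and clears any accumulated fail_count, errors are tolerated up to fail_tolerance consecutive failures, and the registered callback is invoked only after the spinlock is dropped (lines 474-477). A hedged sketch of just that tolerance logic (the function shape is an assumption; the real handler also decides whether to mark the buffer ready or idle and whether to keep re-queuing):

	/* Sketch only: the failure-tolerance pattern suggested by lines 452-471.
	 * Assumed to run with sp->list_lock held, as lines 447/474 imply. */
	static void stream_note_completion(struct pvr2_stream *sp,
					   int status, unsigned int bytes)
	{
		if (status == 0) {
			sp->buffers_processed++;
			sp->bytes_processed += bytes;
			if (sp->fail_count)
				sp->fail_count = 0;	/* a success resets the failure streak */
		} else if (sp->fail_count < sp->fail_tolerance) {
			sp->fail_count++;		/* transient error: count it and carry on */
			sp->buffers_failed++;
		} else {
			sp->buffers_failed++;		/* tolerance exhausted: treat the stream as broken */
		}
	}
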
483 struct pvr2_stream *sp;
484 sp = kzalloc(sizeof(*sp),GFP_KERNEL);
485 if (!sp) return sp;
486 pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
487 pvr2_stream_init(sp);
488 return sp;
491 void pvr2_stream_destroy(struct pvr2_stream *sp)
493 if (!sp) return;
494 pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
495 pvr2_stream_done(sp);
496 kfree(sp);
499 void pvr2_stream_setup(struct pvr2_stream *sp,
504 mutex_lock(&sp->mutex); do {
505 pvr2_stream_internal_flush(sp);
506 sp->dev = dev;
507 sp->endpoint = endpoint;
508 sp->fail_tolerance = tolerance;
509 } while(0); mutex_unlock(&sp->mutex);
512 void pvr2_stream_set_callback(struct pvr2_stream *sp,
517 mutex_lock(&sp->mutex); do {
518 spin_lock_irqsave(&sp->list_lock,irq_flags);
519 sp->callback_data = data;
520 sp->callback_func = func;
521 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
522 } while(0); mutex_unlock(&sp->mutex);
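
Lines 483-522 cover the lifecycle entry points: pvr2_stream_create() allocates and initializes the object with kzalloc(), pvr2_stream_setup() binds it to a USB device and endpoint with a failure tolerance, pvr2_stream_set_callback() registers a notification hook under both the mutex and the spinlock, and pvr2_stream_destroy() flushes and frees everything. A hedged usage sketch follows; the callback body, endpoint number, and buffer count are illustrative, and the argument order for setup/set_callback is inferred from the field assignments at lines 506-508 and 519-520, so it should be double-checked against pvrusb2-io.h.

	/* Sketch only: how a caller might wire up a stream (lines 483-560). */
	static void my_stream_notify(void *data)
	{
		/* Invoked from URB completion (lines 476-477), so keep it short:
		 * typically just wake a process-context reader. */
	}

	static struct pvr2_stream *my_stream_open(struct usb_device *dev)
	{
		struct pvr2_stream *sp = pvr2_stream_create();
		if (!sp) return NULL;

		pvr2_stream_setup(sp, dev, 0x84 /* assumed bulk-in endpoint */, 3 /* fail tolerance */);
		pvr2_stream_set_callback(sp, my_stream_notify, sp);
		if (pvr2_stream_set_buffer_count(sp, 32)) {	/* 32 buffers: arbitrary */
			pvr2_stream_destroy(sp);
			return NULL;
		}
		return sp;
	}
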
525 void pvr2_stream_get_stats(struct pvr2_stream *sp,
530 spin_lock_irqsave(&sp->list_lock,irq_flags);
532 stats->buffers_in_queue = sp->q_count;
533 stats->buffers_in_idle = sp->i_count;
534 stats->buffers_in_ready = sp->r_count;
535 stats->buffers_processed = sp->buffers_processed;
536 stats->buffers_failed = sp->buffers_failed;
537 stats->bytes_processed = sp->bytes_processed;
540 sp->buffers_processed = 0;
541 sp->buffers_failed = 0;
542 sp->bytes_processed = 0;
544 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
548 int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
550 return sp->buffer_target_count;
553 int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
556 if (sp->buffer_target_count == cnt) return 0;
557 mutex_lock(&sp->mutex); do {
558 sp->buffer_target_count = cnt;
559 ret = pvr2_stream_achieve_buffer_count(sp);
560 } while(0); mutex_unlock(&sp->mutex);
564 struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
566 struct list_head *lp = sp->idle_list.next;
567 if (lp == &sp->idle_list) return NULL;
571 struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
573 struct list_head *lp = sp->ready_list.next;
574 if (lp == &sp->ready_list) return NULL;
578 struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
581 if (id >= sp->buffer_total_count) return NULL;
582 return sp->buffers[id];
585 int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
587 return sp->r_count;
590 void pvr2_stream_kill(struct pvr2_stream *sp)
593 mutex_lock(&sp->mutex); do {
594 pvr2_stream_internal_flush(sp);
595 while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
598 if (sp->buffer_total_count != sp->buffer_target_count) {
599 pvr2_stream_achieve_buffer_count(sp);
601 } while(0); mutex_unlock(&sp->mutex);
612 struct pvr2_stream *sp;
614 sp = bp->stream;
615 mutex_lock(&sp->mutex); do {
617 if (!sp->dev) {
631 sp->dev, // struct usb_device *dev
633 usb_rcvbulkpipe(sp->dev,sp->endpoint),
639 } while(0); mutex_unlock(&sp->mutex);
647 struct pvr2_stream *sp;
649 sp = bp->stream;
650 mutex_lock(&sp->mutex); do {
651 spin_lock_irqsave(&sp->list_lock,irq_flags);
666 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
667 } while(0); mutex_unlock(&sp->mutex);
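
The final groups (lines 564-667) are the consumer-facing side: idle buffers come from pvr2_stream_get_idle_buffer(), completed data from pvr2_stream_get_ready_buffer(), and pvr2_buffer_queue() submits a buffer back to the device via usb_rcvbulkpipe() on the stream's dev/endpoint. Since pvr2_buffer_queue() takes the stream mutex (line 615), the drain-and-requeue loop below is meant for a process-context reader woken by the stream callback, not for the callback itself; pvr2_buffer_get_count() and consume_payload() are assumptions (the former taken from the driver's public header, the latter purely hypothetical).

	/* Sketch only: draining ready buffers and re-queuing them (lines 564-667). */
	static void my_drain_ready(struct pvr2_stream *sp)
	{
		struct pvr2_buffer *bp;

		while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
			int len = pvr2_buffer_get_count(bp);	/* bytes the URB actually filled */

			if (len > 0)
				consume_payload(bp, len);	/* hypothetical helper */

			/* Hand the buffer back so another bulk transfer can be submitted. */
			if (pvr2_buffer_queue(bp) != 0)
				break;
		}
	}
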