ethreading.c revision ba6c59e9d7d7013b3906b6f4230b663422681848
1/* 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 3 * 4 * Use of this source code is governed by a BSD-style license 5 * that can be found in the LICENSE file in the root of the source 6 * tree. An additional intellectual property rights grant can be found 7 * in the file PATENTS. All contributing project authors may 8 * be found in the AUTHORS file in the root of the source tree. 9 */ 10 11#include "onyx_int.h" 12#include "vp8/common/threading.h" 13#include "vp8/common/common.h" 14#include "vp8/common/extend.h" 15#include "bitstream.h" 16#include "encodeframe.h" 17 18#if CONFIG_MULTITHREAD 19 20extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip); 21 22extern void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm); 23 24static THREAD_FUNCTION thread_loopfilter(void *p_data) 25{ 26 VP8_COMP *cpi = (VP8_COMP *)(((LPFTHREAD_DATA *)p_data)->ptr1); 27 VP8_COMMON *cm = &cpi->common; 28 29 while (1) 30 { 31 if (cpi->b_multi_threaded == 0) 32 break; 33 34 if (sem_wait(&cpi->h_event_start_lpf) == 0) 35 { 36 if (cpi->b_multi_threaded == 0) /* we're shutting down */ 37 break; 38 39 vp8_loopfilter_frame(cpi, cm); 40 41 sem_post(&cpi->h_event_end_lpf); 42 } 43 } 44 45 return 0; 46} 47 48static 49THREAD_FUNCTION thread_encoding_proc(void *p_data) 50{ 51 int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread; 52 VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1); 53 MB_ROW_COMP *mbri = (MB_ROW_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr2); 54 ENTROPY_CONTEXT_PLANES mb_row_left_context; 55 56 while (1) 57 { 58 if (cpi->b_multi_threaded == 0) 59 break; 60 61 if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0) 62 { 63 const int nsync = cpi->mt_sync_range; 64 VP8_COMMON *cm = &cpi->common; 65 int mb_row; 66 MACROBLOCK *x = &mbri->mb; 67 MACROBLOCKD *xd = &x->e_mbd; 68 TOKENEXTRA *tp ; 69#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 70 TOKENEXTRA *tp_start = cpi->tok + (1 + ithread) * (16 * 24); 71 const int num_part = (1 << cm->multi_token_partition); 72#endif 73 74 int *segment_counts = mbri->segment_counts; 75 int *totalrate = &mbri->totalrate; 76 77 if (cpi->b_multi_threaded == 0) /* we're shutting down */ 78 break; 79 80 for (mb_row = ithread + 1; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1)) 81 { 82 83 int recon_yoffset, recon_uvoffset; 84 int mb_col; 85 int ref_fb_idx = cm->lst_fb_idx; 86 int dst_fb_idx = cm->new_fb_idx; 87 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; 88 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; 89 int map_index = (mb_row * cm->mb_cols); 90 volatile const int *last_row_current_mb_col; 91 volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row]; 92 93#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) 94 vp8_writer *w = &cpi->bc[1 + (mb_row % num_part)]; 95#else 96 tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24)); 97 cpi->tplist[mb_row].start = tp; 98#endif 99 100 last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; 101 102 /* reset above block coeffs */ 103 xd->above_context = cm->above_context; 104 xd->left_context = &mb_row_left_context; 105 106 vp8_zero(mb_row_left_context); 107 108 xd->up_available = (mb_row != 0); 109 recon_yoffset = (mb_row * recon_y_stride * 16); 110 recon_uvoffset = (mb_row * recon_uv_stride * 8); 111 112 /* Set the mb activity pointer to the start of the row. */ 113 x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; 114 115 /* for each macroblock col in image */ 116 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) 117 { 118 *current_mb_col = mb_col - 1; 119 120 if ((mb_col & (nsync - 1)) == 0) 121 { 122 while (mb_col > (*last_row_current_mb_col - nsync)) 123 { 124 x86_pause_hint(); 125 thread_sleep(0); 126 } 127 } 128 129#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 130 tp = tp_start; 131#endif 132 133 /* Distance of Mb to the various image edges. 134 * These specified to 8th pel as they are always compared 135 * to values that are in 1/8th pel units 136 */ 137 xd->mb_to_left_edge = -((mb_col * 16) << 3); 138 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; 139 xd->mb_to_top_edge = -((mb_row * 16) << 3); 140 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; 141 142 /* Set up limit values for motion vectors used to prevent 143 * them extending outside the UMV borders 144 */ 145 x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); 146 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); 147 x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); 148 x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); 149 150 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; 151 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; 152 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; 153 xd->left_available = (mb_col != 0); 154 155 x->rddiv = cpi->RDDIV; 156 x->rdmult = cpi->RDMULT; 157 158 /* Copy current mb to a buffer */ 159 vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); 160 161 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) 162 vp8_activity_masking(cpi, x); 163 164 /* Is segmentation enabled */ 165 /* MB level adjustment to quantizer */ 166 if (xd->segmentation_enabled) 167 { 168 /* Code to set segment id in xd->mbmi.segment_id for 169 * current MB (with range checking) 170 */ 171 if (cpi->segmentation_map[map_index + mb_col] <= 3) 172 xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index + mb_col]; 173 else 174 xd->mode_info_context->mbmi.segment_id = 0; 175 176 vp8cx_mb_init_quantizer(cpi, x, 1); 177 } 178 else 179 /* Set to Segment 0 by default */ 180 xd->mode_info_context->mbmi.segment_id = 0; 181 182 x->active_ptr = cpi->active_map + map_index + mb_col; 183 184 if (cm->frame_type == KEY_FRAME) 185 { 186 *totalrate += vp8cx_encode_intra_macroblock(cpi, x, &tp); 187#ifdef MODE_STATS 188 y_modes[xd->mbmi.mode] ++; 189#endif 190 } 191 else 192 { 193 *totalrate += vp8cx_encode_inter_macroblock(cpi, x, &tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); 194 195#ifdef MODE_STATS 196 inter_y_modes[xd->mbmi.mode] ++; 197 198 if (xd->mbmi.mode == SPLITMV) 199 { 200 int b; 201 202 for (b = 0; b < xd->mbmi.partition_count; b++) 203 { 204 inter_b_modes[x->partition->bmi[b].mode] ++; 205 } 206 } 207 208#endif 209 // Keep track of how many (consecutive) times a block 210 // is coded as ZEROMV_LASTREF, for base layer frames. 211 // Reset to 0 if its coded as anything else. 212 if (cpi->current_layer == 0) { 213 if (xd->mode_info_context->mbmi.mode == ZEROMV && 214 xd->mode_info_context->mbmi.ref_frame == 215 LAST_FRAME) { 216 // Increment, check for wrap-around. 217 if (cpi->consec_zero_last[map_index+mb_col] < 255) 218 cpi->consec_zero_last[map_index+mb_col] += 219 1; 220 } else { 221 cpi->consec_zero_last[map_index+mb_col] = 0; 222 } 223 } 224 225 /* Special case code for cyclic refresh 226 * If cyclic update enabled then copy 227 * xd->mbmi.segment_id; (which may have been updated 228 * based on mode during 229 * vp8cx_encode_inter_macroblock()) back into the 230 * global segmentation map 231 */ 232 if ((cpi->current_layer == 0) && 233 (cpi->cyclic_refresh_mode_enabled && 234 xd->segmentation_enabled)) 235 { 236 const MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi; 237 cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id; 238 239 /* If the block has been refreshed mark it as clean 240 * (the magnitude of the -ve influences how long it 241 * will be before we consider another refresh): 242 * Else if it was coded (last frame 0,0) and has 243 * not already been refreshed then mark it as a 244 * candidate for cleanup next time (marked 0) else 245 * mark it as dirty (1). 246 */ 247 if (mbmi->segment_id) 248 cpi->cyclic_refresh_map[map_index + mb_col] = -1; 249 else if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME)) 250 { 251 if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) 252 cpi->cyclic_refresh_map[map_index + mb_col] = 0; 253 } 254 else 255 cpi->cyclic_refresh_map[map_index + mb_col] = 1; 256 257 } 258 } 259 260#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 261 /* pack tokens for this MB */ 262 { 263 int tok_count = tp - tp_start; 264 pack_tokens(w, tp_start, tok_count); 265 } 266#else 267 cpi->tplist[mb_row].stop = tp; 268#endif 269 /* Increment pointer into gf usage flags structure. */ 270 x->gf_active_ptr++; 271 272 /* Increment the activity mask pointers. */ 273 x->mb_activity_ptr++; 274 275 /* adjust to the next column of macroblocks */ 276 x->src.y_buffer += 16; 277 x->src.u_buffer += 8; 278 x->src.v_buffer += 8; 279 280 recon_yoffset += 16; 281 recon_uvoffset += 8; 282 283 /* Keep track of segment usage */ 284 segment_counts[xd->mode_info_context->mbmi.segment_id]++; 285 286 /* skip to next mb */ 287 xd->mode_info_context++; 288 x->partition_info++; 289 xd->above_context++; 290 } 291 292 vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx], 293 xd->dst.y_buffer + 16, 294 xd->dst.u_buffer + 8, 295 xd->dst.v_buffer + 8); 296 297 *current_mb_col = mb_col + nsync; 298 299 /* this is to account for the border */ 300 xd->mode_info_context++; 301 x->partition_info++; 302 303 x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols; 304 x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; 305 x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; 306 307 xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count; 308 x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; 309 x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; 310 311 if (mb_row == cm->mb_rows - 1) 312 { 313 sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */ 314 } 315 } 316 } 317 } 318 319 /* printf("exit thread %d\n", ithread); */ 320 return 0; 321} 322 323static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) 324{ 325 326 MACROBLOCK *x = mbsrc; 327 MACROBLOCK *z = mbdst; 328 int i; 329 330 z->ss = x->ss; 331 z->ss_count = x->ss_count; 332 z->searches_per_step = x->searches_per_step; 333 z->errorperbit = x->errorperbit; 334 335 z->sadperbit16 = x->sadperbit16; 336 z->sadperbit4 = x->sadperbit4; 337 338 /* 339 z->mv_col_min = x->mv_col_min; 340 z->mv_col_max = x->mv_col_max; 341 z->mv_row_min = x->mv_row_min; 342 z->mv_row_max = x->mv_row_max; 343 */ 344 345 z->short_fdct4x4 = x->short_fdct4x4; 346 z->short_fdct8x4 = x->short_fdct8x4; 347 z->short_walsh4x4 = x->short_walsh4x4; 348 z->quantize_b = x->quantize_b; 349 z->quantize_b_pair = x->quantize_b_pair; 350 z->optimize = x->optimize; 351 352 /* 353 z->mvc = x->mvc; 354 z->src.y_buffer = x->src.y_buffer; 355 z->src.u_buffer = x->src.u_buffer; 356 z->src.v_buffer = x->src.v_buffer; 357 */ 358 359 z->mvcost[0] = x->mvcost[0]; 360 z->mvcost[1] = x->mvcost[1]; 361 z->mvsadcost[0] = x->mvsadcost[0]; 362 z->mvsadcost[1] = x->mvsadcost[1]; 363 364 z->token_costs = x->token_costs; 365 z->inter_bmode_costs = x->inter_bmode_costs; 366 z->mbmode_cost = x->mbmode_cost; 367 z->intra_uv_mode_cost = x->intra_uv_mode_cost; 368 z->bmode_costs = x->bmode_costs; 369 370 for (i = 0; i < 25; i++) 371 { 372 z->block[i].quant = x->block[i].quant; 373 z->block[i].quant_fast = x->block[i].quant_fast; 374 z->block[i].quant_shift = x->block[i].quant_shift; 375 z->block[i].zbin = x->block[i].zbin; 376 z->block[i].zrun_zbin_boost = x->block[i].zrun_zbin_boost; 377 z->block[i].round = x->block[i].round; 378 z->block[i].src_stride = x->block[i].src_stride; 379 } 380 381 z->q_index = x->q_index; 382 z->act_zbin_adj = x->act_zbin_adj; 383 z->last_act_zbin_adj = x->last_act_zbin_adj; 384 385 { 386 MACROBLOCKD *xd = &x->e_mbd; 387 MACROBLOCKD *zd = &z->e_mbd; 388 389 /* 390 zd->mode_info_context = xd->mode_info_context; 391 zd->mode_info = xd->mode_info; 392 393 zd->mode_info_stride = xd->mode_info_stride; 394 zd->frame_type = xd->frame_type; 395 zd->up_available = xd->up_available ; 396 zd->left_available = xd->left_available; 397 zd->left_context = xd->left_context; 398 zd->last_frame_dc = xd->last_frame_dc; 399 zd->last_frame_dccons = xd->last_frame_dccons; 400 zd->gold_frame_dc = xd->gold_frame_dc; 401 zd->gold_frame_dccons = xd->gold_frame_dccons; 402 zd->mb_to_left_edge = xd->mb_to_left_edge; 403 zd->mb_to_right_edge = xd->mb_to_right_edge; 404 zd->mb_to_top_edge = xd->mb_to_top_edge ; 405 zd->mb_to_bottom_edge = xd->mb_to_bottom_edge; 406 zd->gf_active_ptr = xd->gf_active_ptr; 407 zd->frames_since_golden = xd->frames_since_golden; 408 zd->frames_till_alt_ref_frame = xd->frames_till_alt_ref_frame; 409 */ 410 zd->subpixel_predict = xd->subpixel_predict; 411 zd->subpixel_predict8x4 = xd->subpixel_predict8x4; 412 zd->subpixel_predict8x8 = xd->subpixel_predict8x8; 413 zd->subpixel_predict16x16 = xd->subpixel_predict16x16; 414 zd->segmentation_enabled = xd->segmentation_enabled; 415 zd->mb_segement_abs_delta = xd->mb_segement_abs_delta; 416 vpx_memcpy(zd->segment_feature_data, xd->segment_feature_data, 417 sizeof(xd->segment_feature_data)); 418 419 vpx_memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc, 420 sizeof(xd->dequant_y1_dc)); 421 vpx_memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1)); 422 vpx_memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2)); 423 vpx_memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv)); 424 425#if 1 426 /*TODO: Remove dequant from BLOCKD. This is a temporary solution until 427 * the quantizer code uses a passed in pointer to the dequant constants. 428 * This will also require modifications to the x86 and neon assembly. 429 * */ 430 for (i = 0; i < 16; i++) 431 zd->block[i].dequant = zd->dequant_y1; 432 for (i = 16; i < 24; i++) 433 zd->block[i].dequant = zd->dequant_uv; 434 zd->block[24].dequant = zd->dequant_y2; 435#endif 436 437 438 vpx_memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes)); 439 vpx_memcpy(z->rd_thresh_mult, x->rd_thresh_mult, 440 sizeof(x->rd_thresh_mult)); 441 442 z->zbin_over_quant = x->zbin_over_quant; 443 z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; 444 z->zbin_mode_boost = x->zbin_mode_boost; 445 446 vpx_memset(z->error_bins, 0, sizeof(z->error_bins)); 447 } 448} 449 450void vp8cx_init_mbrthread_data(VP8_COMP *cpi, 451 MACROBLOCK *x, 452 MB_ROW_COMP *mbr_ei, 453 int count 454 ) 455{ 456 457 VP8_COMMON *const cm = & cpi->common; 458 MACROBLOCKD *const xd = & x->e_mbd; 459 int i; 460 461 for (i = 0; i < count; i++) 462 { 463 MACROBLOCK *mb = & mbr_ei[i].mb; 464 MACROBLOCKD *mbd = &mb->e_mbd; 465 466 mbd->subpixel_predict = xd->subpixel_predict; 467 mbd->subpixel_predict8x4 = xd->subpixel_predict8x4; 468 mbd->subpixel_predict8x8 = xd->subpixel_predict8x8; 469 mbd->subpixel_predict16x16 = xd->subpixel_predict16x16; 470 mb->gf_active_ptr = x->gf_active_ptr; 471 472 vpx_memset(mbr_ei[i].segment_counts, 0, sizeof(mbr_ei[i].segment_counts)); 473 mbr_ei[i].totalrate = 0; 474 475 mb->partition_info = x->pi + x->e_mbd.mode_info_stride * (i + 1); 476 477 mbd->mode_info_context = cm->mi + x->e_mbd.mode_info_stride * (i + 1); 478 mbd->mode_info_stride = cm->mode_info_stride; 479 480 mbd->frame_type = cm->frame_type; 481 482 mb->src = * cpi->Source; 483 mbd->pre = cm->yv12_fb[cm->lst_fb_idx]; 484 mbd->dst = cm->yv12_fb[cm->new_fb_idx]; 485 486 mb->src.y_buffer += 16 * x->src.y_stride * (i + 1); 487 mb->src.u_buffer += 8 * x->src.uv_stride * (i + 1); 488 mb->src.v_buffer += 8 * x->src.uv_stride * (i + 1); 489 490 vp8_build_block_offsets(mb); 491 492 mbd->left_context = &cm->left_context; 493 mb->mvc = cm->fc.mvc; 494 495 setup_mbby_copy(&mbr_ei[i].mb, x); 496 497 mbd->fullpixel_mask = 0xffffffff; 498 if(cm->full_pixel) 499 mbd->fullpixel_mask = 0xfffffff8; 500 501 vp8_zero(mb->coef_counts); 502 vp8_zero(x->ymode_count); 503 mb->skip_true_count = 0; 504 vp8_zero(mb->MVcount); 505 mb->prediction_error = 0; 506 mb->intra_error = 0; 507 vp8_zero(mb->count_mb_ref_frame_usage); 508 mb->mbs_tested_so_far = 0; 509 } 510} 511 512int vp8cx_create_encoder_threads(VP8_COMP *cpi) 513{ 514 const VP8_COMMON * cm = &cpi->common; 515 516 cpi->b_multi_threaded = 0; 517 cpi->encoding_thread_count = 0; 518 cpi->b_lpf_running = 0; 519 520 if (cm->processor_core_count > 1 && cpi->oxcf.multi_threaded > 1) 521 { 522 int ithread; 523 int th_count = cpi->oxcf.multi_threaded - 1; 524 int rc = 0; 525 526 /* don't allocate more threads than cores available */ 527 if (cpi->oxcf.multi_threaded > cm->processor_core_count) 528 th_count = cm->processor_core_count - 1; 529 530 /* we have th_count + 1 (main) threads processing one row each */ 531 /* no point to have more threads than the sync range allows */ 532 if(th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1)) 533 { 534 th_count = (cm->mb_cols / cpi->mt_sync_range) - 1; 535 } 536 537 if(th_count == 0) 538 return 0; 539 540 CHECK_MEM_ERROR(cpi->h_encoding_thread, 541 vpx_malloc(sizeof(pthread_t) * th_count)); 542 CHECK_MEM_ERROR(cpi->h_event_start_encoding, 543 vpx_malloc(sizeof(sem_t) * th_count)); 544 CHECK_MEM_ERROR(cpi->mb_row_ei, 545 vpx_memalign(32, sizeof(MB_ROW_COMP) * th_count)); 546 vpx_memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count); 547 CHECK_MEM_ERROR(cpi->en_thread_data, 548 vpx_malloc(sizeof(ENCODETHREAD_DATA) * th_count)); 549 550 sem_init(&cpi->h_event_end_encoding, 0, 0); 551 552 cpi->b_multi_threaded = 1; 553 cpi->encoding_thread_count = th_count; 554 555 /* 556 printf("[VP8:] multi_threaded encoding is enabled with %d threads\n\n", 557 (cpi->encoding_thread_count +1)); 558 */ 559 560 for (ithread = 0; ithread < th_count; ithread++) 561 { 562 ENCODETHREAD_DATA *ethd = &cpi->en_thread_data[ithread]; 563 564 /* Setup block ptrs and offsets */ 565 vp8_setup_block_ptrs(&cpi->mb_row_ei[ithread].mb); 566 vp8_setup_block_dptrs(&cpi->mb_row_ei[ithread].mb.e_mbd); 567 568 sem_init(&cpi->h_event_start_encoding[ithread], 0, 0); 569 570 ethd->ithread = ithread; 571 ethd->ptr1 = (void *)cpi; 572 ethd->ptr2 = (void *)&cpi->mb_row_ei[ithread]; 573 574 rc = pthread_create(&cpi->h_encoding_thread[ithread], 0, 575 thread_encoding_proc, ethd); 576 if(rc) 577 break; 578 } 579 580 if(rc) 581 { 582 /* shutdown other threads */ 583 cpi->b_multi_threaded = 0; 584 for(--ithread; ithread >= 0; ithread--) 585 { 586 pthread_join(cpi->h_encoding_thread[ithread], 0); 587 sem_destroy(&cpi->h_event_start_encoding[ithread]); 588 } 589 sem_destroy(&cpi->h_event_end_encoding); 590 591 /* free thread related resources */ 592 vpx_free(cpi->h_event_start_encoding); 593 vpx_free(cpi->h_encoding_thread); 594 vpx_free(cpi->mb_row_ei); 595 vpx_free(cpi->en_thread_data); 596 597 return -1; 598 } 599 600 601 { 602 LPFTHREAD_DATA * lpfthd = &cpi->lpf_thread_data; 603 604 sem_init(&cpi->h_event_start_lpf, 0, 0); 605 sem_init(&cpi->h_event_end_lpf, 0, 0); 606 607 lpfthd->ptr1 = (void *)cpi; 608 rc = pthread_create(&cpi->h_filter_thread, 0, thread_loopfilter, 609 lpfthd); 610 611 if(rc) 612 { 613 /* shutdown other threads */ 614 cpi->b_multi_threaded = 0; 615 for(--ithread; ithread >= 0; ithread--) 616 { 617 sem_post(&cpi->h_event_start_encoding[ithread]); 618 pthread_join(cpi->h_encoding_thread[ithread], 0); 619 sem_destroy(&cpi->h_event_start_encoding[ithread]); 620 } 621 sem_destroy(&cpi->h_event_end_encoding); 622 sem_destroy(&cpi->h_event_end_lpf); 623 sem_destroy(&cpi->h_event_start_lpf); 624 625 /* free thread related resources */ 626 vpx_free(cpi->h_event_start_encoding); 627 vpx_free(cpi->h_encoding_thread); 628 vpx_free(cpi->mb_row_ei); 629 vpx_free(cpi->en_thread_data); 630 631 return -2; 632 } 633 } 634 } 635 return 0; 636} 637 638void vp8cx_remove_encoder_threads(VP8_COMP *cpi) 639{ 640 if (cpi->b_multi_threaded) 641 { 642 /* shutdown other threads */ 643 cpi->b_multi_threaded = 0; 644 { 645 int i; 646 647 for (i = 0; i < cpi->encoding_thread_count; i++) 648 { 649 sem_post(&cpi->h_event_start_encoding[i]); 650 pthread_join(cpi->h_encoding_thread[i], 0); 651 652 sem_destroy(&cpi->h_event_start_encoding[i]); 653 } 654 655 sem_post(&cpi->h_event_start_lpf); 656 pthread_join(cpi->h_filter_thread, 0); 657 } 658 659 sem_destroy(&cpi->h_event_end_encoding); 660 sem_destroy(&cpi->h_event_end_lpf); 661 sem_destroy(&cpi->h_event_start_lpf); 662 663 /* free thread related resources */ 664 vpx_free(cpi->h_event_start_encoding); 665 vpx_free(cpi->h_encoding_thread); 666 vpx_free(cpi->mb_row_ei); 667 vpx_free(cpi->en_thread_data); 668 } 669} 670#endif 671