Lines Matching defs:parts
(Each hit below is prefixed with its line number in the matched source file; the member definitions at 629, 788 and 799 place these hits in GGLAssembler's scanline-generation code. A hit that ends in a comma continues on a following source line that did not itself match and is therefore not listed.)

189     fragment_parts_t parts;
195 build_scanline_prolog(parts, needs);
208 MOV(AL, 0, parts.count.reg,
209         reg_imm(parts.count.reg, ROR, GGL_DITHER_ORDER_SHIFT));
210 ADD(AL, 0, parts.count.reg, parts.count.reg,
212 MOV(AL, 0, parts.count.reg,
213         reg_imm(parts.count.reg, ROR, 32 - GGL_DITHER_ORDER_SHIFT));
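
The MOV/ADD/MOV at 208-213 is a rotate-increment-rotate trick: the low GGL_DITHER_ORDER_SHIFT bits of parts.count.reg, which appear to hold the dither offset (compare the 0xNNNNXXDD comment at 371 and the AND with imm(mask) at 265), are rotated up to the top of the register, bumped by the ADD at 210 (whose second operand sits on the unmatched line 211), and rotated back, so the increment wraps inside those low bits without disturbing the pixel count kept in the upper half. A minimal C++ model of that effect, assuming the elided ADD operand is the immediate 1 << (32 - GGL_DITHER_ORDER_SHIFT):

    // Sketch only: models what the emitted ARM sequence computes, not the code
    // generator. SHIFT stands for GGL_DITHER_ORDER_SHIFT, assumed to be in (0, 32);
    // the ADD immediate is an assumption as well.
    #include <cstdint>

    static uint32_t bump_dither_bits(uint32_t count, unsigned SHIFT) {
        // MOV ... ROR #SHIFT: rotate the low SHIFT bits up to the top of the word
        uint32_t r = (count >> SHIFT) | (count << (32 - SHIFT));
        // ADD: increments the (now topmost) dither bits; overflow falls off bit 31
        r += 1u << (32 - SHIFT);
        // MOV ... ROR #(32 - SHIFT): rotate everything back into place.
        // Net effect: the low SHIFT bits are incremented mod 2^SHIFT and the
        // pixel count in the upper half is untouched.
        return (r >> (32 - SHIFT)) | (r << SHIFT);
    }
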
222 build_depth_test(parts, Z_TEST|Z_WRITE);
226 build_depth_test(parts, Z_TEST);
233 build_textures(parts, regs);
247 load(parts.cbPtr, mDstPixel);
255 if (directTex | parts.packed) {
258 pixel = directTex ? parts.texel[directTex-1] : parts.iterated;
264 parts.dither = reg_t(regs.obtain());
265 AND(AL, 0, parts.dither.reg, parts.count.reg, imm(mask));
266 ADDR_ADD(AL, 0, parts.dither.reg, ctxtReg, parts.dither.reg);
267 LDRB(AL, parts.dither.reg, parts.dither.reg,
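
Lines 264-267 then turn that index into a dither value: the low bits of the counter are masked off into parts.dither, added to the context pointer (ctxtReg), and a byte is loaded from the result (the LDRB's remaining operand is on the unmatched line 268, presumably an offset to wherever the dither matrix lives inside the context). In C++ terms, with a made-up context layout standing in for the real one:

    // Hypothetical layout and names; only the indexing pattern comes from the
    // listing (AND with imm(mask), ADDR_ADD with ctxtReg, LDRB).
    #include <cstdint>

    struct FakeContext {
        uint8_t ditherMatrix[64];   // name and size are assumptions
    };

    static uint8_t dither_value(const FakeContext* ctx,
                                uint32_t packed_count, uint32_t mask) {
        uint32_t index = packed_count & mask;   // AND(..., imm(mask))
        return ctx->ditherMatrix[index];        // ADDR_ADD + LDRB [reg, offset]
    }
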
274 build_component(pixel, parts, GGLFormat::ALPHA, regs);
279 build_depth_test(parts, Z_WRITE);
282 build_component(pixel, parts, GGLFormat::RED, regs);
283 build_component(pixel, parts, GGLFormat::GREEN, regs);
284 build_component(pixel, parts, GGLFormat::BLUE, regs);
306 store(parts.cbPtr, pixel, WRITE_BACK);
314 if (parts.reload != 3) {
315 build_smooth_shade(parts);
319 build_iterate_z(parts);
322 build_iterate_f(parts);
324 SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
332 build_iterate_texture_coordinates(parts);
335 build_smooth_shade(parts);
336 build_iterate_z(parts);
337 build_iterate_f(parts);
339 ADDR_ADD(AL, 0, parts.cbPtr.reg, parts.cbPtr.reg, imm(parts.cbPtr.size>>3));
341 SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
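
The two SUB(AL, S, ...) instructions at 324 and 341 both step the scanline loop: the remaining-pixel count sits in the top 16 bits of parts.count.reg (see the 0xNNNNXXDD comment at 371), so subtracting 1<<16 decrements it, and the S suffix updates the flags presumably tested by a loop-back branch that is not listed here because it never mentions parts. The ADDR_ADD at 339 advances the color-buffer pointer by size-in-bits/8 bytes, apparently on a path where the WRITE_BACK store at 306 did not run. A rough C++ restatement of that loop tail, with invented names:

    // Effect-level sketch only; the real code emits ARM instructions rather
    // than executing anything like this directly.
    #include <cstdint>

    struct ScanlineTail {
        uint8_t* cbPtr;     // color-buffer write pointer
        uint32_t cbBits;    // parts.cbPtr.size: destination pixel size in bits
        uint32_t count;     // packed counter, pixels remaining in the high 16 bits
    };

    // Returns true while the generated loop would take another pixel.
    static bool step(ScanlineTail& s) {
        s.cbPtr += s.cbBits >> 3;                    // ADDR_ADD ... imm(parts.cbPtr.size>>3)
        s.count -= 1u << 16;                         // SUB(AL, S, count, count, imm(1<<16))
        return static_cast<int32_t>(s.count) >= 0;   // loop-back condition implied by S
    }
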
352         fragment_parts_t& parts, const needs_t& needs)
359 parts.count.setTo(obtainReg());
363 CONTEXT_LOAD(parts.count.reg, iterators.xr);
366 // parts.count = iterators.xr - Rx
367 SUB(AL, 0, parts.count.reg, parts.count.reg, Rx);
368 SUB(AL, 0, parts.count.reg, parts.count.reg, imm(1));
371 // parts.count.reg = 0xNNNNXXDD
381 ORR(AL, 0, parts.count.reg, tx, reg_imm(parts.count.reg, LSL, 16));
383 // parts.count.reg = 0xNNNN0000
385 MOV(AL, 0, parts.count.reg, reg_imm(parts.count.reg, LSL, 16));
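
The scanline prolog (its parameter list appears at 352) is where that packed counter is first built: 363-368 compute the number of pixels to the right edge, iterators.xr - Rx - 1, and 381/385 shift it into the top half, with the dithered path ORing in whatever tx holds (by the look of the 0xNNNNXXDD comment, the dither coordinates) and the undithered path leaving the low half zero (0xNNNN0000). A short restatement of the packing:

    // Packing sketch; 'dither_lo' stands in for tx, and is zero in the
    // undithered case described by the 0xNNNN0000 comment at 383.
    #include <cstdint>

    static uint32_t pack_scanline_counter(uint32_t xr, uint32_t x, uint32_t dither_lo) {
        uint32_t n = xr - x - 1;        // SUB, then SUB imm(1): pixel count minus one
        return (n << 16) | dither_lo;   // ORR(..., tx, count LSL 16) or MOV(count LSL 16)
    }
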
393 parts.cbPtr.setTo(obtainReg(), cb_bits);
395 CONTEXT_ADDR_LOAD(parts.cbPtr.reg, state.buffers.color.data);
397 base_offset(parts.cbPtr, parts.cbPtr, Rs);
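
393-397 set up the destination pointer: parts.cbPtr is tagged with the color-buffer pixel size (cb_bits), loaded with state.buffers.color.data, and then offset through base_offset() using Rs. Assuming Rs holds the linear pixel index of the scanline's first pixel (the listing does not show how it is computed), the effect is roughly:

    // 'pixel_index' is an assumption about what Rs carries here; only the
    // scale-by-pixel-size idea comes from base_offset(parts.cbPtr, parts.cbPtr, Rs).
    #include <cstdint>

    static uint8_t* scanline_dst(uint8_t* color_base, uint32_t pixel_index,
                                 uint32_t cb_bits) {
        return color_base + pixel_index * (cb_bits >> 3);   // bytes per pixel = bits / 8
    }
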
417 parts.z = reg_t(obtainReg());
421 int ydzdy = parts.z.reg;
424 MLA(AL, 0, parts.z.reg, Rx, dzdx, ydzdy);
426 // we're going to index zbase of parts.count
433 ADD(AL, 0, Rs, Rs, reg_imm(parts.count.reg, LSR, 16));
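
417-433 prepare the depth iterator. The MLA at 424 evaluates z at the first pixel, Rx*dzdx plus ydzdy (which, going by the name, is dzdy already scaled by y), and the ADD at 433 folds the remaining-pixel count (count >> 16) into the value that, per the comment at 426, will index zbase. That bias is what lets the loop recover the current depth entry later by subtracting the shrinking count again (see the ADDR_SUB ... LSR 15 at 769). Sketch, with every name taken on trust from the listing:

    // Effect-level sketch of the depth setup, not the emitted instructions.
    #include <cstdint>

    static uint32_t initial_z(uint32_t x, uint32_t dzdx, uint32_t ydzdy) {
        return x * dzdx + ydzdy;            // MLA(AL, 0, parts.z.reg, Rx, dzdx, ydzdy)
    }

    static uint32_t biased_depth_index(uint32_t pixel_index, uint32_t packed_count) {
        // ADD(AL, 0, Rs, Rs, count LSR 16): bias by the pixels remaining so the
        // loop can later compute "zbase - remaining" to reach the current entry.
        return pixel_index + (packed_count >> 16);
    }
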
439 init_textures(parts.coords, reg_t(Rx), reg_t(Ry));
443 init_iterated_color(parts, reg_t(Rx));
447 parts.covPtr.setTo(obtainReg(), 16);
448 CONTEXT_ADDR_LOAD(parts.covPtr.reg, state.buffers.coverage);
449 ADDR_ADD(AL, 0, parts.covPtr.reg, parts.covPtr.reg, reg_imm(Rx, LSL, 1));
456         const fragment_parts_t& parts,
469         parts, component, scratches, regs);
477 downshift(pixel, component, temp, parts.dither);
484         const fragment_parts_t& parts,
509 (parts.texel[i].component_size(component) < dst_size)) {
535 build_iterated_color(fragment, parts, component, regs);
538 build_texture_environment(fragment, parts, component, regs);
562 build_coverage_application(fragment, parts, regs);
565 build_alpha_test(fragment, parts);
604 temp = component_t(parts.iterated, component);
610 temp = component_t(parts.texel[i], component);
629 void GGLAssembler::build_smooth_shade(const fragment_parts_t& parts)
631 if (mSmooth && !parts.iterated_packed) {
636 const int reload = parts.reload;
641 int c = parts.argb[i].reg;
642 int dx = parts.argb_dx[i].reg;
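
build_smooth_shade (629) only does work for smooth, non-packed iterated colors (631): each of the four channels keeps its current value in parts.argb[i] and its per-pixel step in parts.argb_dx[i], and parts.reload (636) appears to flag which of those registers must be refetched from the context before use. Per pixel the update itself is one add per channel, roughly:

    // Channel-stepping sketch; the real code emits one ADD per live channel
    // and honours the reload mask, both of which are glossed over here.
    #include <cstdint>

    static void smooth_shade_step(uint32_t argb[4], const uint32_t argb_dx[4]) {
        for (int i = 0; i < 4; ++i)
            argb[i] += argb_dx[i];   // c = parts.argb[i].reg, dx = parts.argb_dx[i].reg
    }
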
671         const fragment_parts_t& parts, Scratch& regs)
683 LDRH(AL, cf, parts.covPtr.reg, immed8_post(2));
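
Anti-aliased coverage comes in through parts.covPtr: 447-449 aim it at the entry for the scanline's first pixel (Rx scaled by 2, so the entries are presumably half-words, matching the LDRH), and the LDRH at 683 reads one value and post-increments the pointer by 2 for the next pixel. The loaded factor presumably scales the fragment's alpha; a simplified model, with the 1.15 fixed-point format being an assumption:

    // Coverage sketch. Only the half-word load with post-increment comes from
    // the listing; the fixed-point format and the scaling are assumed.
    #include <cstdint>

    static uint16_t read_coverage(const uint16_t*& covPtr) {
        return *covPtr++;                       // LDRH ..., immed8_post(2)
    }

    static uint32_t apply_coverage(uint32_t alpha, uint16_t coverage_1_15) {
        return static_cast<uint32_t>((static_cast<uint64_t>(alpha) * coverage_1_15) >> 15);
    }
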
697         const fragment_parts_t& /*parts*/)
724         const fragment_parts_t& parts, uint32_t mask)
766 int z = parts.z.reg;
769 ADDR_SUB(AL, 0, zbase, zbase, reg_imm(parts.count.reg, LSR, 15));
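
Inside build_depth_test the current depth entry is reached by undoing the bias set up at 433: the ADDR_SUB at 769 subtracts parts.count LSR 15 from zbase, i.e. twice the remaining-pixel count in bytes (assuming the dither bits in the low half never reach bit 15), so the address tracks the pixel as the counter shrinks. A hedged model of the test-and-write, using '<' purely as an example comparison (the real one depends on the depth-function state) and assuming 16-bit entries with z carried in the top 16 bits of parts.z.reg:

    // Model only: entry width, z layout, and the '<' comparison are assumptions.
    #include <cstdint>

    static uint16_t* current_depth_entry(uint16_t* zbase, uint32_t packed_count) {
        return zbase - (packed_count >> 16);   // ADDR_SUB ... count LSR 15 (in bytes)
    }

    static bool depth_test_write(uint16_t* zptr, uint32_t z,
                                 bool do_test, bool do_write) {
        uint16_t newz = static_cast<uint16_t>(z >> 16);
        if (do_test && !(newz < *zptr))        // example compare; state-dependent
            return false;                      // fragment rejected (Z_TEST)
        if (do_write)
            *zptr = newz;                      // Z_WRITE path, cf. 222/226/279
        return true;
    }
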
788 void GGLAssembler::build_iterate_z(const fragment_parts_t& parts)
795 ADD(AL, 0, parts.z.reg, parts.z.reg, dzdx);
799 void GGLAssembler::build_iterate_f(const fragment_parts_t& /*parts*/)
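
Finally, build_iterate_z (788) advances the depth iterator by dzdx once per pixel, and build_iterate_f (799) receives parts only for symmetry, its parameter being commented out in the signature. The per-pixel depth step amounts to:

    // One-line effect of the ADD at 795.
    #include <cstdint>

    static void iterate_z(uint32_t& z, uint32_t dzdx) {
        z += dzdx;    // ADD(AL, 0, parts.z.reg, parts.z.reg, dzdx)
    }
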