/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* x86 32-bit arch dependent functions. */

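/* Emit a one byte opcode followed by a machine word immediate, e.g.
   mov reg, imm32 (opcode B8+rd), as used by sljit_emit_enter below. */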
static sljit_si emit_do_imm(struct sljit_compiler *compiler, sljit_ub opcode, sljit_sw imm)
{
	sljit_ub *inst;

	inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_sw));
	FAIL_IF(!inst);
	INC_SIZE(1 + sizeof(sljit_sw));
	*inst++ = opcode;
	*(sljit_sw*)inst = imm;
	return SLJIT_SUCCESS;
}

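/* Emit the opcode byte(s) of a jump or call with a 32 bit relative
   displacement and advance jump->addr past them. The displacement is
   either resolved immediately (when the target address is already
   known) or the jump is flagged with PATCH_MW and patched later. */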
static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type)
{
	if (type == SLJIT_JUMP) {
		*code_ptr++ = JMP_i32;
		jump->addr++;
	}
	else if (type >= SLJIT_FAST_CALL) {
		*code_ptr++ = CALL_i32;
		jump->addr++;
	}
	else {
		*code_ptr++ = GROUP_0F;
		*code_ptr++ = get_jump_code(type);
		jump->addr += 2;
	}

	if (jump->flags & JUMP_LABEL)
		jump->flags |= PATCH_MW;
	else
		*(sljit_sw*)code_ptr = jump->u.target - (jump->addr + 4);
	code_ptr += 4;

	return code_ptr;
}
65
66SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler,
67	sljit_si options, sljit_si args, sljit_si scratches, sljit_si saveds,
68	sljit_si fscratches, sljit_si fsaveds, sljit_si local_size)
69{
70	sljit_si size;
71	sljit_ub *inst;
72
73	CHECK_ERROR();
74	check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
75
76	compiler->options = options;
77	compiler->scratches = scratches;
78	compiler->saveds = saveds;
79	compiler->fscratches = fscratches;
80	compiler->fsaveds = fsaveds;
81	compiler->args = args;
82	compiler->flags_saved = 0;
83#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
84	compiler->logical_local_size = local_size;
85#endif
86
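	/* Compute the exact prologue size in bytes: one push for TMP_REG1,
	   one push for each saved register in use, plus the movs below that
	   load the incoming arguments. */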
	size = 1 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	size += (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0);
#else
	size += (args > 0 ? (2 + args * 3) : 0);
#endif
	inst = (sljit_ub*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!inst);

	INC_SIZE(size);
	PUSH_REG(reg_map[TMP_REG1]);
#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (args > 0) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_REG | (reg_map[TMP_REG1] << 3) | 0x4 /* esp */;
	}
#endif
	if (saveds > 2 || scratches > 7)
		PUSH_REG(reg_map[SLJIT_S2]);
	if (saveds > 1 || scratches > 8)
		PUSH_REG(reg_map[SLJIT_S1]);
	if (saveds > 0 || scratches > 9)
		PUSH_REG(reg_map[SLJIT_S0]);

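	/* Move the incoming arguments into their saved-register homes. With
	   fastcall the first two arguments arrive in registers and the third
	   is read from the stack; with the default (cdecl) convention every
	   argument is loaded through TMP_REG1, which still holds the stack
	   pointer value captured before the saved registers were pushed. */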
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (args > 0) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2];
	}
	if (args > 1) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1];
	}
	if (args > 2) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */;
		*inst++ = 0x24;
		*inst++ = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */
	}
#else
	if (args > 0) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1];
		*inst++ = sizeof(sljit_sw) * 2;
	}
	if (args > 1) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1];
		*inst++ = sizeof(sljit_sw) * 3;
	}
	if (args > 2) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1];
		*inst++ = sizeof(sljit_sw) * 4;
	}
#endif

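	/* Compute the final frame size. On macOS the stack must stay 16 byte
	   aligned at call sites, so the whole frame (pushed registers,
	   FIXED_LOCALS_OFFSET and the locals) is rounded up to a multiple of
	   16 bytes; elsewhere machine word alignment is sufficient. */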
	SLJIT_COMPILE_ASSERT(FIXED_LOCALS_OFFSET >= (2 + 4) * sizeof(sljit_uw), require_at_least_two_words);
#if defined(__APPLE__)
	/* Ignore pushed registers and FIXED_LOCALS_OFFSET when
	computing the aligned local size. */
	saveds = (2 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
	local_size = ((FIXED_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
#else
	local_size = FIXED_LOCALS_OFFSET + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1));
#endif

	compiler->local_size = local_size;
#ifdef _WIN32
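	/* Windows guards the stack with a single guard page, so a frame that
	   could step over it must be committed page by page; sljit_grow_stack
	   touches each page of the new area in order. */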
	if (local_size > 1024) {
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
		FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
#else
		local_size -= FIXED_LOCALS_OFFSET;
		FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
		FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
			SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, FIXED_LOCALS_OFFSET));
#endif
		FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
	}
#endif

	SLJIT_ASSERT(local_size > 0);
	return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
		SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size);
}

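/* Record the same frame layout as sljit_emit_enter without emitting any
   code; used when code generation continues inside an already entered
   function. */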
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler,
	sljit_si options, sljit_si args, sljit_si scratches, sljit_si saveds,
	sljit_si fscratches, sljit_si fsaveds, sljit_si local_size)
{
	CHECK_ERROR_VOID();
	check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);

	compiler->options = options;
	compiler->scratches = scratches;
	compiler->saveds = saveds;
	compiler->fscratches = fscratches;
	compiler->fsaveds = fsaveds;
	compiler->args = args;
#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
	compiler->logical_local_size = local_size;
#endif

#if defined(__APPLE__)
	saveds = (2 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
	compiler->local_size = ((FIXED_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
#else
	compiler->local_size = FIXED_LOCALS_OFFSET + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1));
#endif
}

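/* Epilogue: release the locals, pop the saved registers in reverse order
   and return. With fastcall and more than two arguments the callee also
   pops the stack-passed argument (ret imm16). */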
SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw)
{
	sljit_si size;
	sljit_ub *inst;

	CHECK_ERROR();
	check_sljit_emit_return(compiler, op, src, srcw);
	SLJIT_ASSERT(compiler->args >= 0);

	compiler->flags_saved = 0;
	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

	SLJIT_ASSERT(compiler->local_size > 0);
	FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
		SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));

	size = 2 + (compiler->scratches > 7 ? (compiler->scratches - 7) : 0) +
		(compiler->saveds <= 3 ? compiler->saveds : 3);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	/* ret imm16 needs two more bytes than the plain ret emitted
	   in every other case below. */
	if (compiler->args > 2)
		size += 2;
#endif
	inst = (sljit_ub*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!inst);

	INC_SIZE(size);

	if (compiler->saveds > 0 || compiler->scratches > 9)
		POP_REG(reg_map[SLJIT_S0]);
	if (compiler->saveds > 1 || compiler->scratches > 8)
		POP_REG(reg_map[SLJIT_S1]);
	if (compiler->saveds > 2 || compiler->scratches > 7)
		POP_REG(reg_map[SLJIT_S2]);
	POP_REG(reg_map[TMP_REG1]);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (compiler->args > 2)
		RET_I16(sizeof(sljit_sw));
	else
		RET();
#else
	RET();
#endif

	return SLJIT_SUCCESS;
}

/* --------------------------------------------------------------------- */
/*  Operators                                                            */
/* --------------------------------------------------------------------- */

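/* Generic instruction encoder: computes the total instruction length
   (prefixes, opcode, ModRM, optional SIB, displacement and immediate),
   reserves that much buffer space, then encodes the operands. The caller
   completes the encoding (opcode byte or ModRM digit) through the
   returned pointer. */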
/* Size contains the flags as well. */
static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si size,
	/* The register or immediate operand. */
	sljit_si a, sljit_sw imma,
	/* The general operand (not immediate). */
	sljit_si b, sljit_sw immb)
{
	sljit_ub *inst;
	sljit_ub *buf_ptr;
	sljit_si flags = size & ~0xf;
	sljit_si inst_size;

	/* Both cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
	/* Size flags not allowed for typed instructions. */
	SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
	/* Both size flags cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
	/* SSE2 and immediate is not possible. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
	SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
		&& (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
		&& (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));

	size &= 0xf;
	inst_size = size;

	if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
		inst_size++;
	if (flags & EX86_PREF_66)
		inst_size++;

	/* Calculate size of b. */
	inst_size += 1; /* mod r/m byte. */
	if (b & SLJIT_MEM) {
		if ((b & REG_MASK) == SLJIT_UNUSED)
			inst_size += sizeof(sljit_sw);
		else if (immb != 0 && !(b & OFFS_REG_MASK)) {
			/* Immediate operand. */
			if (immb <= 127 && immb >= -128)
				inst_size += sizeof(sljit_sb);
			else
				inst_size += sizeof(sljit_sw);
		}

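		/* An esp base always requires a SIB byte, since the ModRM base
		   field value 100 selects the SIB form. */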
		if ((b & REG_MASK) == SLJIT_SP && !(b & OFFS_REG_MASK))
			b |= TO_OFFS_REG(SLJIT_SP);

		if ((b & OFFS_REG_MASK) != SLJIT_UNUSED)
			inst_size += 1; /* SIB byte. */
	}

	/* Calculate size of a. */
	if (a & SLJIT_IMM) {
		if (flags & EX86_BIN_INS) {
			if (imma <= 127 && imma >= -128) {
				inst_size += 1;
				flags |= EX86_BYTE_ARG;
			} else
				inst_size += 4;
		}
		else if (flags & EX86_SHIFT_INS) {
			imma &= 0x1f;
			if (imma != 1) {
				inst_size++;
				flags |= EX86_BYTE_ARG;
			}
		} else if (flags & EX86_BYTE_ARG)
			inst_size++;
		else if (flags & EX86_HALF_ARG)
			inst_size += sizeof(short);
		else
			inst_size += sizeof(sljit_sw);
	}
	else
		SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);

	inst = (sljit_ub*)ensure_buf(compiler, 1 + inst_size);
	PTR_FAIL_IF(!inst);

	/* Account for the whole instruction and emit the optional prefixes. */
	INC_SIZE(inst_size);
	if (flags & EX86_PREF_F2)
		*inst++ = 0xf2;
	if (flags & EX86_PREF_F3)
		*inst++ = 0xf3;
	if (flags & EX86_PREF_66)
		*inst++ = 0x66;

	/* Skip the opcode byte(s); buf_ptr points at the ModRM byte. */
	buf_ptr = inst + size;

	/* Encode mod/rm byte. */
	if (!(flags & EX86_SHIFT_INS)) {
		if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
			*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;

		if ((a & SLJIT_IMM) || (a == 0))
			*buf_ptr = 0;
		else if (!(flags & EX86_SSE2_OP1))
			*buf_ptr = reg_map[a] << 3;
		else
			*buf_ptr = a << 3;
	}
	else {
		if (a & SLJIT_IMM) {
			if (imma == 1)
				*inst = GROUP_SHIFT_1;
			else
				*inst = GROUP_SHIFT_N;
		} else
			*inst = GROUP_SHIFT_CL;
		*buf_ptr = 0;
	}

	if (!(b & SLJIT_MEM))
		*buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_map[b] : b);
	else if ((b & REG_MASK) != SLJIT_UNUSED) {
		if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
			if (immb != 0) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr |= 0x40;
				else
					*buf_ptr |= 0x80;
			}

			if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
				*buf_ptr++ |= reg_map[b & REG_MASK];
			else {
				*buf_ptr++ |= 0x04;
				*buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3);
			}

			if (immb != 0) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr++ = immb; /* 8 bit displacement. */
				else {
					*(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */
					buf_ptr += sizeof(sljit_sw);
				}
			}
		}
		else {
			*buf_ptr++ |= 0x04;
			*buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3) | (immb << 6);
		}
	}
	else {
		*buf_ptr++ |= 0x05;
		*(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */
		buf_ptr += sizeof(sljit_sw);
	}

	if (a & SLJIT_IMM) {
		if (flags & EX86_BYTE_ARG)
			*buf_ptr = imma;
		else if (flags & EX86_HALF_ARG)
			*(short*)buf_ptr = imma;
		else if (!(flags & EX86_SHIFT_INS))
			*(sljit_sw*)buf_ptr = imma;
	}

	return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
}

/* --------------------------------------------------------------------- */
/*  Call / return instructions                                           */
/* --------------------------------------------------------------------- */

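/* Shuffle the argument registers into the positions required by the
   called function. Fastcall expects the first two arguments in ecx and
   edx: SLJIT_R1 is already edx, so only SLJIT_R0 is copied into
   SLJIT_R2 (ecx), and a third argument is pushed. With cdecl every
   argument is stored into the outgoing stack area. */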
static SLJIT_INLINE sljit_si call_with_args(struct sljit_compiler *compiler, sljit_si type)
{
	sljit_ub *inst;

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	inst = (sljit_ub*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2);
	FAIL_IF(!inst);
	INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2);

	if (type >= SLJIT_CALL3)
		PUSH_REG(reg_map[SLJIT_R2]);
	*inst++ = MOV_r_rm;
	*inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
#else
	inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0));
	FAIL_IF(!inst);
	INC_SIZE(4 * (type - SLJIT_CALL0));

	*inst++ = MOV_rm_r;
	*inst++ = MOD_DISP8 | (reg_map[SLJIT_R0] << 3) | 0x4 /* SIB */;
	*inst++ = (0x4 /* none */ << 3) | reg_map[SLJIT_SP];
	*inst++ = 0;
	if (type >= SLJIT_CALL2) {
		*inst++ = MOV_rm_r;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_R1] << 3) | 0x4 /* SIB */;
		*inst++ = (0x4 /* none */ << 3) | reg_map[SLJIT_SP];
		*inst++ = sizeof(sljit_sw);
	}
	if (type >= SLJIT_CALL3) {
		*inst++ = MOV_rm_r;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_R2] << 3) | 0x4 /* SIB */;
		*inst++ = (0x4 /* none */ << 3) | reg_map[SLJIT_SP];
		*inst++ = 2 * sizeof(sljit_sw);
	}
#endif
	return SLJIT_SUCCESS;
}

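/* Entry point of a function invoked with sljit's lightweight (fast) call
   convention: pop the return address pushed by the call into dst. */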
SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw)
{
	sljit_ub *inst;

	CHECK_ERROR();
	check_sljit_emit_fast_enter(compiler, dst, dstw);
	ADJUST_LOCAL_OFFSET(dst, dstw);

	CHECK_EXTRA_REGS(dst, dstw, (void)0);

	/* An UNUSED dst is uncommon, but possible: pop into TMP_REG1
	   so the value is simply dropped. */
	if (dst == SLJIT_UNUSED)
		dst = TMP_REG1;

	if (FAST_IS_REG(dst)) {
		inst = (sljit_ub*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);

		INC_SIZE(1);
		POP_REG(reg_map[dst]);
		return SLJIT_SUCCESS;
	}

	/* Memory. */
	inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
	FAIL_IF(!inst);
	*inst++ = POP_rm;
	return SLJIT_SUCCESS;
}

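/* Counterpart of sljit_emit_fast_enter: push the saved return address
   (register, memory or immediate form) back onto the stack and return
   through it. */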
SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw)
{
	sljit_ub *inst;

	CHECK_ERROR();
	check_sljit_emit_fast_return(compiler, src, srcw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	CHECK_EXTRA_REGS(src, srcw, (void)0);

	if (FAST_IS_REG(src)) {
		inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1);
		FAIL_IF(!inst);

		INC_SIZE(1 + 1);
		PUSH_REG(reg_map[src]);
	}
	else if (src & SLJIT_MEM) {
		inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_FF;
		*inst |= PUSH_rm;

		inst = (sljit_ub*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);
		INC_SIZE(1);
	}
	else {
		/* SLJIT_IMM. */
		inst = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1);
		FAIL_IF(!inst);

		INC_SIZE(5 + 1);
		*inst++ = PUSH_i32;
		*(sljit_sw*)inst = srcw;
		inst += sizeof(sljit_sw);
	}

	RET();
	return SLJIT_SUCCESS;
}