1/*
2 * Copyright (C) 2013 Huawei Ltd.
3 * Author: Jiang Liu <liuj97@gmail.com>
4 *
5 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18 */
19#include <linux/bitops.h>
20#include <linux/bug.h>
21#include <linux/compiler.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/spinlock.h>
26#include <linux/stop_machine.h>
27#include <linux/types.h>
28#include <linux/uaccess.h>
29
30#include <asm/cacheflush.h>
31#include <asm/debug-monitors.h>
32#include <asm/fixmap.h>
33#include <asm/insn.h>
34
35#define AARCH64_INSN_SF_BIT	BIT(31)
36#define AARCH64_INSN_N_BIT	BIT(22)
37
38static int aarch64_insn_encoding_class[] = {
39	AARCH64_INSN_CLS_UNKNOWN,
40	AARCH64_INSN_CLS_UNKNOWN,
41	AARCH64_INSN_CLS_UNKNOWN,
42	AARCH64_INSN_CLS_UNKNOWN,
43	AARCH64_INSN_CLS_LDST,
44	AARCH64_INSN_CLS_DP_REG,
45	AARCH64_INSN_CLS_LDST,
46	AARCH64_INSN_CLS_DP_FPSIMD,
47	AARCH64_INSN_CLS_DP_IMM,
48	AARCH64_INSN_CLS_DP_IMM,
49	AARCH64_INSN_CLS_BR_SYS,
50	AARCH64_INSN_CLS_BR_SYS,
51	AARCH64_INSN_CLS_LDST,
52	AARCH64_INSN_CLS_DP_REG,
53	AARCH64_INSN_CLS_LDST,
54	AARCH64_INSN_CLS_DP_FPSIMD,
55};
56
57enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
58{
59	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
60}
61
62/* NOP is an alias of HINT */
63bool __kprobes aarch64_insn_is_nop(u32 insn)
64{
65	if (!aarch64_insn_is_hint(insn))
66		return false;
67
68	switch (insn & 0xFE0) {
69	case AARCH64_INSN_HINT_YIELD:
70	case AARCH64_INSN_HINT_WFE:
71	case AARCH64_INSN_HINT_WFI:
72	case AARCH64_INSN_HINT_SEV:
73	case AARCH64_INSN_HINT_SEVL:
74		return false;
75	default:
76		return true;
77	}
78}
79
/* Serializes users of the text-poke fixmap in __aarch64_insn_write(). */
static DEFINE_SPINLOCK(patch_lock);
81
/*
 * Return a writable alias of @addr via the given fixmap slot, so that
 * kernel/module text can be patched even when its normal mapping is
 * read-only.  Must be paired with patch_unmap() and called under
 * patch_lock (see __aarch64_insn_write()).
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	/*
	 * Module text is vmalloc-backed; only take the vmalloc lookup when
	 * module mappings can be read-only (DEBUG_SET_MODULE_RONX),
	 * otherwise the linear-map page is used directly.
	 */
	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else
		page = virt_to_page(addr);

	BUG_ON(!page);
	set_fixmap(fixmap, page_to_phys(page));

	/* Fixmap slot base plus the offset of @addr within its page. */
	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
98
/* Tear down the temporary mapping established by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
103/*
104 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
105 * little-endian.
106 */
107int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
108{
109	int ret;
110	u32 val;
111
112	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
113	if (!ret)
114		*insnp = le32_to_cpu(val);
115
116	return ret;
117}
118
/*
 * Write an already little-endian instruction word to @addr through the
 * text-poke fixmap.  patch_lock serializes concurrent patchers since
 * they all share the same fixmap slot.
 */
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
135
136int __kprobes aarch64_insn_write(void *addr, u32 insn)
137{
138	insn = cpu_to_le32(insn);
139	return __aarch64_insn_write(addr, insn);
140}
141
142static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
143{
144	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
145		return false;
146
147	return	aarch64_insn_is_b(insn) ||
148		aarch64_insn_is_bl(insn) ||
149		aarch64_insn_is_svc(insn) ||
150		aarch64_insn_is_hvc(insn) ||
151		aarch64_insn_is_smc(insn) ||
152		aarch64_insn_is_brk(insn) ||
153		aarch64_insn_is_nop(insn);
154}
155
156/*
157 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
158 * Section B2.6.5 "Concurrent modification and execution of instructions":
159 * Concurrent modification and execution of instructions can lead to the
160 * resulting instruction performing any behavior that can be achieved by
161 * executing any sequence of instructions that can be executed from the
162 * same Exception level, except where the instruction before modification
163 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
164 * or SMC instruction.
165 */
166bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
167{
168	return __aarch64_insn_hotpatch_safe(old_insn) &&
169	       __aarch64_insn_hotpatch_safe(new_insn);
170}
171
172int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
173{
174	u32 *tp = addr;
175	int ret;
176
177	/* A64 instructions must be word aligned */
178	if ((uintptr_t)tp & 0x3)
179		return -EINVAL;
180
181	ret = aarch64_insn_write(tp, insn);
182	if (ret == 0)
183		flush_icache_range((uintptr_t)tp,
184				   (uintptr_t)tp + AARCH64_INSN_SIZE);
185
186	return ret;
187}
188
/* Work item shared between CPUs by aarch64_insn_patch_text_cb(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* number of entries in the arrays above */
	atomic_t	cpu_count;	/* rendezvous counter, see the callback */
};
195
/*
 * stop_machine() callback: exactly one CPU (the first to arrive) performs
 * the patching; every other CPU spins until the patcher signals completion
 * via a second increment of cpu_count, then executes an ISB to discard any
 * stale prefetched instructions.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop at the first failing instruction. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/* Wait until the master has bumped the count past n_cpus. */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
221
222int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
223{
224	struct aarch64_insn_patch patch = {
225		.text_addrs = addrs,
226		.new_insns = insns,
227		.insn_cnt = cnt,
228		.cpu_count = ATOMIC_INIT(0),
229	};
230
231	if (cnt <= 0)
232		return -EINVAL;
233
234	return stop_machine(aarch64_insn_patch_text_cb, &patch,
235			    cpu_online_mask);
236}
237
/*
 * Patch @cnt instructions.  A single hotpatch-safe replacement is written
 * in place and other CPUs are resynchronized with an IPI; all other cases
 * fall back to the stop_machine()-based path.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
265
/*
 * Insert @imm into the immediate field selected by @type within @insn and
 * return the updated instruction word.
 *
 * NOTE(review): an unknown @type returns 0 rather than a break/fault
 * encoding — callers are expected to pass a valid type.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, lomask, himask, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		/*
		 * ADR/ADRP split the immediate: the low 2 bits (immlo) land
		 * in bits [30:29] and the high 19 bits (immhi) in [23:5]
		 * once the combined value below is shifted left by 5.
		 */
		lomask = 0x3;
		himask = 0x7ffff;
		immlo = imm & lomask;
		imm >>= 2;
		immhi = imm & himask;
		imm = (immlo << 24) | (immhi);
		mask = (lomask << 24) | (himask);
		shift = 5;
		break;
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			type);
		return 0;
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
332
/*
 * Insert register number @reg into the 5-bit register field selected by
 * @type (Rd/Rt at bits [4:0], Rn at [9:5], Rt2/Ra at [14:10], Rm at
 * [20:16]) and return the updated instruction.  Returns 0 on an invalid
 * register or field type.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	/* Clear the old 5-bit field, then insert the new register number. */
	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
370
/*
 * Set the load/store size field (bits [31:30]) of @insn according to the
 * access width @type.  Returns 0 for an unknown size.
 */
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
399
/*
 * Compute the byte offset from @pc to @addr for an immediate branch and
 * sanity-check it: both addresses must be word-aligned and the offset
 * must fit in [-range, range).  BUGs on violation rather than returning
 * an error.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
416
/*
 * Generate a B or BL instruction branching from @pc to @addr.  The
 * byte offset is validated by branch_imm_common() and stored as a
 * word offset in the 26-bit immediate field.
 */
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* Immediate branches encode the offset in words, hence >> 2. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
445
/*
 * Generate a CBZ/CBNZ instruction testing @reg and branching from @pc
 * to @addr.  The offset must lie within +/-1M (19-bit word offset).
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* SF bit selects the 64-bit form of the comparison. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* Offset is encoded in words. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
484
/*
 * Generate a conditional branch (B.cond) from @pc to @addr.  The offset
 * must lie within +/-1M; @cond must be a valid condition code.
 */
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	/* Offset is encoded in words. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
501
/* Generate a HINT instruction with the given hint operation @op. */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
506
/* Generate a NOP instruction (the NOP hint). */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
511
/*
 * Generate an indirect branch (BR/BLR/RET) through register @reg,
 * which is encoded in the Rn field.
 */
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
534
/*
 * Generate a register-offset load/store: LDR/STR @reg, [@base, @offset],
 * with access width @size.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	/* Rt = data register, Rn = base address, Rm = offset register. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
565
/*
 * Generate an LDP/STP with pre- or post-indexing:
 * e.g. stp @reg1, @reg2, [@base, #@offset]!
 * The byte @offset is scaled to the register size and stored in the
 * 7-bit immediate field; out-of-range or misaligned offsets BUG.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Scale the byte offset down to register-sized units. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
625
/*
 * Generate ADD/SUB/ADDS/SUBS @dst, @src, #@imm.  @imm must fit in the
 * unshifted 12-bit immediate field (0..4095); violations BUG.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* Only an unshifted 12-bit unsigned immediate is supported. */
	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
670
/*
 * Generate a bitfield-move instruction (BFM/UBFM/SBFM) @dst, @src,
 * #@immr, #@imms.  The immediates must fit the variant's field width
 * (5 bits for 32-bit, 6 bits for 64-bit); violations BUG.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit bitfield moves also require the N bit set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
719
/*
 * Generate MOVZ/MOVK/MOVN @dst, #@imm, lsl #@shift.  @imm must be a
 * 16-bit value and @shift a multiple of 16 valid for the variant;
 * violations BUG.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* hw field at bits [22:21] holds the shift amount divided by 16. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
764
/*
 * Generate a shifted-register ADD/SUB/ADDS/SUBS:
 * @dst = @src op (@reg lsl #@shift).  @shift must be less than the
 * register width; violations BUG.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
814
/*
 * Generate a one-source data-processing instruction (REV16/REV32/REV64)
 * @dst, @src.  REV64 is only valid for the 64-bit variant.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		/* There is no 32-bit REV64. */
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
853
/*
 * Generate a two-source data-processing instruction
 * (UDIV/SDIV/LSLV/LSRV/ASRV/RORV): @dst = @src op @reg.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
903
/*
 * Generate a three-source data-processing instruction (MADD/MSUB):
 * @dst = @src +/- (@reg1 * @reg2).  Note @src goes in the Ra
 * (addend) field, @reg1/@reg2 in Rn/Rm.
 */
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
946
/*
 * Generate a shifted-register logical instruction
 * (AND/BIC/ORR/ORN/EOR/EON/ANDS/BICS):
 * @dst = @src op (@reg lsl #@shift).  @shift must be less than the
 * register width; violations BUG.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1008
/*
 * Return true when the first halfword of a T32 instruction indicates a
 * 32-bit ("wide") encoding, i.e. the halfword is 0xe800 or above.
 */
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
1013
/*
 * Helpers for extracting register numbers and other bit-fields from
 * 32-bit ARM (A32/T32) instructions.
 */
1017u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1018{
1019	return (insn & (0xf << offset)) >> offset;
1020}
1021
1022#define OPC2_MASK	0x7
1023#define OPC2_OFFSET	5
1024u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1025{
1026	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1027}
1028
#define CRM_MASK	0xf
/* Extract the 4-bit CRm field (bits [3:0]) of an MCR instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
1034