/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

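/* Handle SPX (SET PREFIX) interception */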
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

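/* Handle STPX (STORE PREFIX) interception */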
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the current prefix value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

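/* Handle STAP (STORE CPU ADDRESS) interception */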
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

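/* Lazily enable storage key handling and stop intercepting ISKE, SSKE and RRBE */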
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}


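/* Handle ISKE, SSKE and RRBE (storage key instruction) interception */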
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

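/* Handle instructions intercepted because of the IPTE interlock: wait and retry */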
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

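/* Handle TB (TEST BLOCK) interception */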
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

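/* Handle TPI (TEST PENDING INTERRUPTION) interception */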
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int cc, rc;
	u64 addr;

	rc = 0;
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
			rc = -EFAULT;
	}
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (!rc)
		kfree(inti);
	else
		kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
	/* Set condition code and we're done. */
	if (!rc)
		kvm_s390_set_psw_cc(vcpu, cc);
	return rc ? -EFAULT : 0;
}

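/* Handle TSCH (TEST SUBCHANNEL) interception */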
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

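/* Handle STFL (STORE FACILITY LIST) interception */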
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

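/* Handle LPSW (LOAD PSW) interception */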
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

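/* Handle LPSWE (LOAD PSW EXTENDED) interception */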
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

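/* Handle STIDP (STORE CPU ID) interception */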
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

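/* Insert an entry for this KVM guest into the SYSIB 3.2.2 hypervisor data */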
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

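/* Handle STSI (STORE SYSTEM INFORMATION) interception */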
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel;
	 * anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

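/* Handle EPSW (EXTRACT PSW) interception */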
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

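/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */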
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_protection(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

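/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception */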
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 val = 0;
	int reg, rc;
	u64 ga;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga;
	u32 val;
	int reg, rc;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] = val;
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	do {
		val = vcpu->arch.sie_block->gcr[reg];
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

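/* Handle TPROT (TEST PROTECTION) interception */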
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

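/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */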
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}