lpar.c revision 23f66e2d661b4d3226d16e25910a9e9472ce2410
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/static_key.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
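
/*
 * H_BULK_REMOVE processes up to four (control word, AVPN) doubleword
 * pairs per call, passed in the eight hcall argument registers; the
 * flush loops below pack two words per translation and fire the hcall
 * once eight words are queued.
 */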

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

extern void pSeries_find_serial_port(void);

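/*
 * Register the current cpu's Virtual Processor Area and, where
 * available, its SLB shadow buffer and dispatch trace log with the
 * hypervisor.
 */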
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports it.  All SPLPARs support the SLB shadow buffer.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		/* 2 == DTL_LOG_PREEMPT: log preemption/dispatch events only */
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

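/*
 * Insert an HPTE via H_ENTER.  Returns the slot within the group
 * (with the secondary-hash bit in bit 3), -1 if the group is full, or
 * -2 on any other hypervisor error.
 */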
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	/* Make pHyp happy */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~HPTE_R_M;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

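/*
 * Make room in a full group by evicting a non-bolted entry, starting
 * from a random slot.  Returns a non-negative value on success, or -1
 * if every slot in the group is bolted.
 */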
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

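/*
 * ppc_md.hpte_clear_all: invalidate every valid hash table entry that
 * is not part of the VRMA, which must stay intact because it backs
 * real-mode execution.
 */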
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4 and invalidate only the valid entries
	 * that are not in the VRMA.  hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/* Reset exceptions to big endian */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 */
		if (rc)
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

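/*
 * Find the slot of a bolted HPTE by searching the primary group for a
 * valid entry matching this vpn; returns -1 if no match is found.
 */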
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * A batch of 12 HPTEs takes three H_BULK_REMOVE calls (four HPTEs per
 * call), which limits how long pSeries_lpar_tlbie_lock is held per
 * batch while still avoiding bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

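/*
 * Invalidate "count" huge page HPTEs, batching them through
 * H_BULK_REMOVE when the firmware supports it and falling back to one
 * H_REMOVE per entry otherwise.
 */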
static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

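/*
 * Walk the hpte_slot_array describing a hugepage's HPTEs, compute the
 * slot of each valid entry and invalidate them in batches of
 * PPC64_HUGE_HPTE_BATCH.
 */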
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

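/*
 * Wire the hcall-based (LPAR) hash table operations into ppc_md.
 */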
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;

	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

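/*
 * Tell the hypervisor, one H_PAGE_INIT call per CMO page, the new
 * usage state of every CMO-page-sized chunk backing this allocation.
 */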
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

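/*
 * Preemption is disabled across the traced hcall (entry disables it,
 * exit re-enables it) so that the entry and exit events are accounted
 * on the same cpu.
 */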
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - unpack the seven return words of the H_GET_MPP hcall
 * @mpp_data: structure filled in from the hcall's return buffer
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	/* group and pool numbers are halfwords within retbuf[2] */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	/* the weights are the two most significant bytes of retbuf[3] */
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}