lpar.c revision b0d436c739b0d4afcdfe2e97d4d1ee41ea2db62e
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>

#include "plpar_wrappers.h"
#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
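/*
 * H_BULK_REMOVE takes up to eight parameters, interpreted as
 * (control, AVPN) pairs, so each call can remove at most four HPTEs.
 * When fewer than four pairs are queued, the first unused control
 * word must carry HBR_END; see the flush routines below.
 */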


/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR calls this feature "SLB-Buffer", but firmware never
	 * reports it that way.  All SPLPAR systems support the SLB
	 * shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", cpu, hwcpu, ret);
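		/* 2 == DTL_LOG_PREEMPT (lppaca.h): log preemption events only */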
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	/*
	 * Make pHyp happy: it rejects a cache-inhibited (I=1) mapping
	 * that also claims memory coherence (M=1), so clear M for
	 * non-cacheable, non-write-through pages.
	 */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~_PAGE_COHERENT;
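	/*
	 * Under expanded CMO (XCMO), executable pages are candidates
	 * for page coalescing; pass the hint to the hypervisor.
	 */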
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/*
		 * Don't remove a bolted entry: with H_ANDCOND the remove
		 * only succeeds if (HPTE[0] & avpn) == 0, and (0x1UL << 4)
		 * is HPTE_V_BOLTED.
		 */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
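	/* each hash PTE is 16 bytes, hence hpte_count = size_bytes >> 4 */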
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4, and invalidate only the valid entries
	 * that are not in the VRMA.  hpte_count will always be a
	 * multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

/*
 * NOTE: for updatepp ops we are fortunate that the Linux "newpp" bits and
 * the low 3 bits of flags happen to line up, so no transform is needed.
 * We could probably optimize here by assuming the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3 H_BULK_REMOVE
 * hcalls; each hcall removes up to four HPTEs, hence a batch of 12.
 * We also need to make sure that we avoid bouncing the hypervisor
 * tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
					     unsigned char *hpte_slot_array,
					     unsigned long addr, int psize)
{
	int ssize = 0, i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);
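	/* one hpte_slot_array entry per base-page HPTE backing the huge page */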

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

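/* Boot with "bulk_remove=off" to fall back to one H_REMOVE per HPTE. */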
__setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

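/*
 * The hypervisor's CMO page size can be smaller than PAGE_SIZE, so a
 * single Linux page may take several H_PAGE_INIT hints, one per
 * firmware-sized chunk.
 */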
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - Issue H_GET_MPP and unpack its seven return parameters
 * @mpp_data: filled in with the hcall's memory partitioning data
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

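	/*
	 * Per PAPR, retbuf[2] carries the group and pool numbers in its
	 * low four bytes; retbuf[3] packs the two memory weights in its
	 * top two bytes and the unallocated entitlement in the low six.
	 */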
	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

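/*
 * h_get_mpp_x() - Issue H_GET_MPP_X and unpack its first four return
 * parameters (memory-coalescing statistics).
 */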
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}