/*
 * SN Platform GRU Driver
 *
 *            DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
	return uv_blade_processor_id() % GRU_NUM_TFM;
#else
	int cpu = smp_processor_id();
	int id, core;

	core = uv_cpu_core_number(cpu);
	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
	return id;
#endif
}
/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX_ASID is reached, flush the TLB & start over. However, some
 *  asids may still be in use (only a small percentage of them). Search the
 *  active contexts & determine the value of the first asid still in use
 *  ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *   	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit	   				^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
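
/*
 * Illustrative example of the scheme above (values are made up): suppose
 * asids 0x10 and 0x35 are still in use after a wrap. gru_reset_asid_limit()
 * starting at MIN_ASID sets gs_asid = MIN_ASID and gs_asid_limit = 0x10;
 * asids are then handed out sequentially until the limit is hit, at which
 * point the next scan skips past 0x10 and sets the limit to 0x35, and so
 * on until MAX_ASID forces another wrap and a new asid generation.
 */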

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
					asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
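/*
 * Example (illustrative only): with *p == 0xf0 (bits 4-7 free) and n == 2,
 * find_first_bit() picks bits 4 and 5, those bits are cleared in *p,
 * idx[] becomes {4, 5}, and the returned mask is 0x30.
 */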
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
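/*
 * Each gru_mm_struct keeps one gru_mm_tracker per GRU chiplet (indexed by
 * gid): mt_asid is the ASID assigned to this mm on that chiplet,
 * mt_asid_gen is the ASID generation it was assigned in, and mt_ctxbitmap
 * has one bit per context number on that chiplet currently using the mm.
 * ms_asidmap summarizes which chiplets have contexts loaded for the mm so
 * that TLB flushes can be limited to those chiplets.
 */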
static int gru_load_mm_tracker(struct gru_state *gru,
					struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
					struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		if (gts->ts_gms)
			gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
			    *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
	    if (gts->ts_tsid == tsid)
		return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
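/*
 * The allocation is a single buffer: the gru_thread_state header followed
 * by the context save image (CBR_BYTES() + DSR_BYTES() of CB/CBE/DSR data,
 * reached through ts_gdata). Only the header is zeroed here; the save area
 * is filled when the context is unloaded, or zero-filled at load time while
 * ts_data_valid is still 0.
 */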
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	struct gru_mm_struct *gms;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return ERR_PTR(-ENOMEM);

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_tlb_preload_count = tlb_preload_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gms = gru_register_mmu_notifier();
		if (IS_ERR(gms))
			goto err;
		gts->ts_gms = gms;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return ERR_CAST(gms);
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	STAT(vdata_alloc);
	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to create the gts first; if so, the newly allocated
 * gts is dropped and the existing one is used.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
			    vdata->vd_dsr_au_count,
			    vdata->vd_tlb_preload_count,
			    vdata->vd_user_options, tsid);
	if (IS_ERR(gts))
		return gts;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
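/*
 * prefetchw() brings the lines into the cache with write intent, so the
 * handle-copy loops below are less likely to stall on their first access.
 */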
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

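/*
 * Layout of the context save image in ts_gdata, produced by
 * gru_unload_context_data() and consumed by gru_load_context_data():
 * for each allocated CBR (in allocation-map order) the CB handle followed
 * by its CBE handle, then the DSR data for all allocated DSR AUs.
 */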
static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
						GRU_CACHE_LINE_BYTES);
		}
		/* Flush CBE to hide race in context restart */
		mb();
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

	/* CBEs may not be coherent. Flush them from cache */
	for_each_cbr_in_allocation_map(i, &cbrmap, scr)
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
	mb();		/* Let the CL flush complete */

	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

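/*
 * Unload a GRU context: stop it with cch_interrupt_sync(), drop the mm
 * tracker entry (user contexts only), optionally save the CB/CBE/DSR state
 * to ts_gdata, then deallocate the CCH and return the chiplet resources.
 */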
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
		gts, gts->ts_cbr_map, gts->ts_dsr_map);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
		cch->tlb_int_select = 0;	/* For now, ints go to cpu 0 */
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
		gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
		(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}

/*
 * Update fields in an active CCH:
 * 	- retarget interrupts on local blade
 * 	- update sizeavail mask
 */
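/*
 * The CCH is stopped with cch_interrupt() before the fields are updated,
 * then restarted with cch_start(). If the context has been unloaded
 * (gs_gts no longer points at this gts) or is not ACTIVE, nothing is done
 * and 0 is returned.
 */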
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		  (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		    || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all of the following are true:
 * 	- task's GRU context is loaded into a GRU
 * 	- task is using interrupt notification for TLB faults
 * 	- task has migrated to a different cpu on the same blade where
 * 	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Check if a GRU context is allowed to use a specific chiplet. By default
 * a context is assigned to any blade-local chiplet. However, users can
 * override this.
 * 	Returns 1 if assignment allowed, 0 otherwise
 */
static int gru_check_chiplet_assignment(struct gru_state *gru,
					struct gru_thread_state *gts)
{
	int blade_id;
	int chiplet_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	return gru->gs_blade_id == blade_id &&
		(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	if (!gru_check_chiplet_assignment(gru, gts)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}

/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])
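
/*
 * gru_steal_context() below scans contexts in round-robin order starting
 * from the per-blade cursor (bs_lru_gru, bs_lru_ctxnum), so that successive
 * steals tend to spread across chiplets and context numbers rather than
 * always victimizing the same GSEG.
 */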

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (gru_check_chiplet_assignment(gru, gts)) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so
				 * trylock is needed. GTSs are usually not
				 * locked, so the odds of success are high.
				 * If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
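/*
 * The scan below is done without holding gs_lock and picks the candidate
 * chiplet with the fewest active contexts; the resource check is then
 * repeated under gs_lock, and if another thread consumed the resources in
 * the meantime the scan is restarted.
 */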
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (!gru_check_chiplet_assignment(grux, gts))
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * 	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
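/*
 * Fault handler flow: look up the gts for the faulting GSEG, verify its
 * blade/chiplet placement, and if no GRU context is loaded try to assign
 * one. If no chiplet has room, sleep briefly and, after GRU_STEAL_DELAY,
 * steal a context from another GSEG before retrying. Once loaded, the GRU
 * context is mapped into the user address space with remap_pfn_range().
 */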
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}