/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}
out:
	return roundup(size, PAGE_SIZE);
}
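
/*
 * Worked example (illustrative): with 4KiB pages and a 1MiB
 * SECTION_SIZE, a contiguous request of 1MiB + 4KiB rounds up to
 * 2MiB, a 100KiB request rounds up to 128KiB and a 5KiB request
 * rounds up to 8KiB, while a non-contiguous request of any size is
 * only rounded up to PAGE_SIZE granularity.
 */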

static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	/* drop the references taken so far before propagating the error. */
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages,
					bool dirty, bool accessed)
{
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}

static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		/* a missing page array is a real error, not a retry case. */
		if (!buf->pages)
			return -EFAULT;

		pfn = page_to_pfn(buf->pages[page_offset]);
	} else {
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
	}

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
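
/*
 * Worked example (illustrative): for a contiguous buffer with
 * dma_addr 0x20000000 and 4KiB pages, a fault at page_offset 3
 * resolves to pfn (0x20000000 >> 12) + 3 = 0x20003, i.e. physical
 * address 0x20003000.
 */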

static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* add all pages to the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* TODO: handle the UNCACHED memory type here. */

	buf->pages = pages;
	return 0;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages, true, false);
	return ret;
}

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG, release all pages
	 * that were allocated when the object was created.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages, true, false);
	buf->pages = NULL;

	/* TODO: handle the UNCACHED memory type here. */
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table, register the object with it
	 * and return the id through handle so userspace can name the object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!exynos_gem_obj)
		return;

	obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
			exynos_gem_obj->buffer->pages)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
					exynos_gem_obj->buffer);

	exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* validate the flags before they are used to round up the size. */
	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	size = roundup_gem_size(size, flags);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages of the requested size up front if the user
	 * wants physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG)
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
	else
		ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0)
		goto err_release;

	return exynos_gem_obj;

err_release:
	/* also free the wrapper object, which would otherwise leak. */
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
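
/*
 * Usage sketch (illustrative, not part of the driver): an in-kernel
 * caller such as the fbdev code could allocate a contiguous scanout
 * buffer like this; fb_size is a hypothetical, page-aligned size.
 *
 *	struct exynos_drm_gem_obj *gem_obj;
 *
 *	gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, fb_size);
 *	if (IS_ERR(gem_obj))
 *		return PTR_ERR(gem_obj);
 */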

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
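
/*
 * Userspace sketch (illustrative, assuming the drm_exynos_gem_create
 * uapi in drm/exynos_drm.h): allocate a 4MiB non-contiguous buffer
 * and receive a GEM handle back through the same struct.
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4 * 1024 * 1024,
 *		.flags = EXYNOS_BO_NONCONTIG,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
 *		err(1, "GEM create failed");
 *	use req.handle from here on
 */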

void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	/* the lookup reference is kept until exynos_drm_gem_put_dma_addr(). */
	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
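
/*
 * Pairing sketch (illustrative): an in-kernel user that needs the
 * device address of a contiguous buffer must balance the lookup
 * reference that exynos_drm_gem_get_dma_addr() leaves held:
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	program the hardware with *addr, then drop both references:
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, file);
 */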

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	/* direct mappings always get the non-cacheable attribute. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes the memory allocated by user request
	 * or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		/* insert the backing pages into user space one by one. */
		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	/* vm_mmap() encodes a negative errno in the returned address. */
	if (IS_ERR_VALUE(addr))
		return (int)addr;

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
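
/*
 * Userspace sketch (illustrative, assuming the drm_exynos_gem_mmap
 * uapi in drm/exynos_drm.h): the kernel performs vm_mmap() itself
 * and returns the user address in 'mapped', so no mmap() call is
 * needed on the application side.
 *
 *	struct drm_exynos_gem_mmap req = {
 *		.handle = handle,
 *		.size = buf_size,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) < 0)
 *		err(1, "GEM mmap failed");
 *	memset((void *)(unsigned long)req.mapped, 0, buf_size);
 */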

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is called by a user application
	 *	with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = (args->width * args->bpp) >> 3;
	args->size = PAGE_ALIGN(args->pitch * args->height);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
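
/*
 * Dumb-buffer flow (illustrative): a KMS client chains the generic
 * dumb ioctls, which land in the callbacks above and below, and then
 * mmaps through the DRM fd. Sketch for a 1920x1080 XRGB8888 scanout
 * buffer:
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb mreq = { 0 };
 *	void *map;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	mreq.handle = creq.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mreq.offset);
 */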

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for a drm framebuffer.
	 * - this callback is called by a user application
	 *	with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (!exynos_gem_obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them reach 0 then exynos_drm_gem_free_object()
	 * is called through the free callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	/* faults are served by vm_insert_mixed(), so clear VM_PFNMAP. */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}