1/*
2 * Copyright 2011 Red Hat Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 *    Jerome Glisse <glisse@freedesktop.org>
29 */
30#include "drmP.h"
31#include "drm.h"
32#include "radeon.h"
33
34int radeon_sa_bo_manager_init(struct radeon_device *rdev,
35			      struct radeon_sa_manager *sa_manager,
36			      unsigned size, u32 domain)
37{
38	int r;
39
40	sa_manager->bo = NULL;
41	sa_manager->size = size;
42	sa_manager->domain = domain;
43	INIT_LIST_HEAD(&sa_manager->sa_bo);
44
45	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
46			     RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
47	if (r) {
48		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
49		return r;
50	}
51
52	return r;
53}
54
55void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
56			       struct radeon_sa_manager *sa_manager)
57{
58	struct radeon_sa_bo *sa_bo, *tmp;
59
60	if (!list_empty(&sa_manager->sa_bo)) {
61		dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
62	}
63	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
64		list_del_init(&sa_bo->list);
65	}
66	radeon_bo_unref(&sa_manager->bo);
67	sa_manager->size = 0;
68}
69
70int radeon_sa_bo_manager_start(struct radeon_device *rdev,
71			       struct radeon_sa_manager *sa_manager)
72{
73	int r;
74
75	if (sa_manager->bo == NULL) {
76		dev_err(rdev->dev, "no bo for sa manager\n");
77		return -EINVAL;
78	}
79
80	/* map the buffer */
81	r = radeon_bo_reserve(sa_manager->bo, false);
82	if (r) {
83		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
84		return r;
85	}
86	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
87	if (r) {
88		radeon_bo_unreserve(sa_manager->bo);
89		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
90		return r;
91	}
92	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
93	radeon_bo_unreserve(sa_manager->bo);
94	return r;
95}
96
97int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
98				 struct radeon_sa_manager *sa_manager)
99{
100	int r;
101
102	if (sa_manager->bo == NULL) {
103		dev_err(rdev->dev, "no bo for sa manager\n");
104		return -EINVAL;
105	}
106
107	r = radeon_bo_reserve(sa_manager->bo, false);
108	if (!r) {
109		radeon_bo_kunmap(sa_manager->bo);
110		radeon_bo_unpin(sa_manager->bo);
111		radeon_bo_unreserve(sa_manager->bo);
112	}
113	return r;
114}
115
/*
 * The principle is simple: we keep a list of sub allocations in
 * offset order (the first entry has offset == 0, the last entry has
 * the highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we walk over each sub
 * object looking for a hole in front of it that satisfies
 * object_offset - previous_end >= alloc_size; that hole then becomes
 * the location we return.
 *
 * Alignment can't be bigger than the page size.
 */
/*
 * radeon_sa_bo_new - carve a sub allocation out of the manager's bo
 *
 * First-fit scan of the offset-ordered sa_bo list: the allocation is
 * placed in the first hole in front of an existing entry that is big
 * enough, otherwise after the last entry if enough room remains at
 * the end.  On success sa_bo is filled in (manager, offset, size) and
 * linked into the list right after "head", preserving offset order.
 *
 * Returns 0 on success, -ENOMEM if no hole is large enough.
 *
 * NOTE(review): the BUG_ON only bounds align from above; align == 0
 * would divide by zero in "offset % align" below -- confirm all
 * callers pass a non-zero alignment.
 */
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo *sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_sa_bo *tmp;
	struct list_head *head;
	unsigned offset = 0, wasted = 0;

	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
	BUG_ON(size > sa_manager->size);

	/* no one ?  empty list: allocate at offset 0, insert after the
	 * list head */
	head = sa_manager->sa_bo.prev;
	if (list_empty(&sa_manager->sa_bo)) {
		goto out;
	}

	/* look for a hole big enough; "offset" tracks the aligned end
	 * of the previous entry (0 on the first iteration, which is
	 * trivially aligned) */
	offset = 0;
	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
		/* room before this object ? insert before it */
		if ((tmp->offset - offset) >= size) {
			head = tmp->list.prev;
			goto out;
		}
		/* advance past this entry and round up to align */
		offset = tmp->offset + tmp->size;
		wasted = offset % align;
		if (wasted) {
			wasted = align - wasted;
		}
		offset += wasted;
	}
	/* room at the end ?  recompute the aligned end of the last
	 * entry and check against the manager's total size */
	head = sa_manager->sa_bo.prev;
	tmp = list_entry(head, struct radeon_sa_bo, list);
	offset = tmp->offset + tmp->size;
	wasted = offset % align;
	if (wasted) {
		wasted = align - wasted;
	}
	offset += wasted;
	if ((sa_manager->size - offset) < size) {
		/* failed to find somethings big enough */
		return -ENOMEM;
	}

out:
	/* record the allocation and splice it in after "head" so the
	 * list stays sorted by offset */
	sa_bo->manager = sa_manager;
	sa_bo->offset = offset;
	sa_bo->size = size;
	list_add(&sa_bo->list, head);
	return 0;
}
185
/*
 * radeon_sa_bo_free - release a sub allocation
 *
 * Simply unlinks sa_bo from its manager's list, which makes its
 * range available again to radeon_sa_bo_new().  Nothing is freed:
 * sa_bo itself is owned by the caller.
 */
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
{
	list_del_init(&sa_bo->list);
}
190