/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

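/*
 * Free a chunk of ICM pages that was mapped for streaming DMA: unmap
 * the scatterlist (if it was ever mapped) and release each
 * higher-order page allocation.
 */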
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

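/*
 * Free a chunk allocated with dma_alloc_coherent(); each scatterlist
 * entry holds one coherent buffer together with its DMA address.
 */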
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

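/*
 * Free an entire ICM area: walk the chunk list, release each chunk
 * with the method matching how it was allocated, then free the
 * bookkeeping structures themselves.
 */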
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

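/*
 * Allocate one higher-order page block for ICM and record it in a
 * scatterlist entry; the entry is DMA-mapped later, once the chunk
 * fills up or allocation finishes.
 */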
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

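/*
 * Allocate one coherent buffer for ICM.  The DMA address is stored
 * directly in the scatterlist entry, so coherent chunks never need a
 * separate pci_map_sg() pass.
 */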
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

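/*
 * Allocate npages worth of ICM in chunks of up to MLX4_ICM_CHUNK_LEN
 * scatterlist entries.  We start at the largest allocation order and
 * fall back to smaller orders when the allocator fails, so allocation
 * degrades gracefully under memory fragmentation.
 */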
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

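/*
 * Thin wrappers around the MAP_ICM/UNMAP_ICM firmware commands, which
 * attach (or detach) host memory at a given virtual address within
 * the device's ICM address space.
 */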
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

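/*
 * Make sure the ICM chunk backing table entry obj is mapped,
 * allocating and MAP_ICM'ing it on first use.  Chunks are refcounted,
 * so concurrent users of the same chunk share a single mapping.
 */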
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

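/*
 * Drop a reference on the chunk backing table entry obj; on the last
 * put, unmap the chunk from the device and free its memory.
 */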
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mlx4_UNMAP_ICM(dev, table->virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

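/*
 * Return the kernel virtual address of table entry obj, and
 * optionally its DMA address through dma_handle.  Only works for
 * lowmem tables, since highmem pages have no permanent kernel
 * mapping.
 */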
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

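/*
 * Take a reference on every chunk covering objects start..end,
 * unwinding the references already taken if any chunk fails to map.
 * mlx4_table_put_range() is the matching release path.
 */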
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end)
{
	int i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

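/*
 * Set up an ICM table and pre-map the chunks covering the first
 * 'reserved' objects, which belong to the firmware; those chunks get
 * an extra reference so they are never freed at run time.
 */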
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	return -ENOMEM;
}

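/*
 * Tear down an ICM table: unmap and free every chunk that is still
 * mapped, including the reserved chunks pinned at init time.
 */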
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}