Lines Matching refs:sgl

/**
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 *
 * Allocates memory for sgl and overlapping pages. Pages which might
 * overlap other user-space memory blocks are cached, so their contents
 * can be copied in before and copied back out after the DMA transfer.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                          void __user *user_addr, size_t user_size)
{
        /* ... */
        sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
        sgl->fpage_size = min_t(size_t, PAGE_SIZE - sgl->fpage_offs, user_size);
        sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
        sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

        /* ... debug print of the computed layout, trailing arguments: */
                __func__, user_addr, user_size, sgl->nr_pages,
                sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

        sgl->user_addr = user_addr;
        sgl->user_size = user_size;
        sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

        if (get_order(sgl->sgl_size) > MAX_ORDER) {
                /* ... */
        }
        sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
                                             &sgl->sgl_dma_addr);
        if (sgl->sgl == NULL) {
                /* ... */
        }

        /* cache the first page if it is only partially covered */
        if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
                sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
                                                       &sgl->fpage_dma_addr);
                if (sgl->fpage == NULL)
                        /* ... */
                if (copy_from_user(sgl->fpage + sgl->fpage_offs,
                                   user_addr, sgl->fpage_size)) {
                        /* ... */
                }
        }

        /* cache the last page if it is only partially covered */
        if (sgl->lpage_size != 0) {
                sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
                                                       &sgl->lpage_dma_addr);
                if (sgl->lpage == NULL)
                        /* ... */
                if (copy_from_user(sgl->lpage, user_addr + user_size -
                                   sgl->lpage_size, sgl->lpage_size)) {
                        /* ... */
                }
        }

        /* error path (labels elided): undo the allocations made so far */
        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                 sgl->fpage_dma_addr);
        /* ... */
        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);
        /* ... */
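As a worked example of the layout computation at the top of genwqe_alloc_sync_sgl() (the concrete numbers are hypothetical and assume a 4 KiB PAGE_SIZE; they are not taken from the driver):

/*
 * Hypothetical input: user_addr starts at offset 0x100 inside a page,
 * user_size = 0x2300 bytes, PAGE_SIZE = 0x1000.
 *
 *   fpage_offs = 0x100
 *   fpage_size = min(0x1000 - 0x100, 0x2300)          = 0xf00
 *   nr_pages   = DIV_ROUND_UP(0x100 + 0x2300, 0x1000) = 3
 *   lpage_size = (0x2300 - 0xf00) % 0x1000            = 0x400
 *
 * Page 0 carries 0xf00 bytes, page 1 a full 0x1000, page 2 the last
 * 0x400 (0xf00 + 0x1000 + 0x400 == 0x2300). Both the first and the last
 * page are only partially covered, so both would be cached via
 * __genwqe_alloc_consistent() and filled with copy_from_user().
 */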
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                     /* ... */)
{
        /* ... */
        size_t size = sgl->user_size;
        /* ... */
        map_offs = sgl->fpage_offs;     /* offset in first page */
        s = &sgl->sgl[0];               /* first set of 8 entries */
        /* ... */
        while (p < sgl->nr_pages) {
                /* ... entry pointing at the next 8-entry block */
                s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
                /* ... use the cached copy for a partial first/last page */
                if ((p == 0) && (sgl->fpage != NULL)) {
                        daddr = sgl->fpage_dma_addr + map_offs;
                } else if ((p == sgl->nr_pages - 1) &&
                           (sgl->lpage != NULL)) {
                        daddr = sgl->lpage_dma_addr;
                }
                /* ... */
                if (p == sgl->nr_pages)
                        /* ... */
                if (p == sgl->nr_pages)
                        /* ... */
        }
        /* ... */
        s -= 8;         /* full shift needed on previous sgl block */
        /* ... */
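The entry written at the top of the loop (target_addr = sgl->sgl_dma_addr + dma_offs) points back into the sgl buffer itself, which suggests the list is built as chained blocks of eight entries: the first entry of a block references the following block, the remaining entries describe data pages, and the "s -= 8" fixup steps back into the previous block. A small stand-alone sketch of that layout idea (entry size, field names and the omitted cpu_to_be64() byte-order conversion are assumptions for illustration, not the driver's definitions):

#include <stdint.h>

/* Placeholder for the hardware's scatter-gather entry format (assumed). */
struct sketch_entry {
        uint64_t target_addr;   /* chain target or data page bus address */
        uint32_t len;
        uint32_t flags;
};

#define SKETCH_ENTRIES_PER_BLOCK 8

/* Bus address at which block 'b' starts inside the contiguous sgl buffer. */
static uint64_t sketch_block_addr(uint64_t sgl_dma_addr, unsigned int b)
{
        return sgl_dma_addr +
               (uint64_t)b * SKETCH_ENTRIES_PER_BLOCK * sizeof(struct sketch_entry);
}

/* Entry 0 of block 'b' chains to block 'b + 1'; entries 1..7 carry data. */
static void sketch_chain_block(struct sketch_entry *block,
                               uint64_t sgl_dma_addr, unsigned int b)
{
        block[0].target_addr = sketch_block_addr(sgl_dma_addr, b + 1);
}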
/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 *
 * After the DMA transfer has completed, this frees the memory for
 * the sgl and the cached pages. Data is transferred from the cached
 * pages back into the user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
        /* ... */
        if (sgl->fpage) {
                /* copy the cached first page back to user space */
                if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
                                 sgl->fpage_size)) {
                        /* ... */
                }
                __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                         sgl->fpage_dma_addr);
                sgl->fpage = NULL;
                sgl->fpage_dma_addr = 0;
        }
        if (sgl->lpage) {
                /* copy the cached last page back to user space */
                if (copy_to_user(sgl->user_addr + sgl->user_size -
                                 sgl->lpage_size, sgl->lpage,
                                 sgl->lpage_size)) {
                        /* ... */
                }
                __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
                                         sgl->lpage_dma_addr);
                sgl->lpage = NULL;
                sgl->lpage_dma_addr = 0;
        }
        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);

        sgl->sgl = NULL;
        sgl->sgl_dma_addr = 0x0;
        sgl->sgl_size = 0;
        /* ... */
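Taken together, the matched functions cover the life cycle of one user buffer: allocate and cache, build the hardware list, run the transfer, then copy back and free. A minimal, hypothetical caller might look like the following (sketch only, assuming the driver-internal headers; the dma_list argument to genwqe_setup_sgl() and the DDCB submission step are assumptions, since those lines are not among the matches, and error handling is abbreviated):

static int example_sgl_transfer(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                                void __user *uaddr, size_t usize,
                                dma_addr_t *dma_list)
{
        int rc;

        rc = genwqe_alloc_sync_sgl(cd, sgl, uaddr, usize);
        if (rc != 0)
                return rc;

        rc = genwqe_setup_sgl(cd, sgl, dma_list);       /* build the chained list */
        if (rc != 0)
                goto out;

        /* ... submit the DDCB request that references sgl->sgl_dma_addr ... */

 out:
        genwqe_free_sync_sgl(cd, sgl);  /* copies cached pages back, frees sgl */
        return rc;
}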