Searched defs:chunk (Results 1 - 25 of 61) sorted by relevance

/drivers/infiniband/hw/mlx4/
doorbell.c
48 struct ib_umem_chunk *chunk; local
76 chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
77 db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
mr.c
83 struct ib_umem_chunk *chunk; local
95 list_for_each_entry(chunk, &umem->chunk_list, list)
96 for (j = 0; j < chunk->nmap; ++j) {
97 len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
99 pages[i++] = sg_dma_address(&chunk->page_list[j]) +
/drivers/net/ethernet/mellanox/mlx4/
icm.h
64 struct mlx4_icm_chunk *chunk; member in struct:mlx4_icm_iter
96 iter->chunk = list_empty(&icm->chunk_list) ?
104 return !iter->chunk;
109 if (++iter->page_idx >= iter->chunk->nsg) {
110 if (iter->chunk->list.next == &iter->icm->chunk_list) {
111 iter->chunk = NULL;
115 iter->chunk = list_entry(iter->chunk->list.next,
123 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
128 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
[all...]
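The icm.h hits above form a two-level iterator: it advances page by page within a chunk's scatterlist, then hops to the next chunk in the list until the walk is done. A minimal standalone sketch of that advance step, using a NULL-terminated singly linked list as a stand-in for the kernel's list_head and scatterlist types:

struct icm_chunk {
	struct icm_chunk *next;   /* stand-in for the kernel's list_head linkage */
	int nsg;                  /* number of mapped scatterlist entries */
};

struct icm_iter {
	struct icm_chunk *chunk;  /* NULL once the walk is exhausted */
	int page_idx;
};

static void icm_iter_next(struct icm_iter *iter)
{
	if (++iter->page_idx >= iter->chunk->nsg) {
		iter->chunk = iter->chunk->next;  /* list end => NULL, like the kernel's iter->chunk = NULL */
		iter->page_idx = 0;
	}
}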
icm.c
47 * per chunk.
54 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) argument
58 if (chunk->nsg > 0)
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
62 for (i = 0; i < chunk->npages; ++i)
63 __free_pages(sg_page(&chunk->mem[i]),
64 get_order(chunk->mem[i].length));
67 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) argument
71 for (i = 0; i < chunk->npages; ++i)
79 struct mlx4_icm_chunk *chunk, *tmp; local
126 struct mlx4_icm_chunk *chunk = NULL; local
286 struct mlx4_icm_chunk *chunk; local
[all...]
/drivers/zorro/
zorro.c
68 * by the system. Every bit represents a 64K chunk, for a maximum of 8MB
100 u32 chunk = start>>Z2RAM_CHUNKSHIFT; local
102 set_bit(chunk, zorro_unused_z2ram);
104 clear_bit(chunk, zorro_unused_z2ram);
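As a standalone illustration of the zorro.c bookkeeping: one bit per 64K chunk of Zorro II RAM, set while the chunk is unused. The shift value and map size below are assumptions taken from the comment above (64K chunks, 8MB maximum), not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define Z2RAM_CHUNKSHIFT 16                                  /* 64K per chunk (assumed) */
#define Z2RAM_CHUNKS (8 * 1024 * 1024 >> Z2RAM_CHUNKSHIFT)   /* 8MB => 128 bits */

static uint32_t zorro_unused_z2ram[Z2RAM_CHUNKS / 32];

static void mark_chunk(uint32_t start, int unused)
{
	uint32_t chunk = start >> Z2RAM_CHUNKSHIFT;

	if (unused)
		zorro_unused_z2ram[chunk / 32] |= 1u << (chunk % 32);    /* set_bit */
	else
		zorro_unused_z2ram[chunk / 32] &= ~(1u << (chunk % 32)); /* clear_bit */
}

int main(void)
{
	mark_chunk(0x00050000, 1);                 /* 64K chunk 5 is unused */
	printf("%08x\n", zorro_unused_z2ram[0]);   /* prints 00000020 */
	return 0;
}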
/drivers/infiniband/core/
umem.c
52 struct ib_umem_chunk *chunk, *tmp; local
55 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
56 ib_dma_unmap_sg(dev, chunk->page_list,
57 chunk->nents, DMA_BIDIRECTIONAL);
58 for (i = 0; i < chunk->nents; ++i) {
59 struct page *page = sg_page(&chunk->page_list[i]);
66 kfree(chunk);
84 struct ib_umem_chunk *chunk; local
166 chunk = kmalloc(sizeof *chunk
281 struct ib_umem_chunk *chunk; local
[all...]
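The umem.c release path is the classic "safe" list walk: the next pointer is saved before the current chunk is freed, which is exactly what list_for_each_entry_safe provides. A sketch of the same pattern over a plain linked list, with the DMA unmapping and per-page puts elided:

#include <stdlib.h>

struct umem_chunk {
	struct umem_chunk *next;
	int nents;
};

static void release_chunks(struct umem_chunk *head)
{
	struct umem_chunk *chunk = head, *tmp;

	while (chunk) {
		tmp = chunk->next;  /* save before freeing, like list_for_each_entry_safe */
		/* ib_dma_unmap_sg() and put_page() per entry would go here */
		free(chunk);
		chunk = tmp;
	}
}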
/drivers/md/
dm-exception-store.h
23 * An exception is used where an old chunk of data has been
27 * chunk within the device.
82 * still-to-be-merged chunk and returns the number of
141 static inline chunk_t dm_chunk_number(chunk_t chunk) argument
143 return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
168 static inline chunk_t dm_chunk_number(chunk_t chunk) argument
170 return chunk;
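The two dm_chunk_number() variants above differ because one build packs flag bits into the top of chunk_t and must mask them off, while the other stores only the number. A compilable sketch of the masking variant; the 56-bit split is an assumption about DM_CHUNK_NUMBER_BITS, so check your tree:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

#define DM_CHUNK_NUMBER_BITS 56   /* low bits hold the chunk number (assumed) */

static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}

int main(void)
{
	chunk_t packed = (3ULL << DM_CHUNK_NUMBER_BITS) | 42;  /* flags = 3, number = 42 */

	printf("%llu\n", (unsigned long long)dm_chunk_number(packed));  /* prints 42 */
	return 0;
}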
dm-stripe.c
34 /* stripe chunk size */
94 * <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+
128 ti->error = "Invalid chunk size";
134 "chunk size";
220 sector_t chunk = offset >> sc->chunk_shift; local
223 *stripe = sector_div(chunk, sc->stripes);
225 *stripe = chunk & sc->stripes_mask;
226 chunk >>= sc->stripes_shift;
229 *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask);
242 *result += sc->chunk_mask + 1; /* next chunk */
[all...]
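The dm-stripe math splits a target-relative sector into a stripe index and a device-relative sector; the mask/shift branch above only applies when the stripe count is a power of two. A userspace sketch of the general branch, with plain 64-bit division standing in for sector_div():

#include <stdint.h>
#include <stdio.h>

static void stripe_map_sector(uint64_t offset, unsigned chunk_shift,
			      unsigned stripes,
			      uint32_t *stripe, uint64_t *result)
{
	uint64_t chunk_mask = (1ULL << chunk_shift) - 1;
	uint64_t chunk = offset >> chunk_shift;  /* which chunk, target-wide */

	*stripe = chunk % stripes;               /* device the chunk lands on */
	chunk /= stripes;                        /* chunk index on that device */
	*result = (chunk << chunk_shift) | (offset & chunk_mask);
}

int main(void)
{
	uint32_t stripe;
	uint64_t sector;

	stripe_map_sector(1000, 3, 4, &stripe, &sector);  /* 8-sector chunks, 4 stripes */
	printf("stripe %u, device sector %llu\n", stripe, (unsigned long long)sector);
	return 0;
}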
raid0.c
262 * chunk size is a multiple of that sector size
314 sector_t chunk; local
321 /* find the sector offset inside the chunk */
324 /* chunk in zone */
325 chunk = *sector_offset;
326 /* quotient is the chunk in real device*/
327 sector_div(chunk, zone->nb_dev << chunksect_bits);
330 chunk = *sector_offset;
331 sector_div(chunk, chunk_sects * zone->nb_dev);
335 * real sector = chunk in device + starting of zone
[all...]
/drivers/s390/cio/
itcw.c
116 * to the placement of the data chunk in memory, and a further
181 void *chunk; local
193 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
194 if (IS_ERR(chunk))
195 return chunk;
196 itcw = chunk;
209 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
210 if (IS_ERR(chunk))
211 return chunk;
212 itcw->tcw = chunk;
[all...]
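itcw.c carves the ITCW, its TCWs and data areas out of one flat buffer; fit_chunk() hands back the next chunk that satisfies the requested alignment, or an error pointer when the buffer is exhausted. A simplified stand-in for that helper (the real s390 version also takes a flag controlling 4K-boundary crossing):

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

/* align must be a power of two */
static void *fit_chunk(uintptr_t *start, uintptr_t end,
		       size_t size, size_t align)
{
	uintptr_t addr = (*start + align - 1) & ~((uintptr_t)align - 1);

	if (addr + size > end)
		return (void *)(uintptr_t)-ENOMEM;  /* stands in for ERR_PTR() */
	*start = addr + size;                       /* advance the carve cursor */
	return (void *)addr;
}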
/drivers/gpu/drm/radeon/
radeon_cs.c
38 struct radeon_cs_chunk *chunk; local
45 chunk = &p->chunks[p->chunk_relocs_idx];
47 p->nrelocs = chunk->length_dw / 4;
60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
/drivers/infiniband/hw/cxgb4/
resource.c
342 PDBG("%s failed to add PBL chunk (%x/%x)\n",
353 PDBG("%s added PBL chunk (%x/%x)\n",
404 PDBG("%s failed to add RQT chunk (%x/%x)\n",
414 PDBG("%s added RQT chunk (%x/%x)\n",
447 unsigned start, chunk, top; local
454 chunk = rdev->lldi.vr->ocq.size;
455 top = start + chunk;
458 chunk = min(top - start + 1, chunk);
459 if (gen_pool_add(rdev->ocqp_pool, start, chunk,
[all...]
mem.c
549 struct ib_umem_chunk *chunk; local
580 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
581 n += chunk->nents;
595 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
596 for (j = 0; j < chunk->nmap; ++j) {
597 len = sg_dma_len(&chunk->page_list[j]) >> shift;
600 &chunk->page_list[j]) +
/drivers/infiniband/hw/ipath/
ipath_mr.c
191 struct ib_umem_chunk *chunk; local
206 list_for_each_entry(chunk, &umem->chunk_list, list)
207 n += chunk->nents;
227 list_for_each_entry(chunk, &umem->chunk_list, list) {
228 for (i = 0; i < chunk->nents; i++) {
231 vaddr = page_address(sg_page(&chunk->page_list[i]));
/drivers/infiniband/hw/mthca/
mthca_memfree.h
76 struct mthca_icm_chunk *chunk; member in struct:mthca_icm_iter
103 iter->chunk = list_empty(&icm->chunk_list) ?
111 return !iter->chunk;
116 if (++iter->page_idx >= iter->chunk->nsg) {
117 if (iter->chunk->list.next == &iter->icm->chunk_list) {
118 iter->chunk = NULL;
122 iter->chunk = list_entry(iter->chunk->list.next,
130 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
135 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
[all...]
mthca_memfree.c
48 * per chunk.
64 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) argument
68 if (chunk->nsg > 0)
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
72 for (i = 0; i < chunk->npages; ++i)
73 __free_pages(sg_page(&chunk->mem[i]),
74 get_order(chunk->mem[i].length));
77 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) argument
81 for (i = 0; i < chunk->npages; ++i)
90 struct mthca_icm_chunk *chunk, *tmp; local
141 struct mthca_icm_chunk *chunk = NULL; local
281 struct mthca_icm_chunk *chunk; local
[all...]
mthca_mr.c
362 int chunk; local
369 chunk = min(size, list_len);
372 buffer_list, chunk);
375 buffer_list, chunk);
377 list_len -= chunk;
378 start_index += chunk;
379 buffer_list += chunk;
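mthca_mr.c pushes a long buffer list to the device at most 'size' entries per mailbox command; the loop above is a standard batching idiom. Sketched standalone, with the hardware command stubbed out:

#include <stdint.h>

static int write_mtt_chunked(uint64_t *buffer_list, int list_len, int size)
{
	int start_index = 0;

	while (list_len > 0) {
		int chunk = list_len < size ? list_len : size;  /* min(size, list_len) */

		/* one mailbox command covering buffer_list[0..chunk-1] would go here */
		list_len    -= chunk;
		start_index += chunk;
		buffer_list += chunk;
	}
	return start_index;  /* total entries written */
}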
mthca_provider.c
979 struct ib_umem_chunk *chunk; local
1014 list_for_each_entry(chunk, &mr->umem->chunk_list, list)
1015 n += chunk->nents;
1033 list_for_each_entry(chunk, &mr->umem->chunk_list, list)
1034 for (j = 0; j < chunk->nmap; ++j) {
1035 len = sg_dma_len(&chunk->page_list[j]) >> shift;
1037 pages[i++] = sg_dma_address(&chunk->page_list[j]) +
/drivers/infiniband/hw/qib/
qib_mr.c
201 struct ib_umem_chunk *chunk; local
216 list_for_each_entry(chunk, &umem->chunk_list, list)
217 n += chunk->nents;
238 list_for_each_entry(chunk, &umem->chunk_list, list) {
239 for (i = 0; i < chunk->nents; i++) {
242 vaddr = page_address(sg_page(&chunk->page_list[i]));
/drivers/net/wireless/wl12xx/
boot.c
105 u8 *p, *chunk; local
119 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
120 if (!chunk) {
121 wl1271_error("allocation for firmware upload chunk failed");
129 /* 10.1 set partition limit and chunk num */
144 /* 10.3 upload the chunk */
147 memcpy(chunk, p, CHUNK_SIZE);
148 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
150 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
155 /* 10.4 upload the last chunk */
[all...]
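boot.c streams the firmware in fixed-size pieces (step 10.3) and finishes with one partial chunk (step 10.4). A sketch of that split; CHUNK_SIZE and the write callback are stand-ins, not the driver's actual values:

#include <stddef.h>
#include <stdint.h>

#define CHUNK_SIZE 16384   /* assumed; see the driver for the real value */

typedef void (*chunk_write_fn)(uint32_t addr, const uint8_t *buf, size_t len);

static void upload_firmware(const uint8_t *fw, size_t fw_len,
			    uint32_t dest, chunk_write_fn write_cb)
{
	size_t off;

	for (off = 0; off + CHUNK_SIZE <= fw_len; off += CHUNK_SIZE)
		write_cb(dest + off, fw + off, CHUNK_SIZE);   /* 10.3: full chunks */

	if (off < fw_len)
		write_cb(dest + off, fw + off, fw_len - off); /* 10.4: the last chunk */
}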
/drivers/staging/line6/
midi.c
50 unsigned char chunk[line6->max_packet_size]; local
57 done = snd_rawmidi_transmit_peek(substream, chunk, req);
63 line6_write_hexdump(line6, 's', chunk, done);
65 line6_midibuf_write(mb, chunk, done);
70 done = line6_midibuf_read(mb, chunk, line6->max_packet_size);
79 send_midi_async(line6, chunk, done);
/drivers/staging/tidspbridge/pmgr/
dmm.c
197 * Add a mapping block to the reserved chunk. DMM assumes that this block
205 struct map_page *chunk; local
209 /* Find the Reserved memory chunk containing the DSP block to
211 chunk = (struct map_page *)get_region(addr);
212 if (chunk != NULL) {
214 chunk->mapped = true;
215 chunk->mapped_size = (size / PG_SIZE4K);
221 "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
229 * Reserve a chunk of virtually contiguous DSP/GPP address space.
286 struct map_page *chunk; local
316 struct map_page *chunk; local
[all...]
/drivers/net/ethernet/sfc/
mtd.c
468 size_t chunk; local
472 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
474 buffer, chunk);
477 offset += chunk;
478 buffer += chunk;
492 size_t chunk = part->mtd.erasesize; local
507 chunk);
510 offset += chunk;
524 size_t chunk; local
535 chunk
[all...]
/drivers/usb/
usb-skeleton.c
309 size_t chunk = min(available, count); local
324 * chunk tells us how much shall be copied
329 chunk))
332 rv = chunk;
334 dev->bulk_in_copied += chunk;
341 skel_do_read_io(dev, count - chunk);
/drivers/char/
mem.c
661 size_t chunk = count; local
663 if (chunk > PAGE_SIZE)
664 chunk = PAGE_SIZE; /* Just for latency reasons */
665 unwritten = __clear_user(buf, chunk);
666 written += chunk - unwritten;
671 buf += chunk;
672 count -= chunk;
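mem.c caps each __clear_user() call at PAGE_SIZE, as the comment says, purely for latency: a huge write to /dev/zero is broken into page-sized pieces instead of one long call. The same capping loop in userspace, with memset() standing in for __clear_user():

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

static size_t zero_fill(char *buf, size_t count)
{
	size_t written = 0;

	while (count) {
		size_t chunk = count > PAGE_SIZE ? PAGE_SIZE : count;

		memset(buf, 0, chunk);  /* stands in for __clear_user() */
		written += chunk;
		buf     += chunk;
		count   -= chunk;
	}
	return written;
}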

Completed in 1696 milliseconds
