Searched refs:chunk (Results 1 - 25 of 62) sorted by relevance


/drivers/s390/cio/
itcw.c 116 * to the placement of the data chunk in memory, and a further
181 void *chunk; local
193 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
194 if (IS_ERR(chunk))
195 return chunk;
196 itcw = chunk;
209 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
210 if (IS_ERR(chunk))
211 return chunk;
212 itcw->tcw = chunk;
[all...]
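
The itcw.c hits above show fit_chunk() carving differently aligned control blocks (the itcw, then a 64-byte-aligned tcw) out of one preallocated buffer, with IS_ERR() guarding each placement. A minimal userspace sketch of that placement pattern, assuming a power-of-two alignment; fit_chunk_sketch and the sizes in main() are illustrative, and NULL stands in for the driver's ERR_PTR() return:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Carve an aligned object of `size` bytes out of [*start, end),
     * advancing *start past it. NULL means "does not fit". */
    static void *fit_chunk_sketch(uintptr_t *start, uintptr_t end,
                                  size_t size, size_t align)
    {
        uintptr_t addr = (*start + align - 1) & ~(uintptr_t)(align - 1);

        if (addr + size > end)
            return NULL;
        *start = addr + size;
        return (void *)addr;
    }

    int main(void)
    {
        static char area[256];
        uintptr_t start = (uintptr_t)area, end = start + sizeof(area);

        void *a = fit_chunk_sketch(&start, end, 24, 1);  /* unaligned, like the itcw */
        void *b = fit_chunk_sketch(&start, end, 64, 64); /* 64-byte aligned, like the tcw */
        printf("a=%p b=%p\n", a, b);
        return 0;
    }
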
/drivers/infiniband/core/
umem.c 52 struct ib_umem_chunk *chunk, *tmp; local
55 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
56 ib_dma_unmap_sg(dev, chunk->page_list,
57 chunk->nents, DMA_BIDIRECTIONAL);
58 for (i = 0; i < chunk->nents; ++i) {
59 struct page *page = sg_page(&chunk->page_list[i]);
66 kfree(chunk);
84 struct ib_umem_chunk *chunk; local
166 chunk = kmalloc(sizeof *chunk
281 struct ib_umem_chunk *chunk; local
[all...]
/drivers/net/ethernet/mellanox/mlx4/
icm.c 47 * per chunk.
54 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) argument
58 if (chunk->nsg > 0)
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
62 for (i = 0; i < chunk->npages; ++i)
63 __free_pages(sg_page(&chunk->mem[i]),
64 get_order(chunk->mem[i].length));
67 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) argument
71 for (i = 0; i < chunk
79 struct mlx4_icm_chunk *chunk, *tmp; local
126 struct mlx4_icm_chunk *chunk = NULL; local
286 struct mlx4_icm_chunk *chunk; local
[all...]
icm.h 64 struct mlx4_icm_chunk *chunk; member in struct:mlx4_icm_iter
96 iter->chunk = list_empty(&icm->chunk_list) ?
104 return !iter->chunk;
109 if (++iter->page_idx >= iter->chunk->nsg) {
110 if (iter->chunk->list.next == &iter->icm->chunk_list) {
111 iter->chunk = NULL;
115 iter->chunk = list_entry(iter->chunk->list.next,
123 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
128 return sg_dma_len(&iter->chunk
[all...]
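
Both this mlx4 icm.h excerpt and the mthca_memfree.h one further down iterate ICM memory as a linked list of chunks, each holding an array of scatterlist entries, presented to callers as one flat sequence. A hedged sketch of that iterator shape with plain arrays in place of scatterlists; all names here are illustrative, not the drivers' API:

    #include <stdio.h>

    struct chunk_sketch {
        struct chunk_sketch *next;  /* the chunk_list link */
        int nsg;                    /* entries used in this chunk */
        unsigned long mem[4];       /* stands in for the scatterlist */
    };

    struct iter_sketch {
        struct chunk_sketch *chunk;
        int page_idx;
    };

    static void iter_init(struct iter_sketch *it, struct chunk_sketch *head)
    {
        it->chunk = head;           /* an empty list starts the iterator "done" */
        it->page_idx = 0;
    }

    static int iter_done(const struct iter_sketch *it)
    {
        return !it->chunk;
    }

    static void iter_next(struct iter_sketch *it)
    {
        if (++it->page_idx >= it->chunk->nsg) { /* chunk exhausted */
            it->chunk = it->chunk->next;        /* NULL at end of list */
            it->page_idx = 0;
        }
    }

    int main(void)
    {
        struct chunk_sketch b = { NULL, 2, { 30, 40 } };
        struct chunk_sketch a = { &b, 3, { 0, 10, 20 } };
        struct iter_sketch it;

        for (iter_init(&it, &a); !iter_done(&it); iter_next(&it))
            printf("%lu ", it.chunk->mem[it.page_idx]); /* 0 10 20 30 40 */
        printf("\n");
        return 0;
    }
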
mr.c 559 int chunk; local
568 chunk = min_t(int, max_mtts_first_page, npages);
571 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
574 npages -= chunk;
575 start_index += chunk;
576 page_list += chunk;
578 chunk = min_t(int, mtts_per_page, npages);
588 int chunk; local
602 chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
606 for (i = 0; i < chunk;
[all...]
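
The mr.c loop above pushes a long MTT page list to the device in pieces no larger than one command mailbox holds, shrinking npages and advancing start_index by `chunk` each round. A small sketch of the same splitting; MAX_PER_MAILBOX and write_mtt_chunk_sketch are made-up stand-ins for the mailbox capacity and mlx4_write_mtt_chunk():

    #include <stdio.h>

    #define MAX_PER_MAILBOX 6   /* illustrative mailbox capacity */

    static int write_mtt_chunk_sketch(int start_index, int n,
                                      const unsigned long *pages)
    {
        printf("write %d entries at index %d (first=0x%lx)\n",
               n, start_index, pages[0]);
        return 0;
    }

    int main(void)
    {
        unsigned long page_list[15];
        int npages = 15, start_index = 0, chunk, err, i;

        for (i = 0; i < npages; ++i)
            page_list[i] = 0x1000UL * i;

        while (npages > 0) {
            /* never exceed mailbox capacity, mirroring the min_t() above */
            chunk = npages < MAX_PER_MAILBOX ? npages : MAX_PER_MAILBOX;
            err = write_mtt_chunk_sketch(start_index, chunk,
                                         page_list + start_index);
            if (err)
                return err;
            npages -= chunk;
            start_index += chunk;
        }
        return 0;
    }
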
/drivers/staging/tidspbridge/pmgr/
dmm.c 197 * Add a mapping block to the reserved chunk. DMM assumes that this block
205 struct map_page *chunk; local
209 /* Find the Reserved memory chunk containing the DSP block to
211 chunk = (struct map_page *)get_region(addr);
212 if (chunk != NULL) {
214 chunk->mapped = true;
215 chunk->mapped_size = (size / PG_SIZE4K);
221 "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
229 * Reserve a chunk o
286 struct map_page *chunk; local
316 struct map_page *chunk; local
[all...]
/drivers/infiniband/hw/mthca/
mthca_memfree.h 76 struct mthca_icm_chunk *chunk; member in struct:mthca_icm_iter
103 iter->chunk = list_empty(&icm->chunk_list) ?
111 return !iter->chunk;
116 if (++iter->page_idx >= iter->chunk->nsg) {
117 if (iter->chunk->list.next == &iter->icm->chunk_list) {
118 iter->chunk = NULL;
122 iter->chunk = list_entry(iter->chunk->list.next,
130 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
135 return sg_dma_len(&iter->chunk
[all...]
mthca_memfree.c 48 * per chunk.
64 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) argument
68 if (chunk->nsg > 0)
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
72 for (i = 0; i < chunk->npages; ++i)
73 __free_pages(sg_page(&chunk->mem[i]),
74 get_order(chunk->mem[i].length));
77 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) argument
81 for (i = 0; i < chunk
90 struct mthca_icm_chunk *chunk, *tmp; local
141 struct mthca_icm_chunk *chunk = NULL; local
281 struct mthca_icm_chunk *chunk; local
[all...]
/drivers/infiniband/hw/mlx4/
doorbell.c 48 struct ib_umem_chunk *chunk; local
76 chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
77 db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
mr.c 83 struct ib_umem_chunk *chunk; local
95 list_for_each_entry(chunk, &umem->chunk_list, list)
96 for (j = 0; j < chunk->nmap; ++j) {
97 len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
99 pages[i++] = sg_dma_address(&chunk->page_list[j]) +
/drivers/md/
dm-exception-store.h 23 * An exception is used where an old chunk of data has been
27 * chunk within the device.
82 * still-to-be-merged chunk and returns the number of
141 static inline chunk_t dm_chunk_number(chunk_t chunk) argument
143 return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
168 static inline chunk_t dm_chunk_number(chunk_t chunk) argument
170 return chunk;
dm-snap.c 141 chunk_t chunk)
143 return chunk << store->chunk_shift;
175 * For writing a complete chunk, bypassing the copy.
191 chunk_t chunk; member in struct:dm_snap_tracked_chunk
197 chunk_t chunk)
203 c->chunk = chunk;
207 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
225 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) argument
234 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], nod
140 chunk_to_sector(struct dm_exception_store *store, chunk_t chunk) argument
196 track_chunk(struct dm_snapshot *s, chunk_t chunk) argument
250 __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) argument
560 exception_hash(struct dm_exception_table *et, chunk_t chunk) argument
574 dm_lookup_exception(struct dm_exception_table *et, chunk_t chunk) argument
1520 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) argument
1539 __find_pending_exception(struct dm_snapshot *s, struct dm_snap_pending_exception *pe, chunk_t chunk) argument
1566 remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) argument
1583 chunk_t chunk; local
1697 chunk_t chunk; local
1937 chunk_t chunk; local
[all...]
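
The dm-snap and dm-exception-store hits convert between chunks and sectors by shifting, and dm_chunk_number() masks the chunk field out of a chunk_t so the high bits stay free for flags. A sketch of that arithmetic; the bit width and chunk_shift below are assumptions, not values from the driver:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    #define CHUNK_NUMBER_BITS 56  /* assumed stand-in for DM_CHUNK_NUMBER_BITS */

    static uint64_t chunk_to_sector(unsigned chunk_shift, chunk_t chunk)
    {
        return chunk << chunk_shift;  /* a chunk is 2^chunk_shift sectors */
    }

    static chunk_t chunk_number(chunk_t chunk)
    {
        return chunk & ((1ULL << CHUNK_NUMBER_BITS) - 1ULL);
    }

    int main(void)
    {
        unsigned chunk_shift = 3;      /* say, 8 sectors (4 KiB) per chunk */
        chunk_t c = (1ULL << 60) | 5;  /* a high bit carries a flag */

        printf("chunk %llu starts at sector %llu\n",
               (unsigned long long)chunk_number(c),
               (unsigned long long)chunk_to_sector(chunk_shift,
                                                   chunk_number(c)));
        return 0;
    }
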
bitmap.c 9 * - changes to allow various bitmap chunk sizes
682 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
686 static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk) argument
689 chunk += sizeof(bitmap_super_t) << 3;
690 return chunk >> PAGE_BIT_SHIFT;
694 static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk) argument
697 chunk += sizeof(bitmap_super_t) << 3;
698 return chunk & (PAGE_BITS - 1);
709 unsigned long chunk)
711 if (file_page_index(bitmap, chunk) >
708 filemap_get_page(struct bitmap *bitmap, unsigned long chunk) argument
838 unsigned long chunk = block >> bitmap->chunkshift; local
1084 sector_t chunk = offset >> bitmap->chunkshift; local
1253 sector_t chunk = offset >> bitmap->chunkshift; variable
1567 unsigned long chunk; local
[all...]
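
The bitmap.c helpers above find the on-disk bit for a chunk: the superblock's bits are skipped first, then the bit index splits into a file page number and an in-page bit offset. A sketch under assumed sizes (the real code only adds the superblock offset for file-backed bitmaps):

    #include <stdio.h>

    #define PAGE_BITS_SK   (4096UL * 8)  /* bits per 4 KiB page */
    #define SUPER_BYTES_SK 256UL         /* stands in for sizeof(bitmap_super_t) */

    static unsigned long file_page_index_sk(unsigned long chunk)
    {
        chunk += SUPER_BYTES_SK << 3;    /* skip the superblock's bits */
        return chunk / PAGE_BITS_SK;
    }

    static unsigned long file_page_offset_sk(unsigned long chunk)
    {
        chunk += SUPER_BYTES_SK << 3;
        return chunk % PAGE_BITS_SK;     /* bit offset within that page */
    }

    int main(void)
    {
        unsigned long chunk = 40000;
        printf("chunk %lu -> page %lu, bit %lu\n", chunk,
               file_page_index_sk(chunk), file_page_offset_sk(chunk));
        return 0;
    }
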
/drivers/s390/block/
dasd_int.h 556 struct dasd_mchunk *chunk; local
559 chunk = (struct dasd_mchunk *) mem;
560 chunk->size = size - sizeof(struct dasd_mchunk);
561 list_add(&chunk->list, chunk_list);
567 struct dasd_mchunk *chunk, *tmp; local
570 list_for_each_entry(chunk, chunk_list, list) {
571 if (chunk->size < size)
573 if (chunk->size > size + sizeof(struct dasd_mchunk)) {
574 char *endaddr = (char *) (chunk + 1) + chunk
589 struct dasd_mchunk *chunk, *tmp; local
[all...]
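
dasd_int.h implements a small first-fit allocator over a free list: take the first chunk at least `size` bytes big, and when the surplus can still hold a header, split it back onto the list. A hedged, singly linked sketch of that logic (the driver uses list_head and takes a lock; neither is shown):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mchunk_sk {
        struct mchunk_sk *next;
        size_t size;                 /* usable bytes after this header */
    };

    static void *alloc_chunk_sk(struct mchunk_sk **head, size_t size)
    {
        struct mchunk_sk **pp, *c, *tail;

        for (pp = head; (c = *pp) != NULL; pp = &c->next) {
            if (c->size < size)
                continue;
            if (c->size > size + sizeof(*c)) {
                /* split: the surplus tail stays on the free list */
                tail = (struct mchunk_sk *)((char *)(c + 1) + size);
                tail->size = c->size - size - sizeof(*tail);
                tail->next = c->next;
                *pp = tail;
            } else {
                *pp = c->next;       /* close fit: unlink the whole chunk */
            }
            c->size = size;
            return c + 1;            /* memory starts after the header */
        }
        return NULL;                 /* nothing big enough */
    }

    int main(void)
    {
        size_t area_size = 512;
        struct mchunk_sk *head = malloc(area_size);

        if (!head)
            return 1;
        head->next = NULL;
        head->size = area_size - sizeof(*head);
        printf("got %p and %p\n", alloc_chunk_sk(&head, 64),
               alloc_chunk_sk(&head, 64));
        return 0;
    }
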
/drivers/staging/media/go7007/
go7007-fw.c 381 int size = 0, i, off = 0, chunk; local
394 chunk = mjpeg_frame_header(go, buf + size, 1);
395 memmove(buf + size, buf + size + 80, chunk - 80);
396 size += chunk - 80;
398 for (i = 0; i < size; i += chunk * 2) {
406 chunk = 28;
407 if (mem + chunk > 0x4000)
408 chunk = 0x4000 - mem;
409 if (i + 2 * chunk > size)
410 chunk
651 int i, off = 0, chunk; local
838 int i, off = 0, chunk; local
[all...]
/drivers/infiniband/hw/cxgb4/
resource.c 342 PDBG("%s failed to add PBL chunk (%x/%x)\n",
353 PDBG("%s added PBL chunk (%x/%x)\n",
404 PDBG("%s failed to add RQT chunk (%x/%x)\n",
414 PDBG("%s added RQT chunk (%x/%x)\n",
447 unsigned start, chunk, top; local
454 chunk = rdev->lldi.vr->ocq.size;
455 top = start + chunk;
458 chunk = min(top - start + 1, chunk);
459 if (gen_pool_add(rdev->ocqp_pool, start, chunk,
[all...]
/drivers/net/ethernet/sfc/
mtd.c 468 size_t chunk; local
472 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
474 buffer, chunk);
477 offset += chunk;
478 buffer += chunk;
492 size_t chunk = part->mtd.erasesize; local
507 chunk);
510 offset += chunk;
524 size_t chunk; local
535 chunk
[all...]
/drivers/staging/line6/
midi.c 50 unsigned char chunk[line6->max_packet_size]; local
57 done = snd_rawmidi_transmit_peek(substream, chunk, req);
63 line6_write_hexdump(line6, 's', chunk, done);
65 line6_midibuf_write(mb, chunk, done);
70 done = line6_midibuf_read(mb, chunk, line6->max_packet_size);
79 send_midi_async(line6, chunk, done);
/drivers/atm/
fore200e.h 561 /* chunk of memory */
563 typedef struct chunk { struct
564 void* alloc_addr; /* base address of allocated chunk */
565 void* align_addr; /* base address of aligned chunk */
566 dma_addr_t dma_addr; /* DMA address of aligned chunk */
568 u32 alloc_size; /* length of allocated chunk */
569 u32 align_size; /* length of aligned chunk */
581 struct chunk data; /* data buffer */
603 struct chunk status; /* array of completion status */
613 struct chunk tp
[all...]
fore200e.c 174 /* allocate and align a chunk of memory intended to hold the data being exchanged
178 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction) argument
185 chunk->alloc_size = size + alignment;
186 chunk->align_size = size;
187 chunk->direction = direction;
189 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
190 if (chunk->alloc_addr == NULL)
194 offset = FORE200E_ALIGN(chunk
207 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) argument
468 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int nbr, int alignment) argument
489 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) argument
719 fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk, int size, int nbr, int alignment) argument
739 fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk) argument
[all...]
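
fore200e keeps both the raw and the aligned address of each chunk: it over-allocates by the alignment, rounds the base up for use, and later frees from alloc_addr. A userspace sketch of that trick, assuming a power-of-two alignment and leaving out the driver's DMA mapping:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct chunk_sk {
        void  *alloc_addr;   /* what must eventually be freed */
        void  *align_addr;   /* what callers actually use */
        size_t alloc_size;
        size_t align_size;
    };

    static int chunk_alloc_sk(struct chunk_sk *c, size_t size, size_t alignment)
    {
        uintptr_t addr;

        c->alloc_size = size + alignment;    /* room to slide forward */
        c->align_size = size;
        c->alloc_addr = calloc(1, c->alloc_size);
        if (!c->alloc_addr)
            return -1;

        addr = (uintptr_t)c->alloc_addr;
        if (alignment)
            addr = (addr + alignment - 1) & ~(uintptr_t)(alignment - 1);
        c->align_addr = (void *)addr;
        return 0;
    }

    int main(void)
    {
        struct chunk_sk c;

        if (chunk_alloc_sk(&c, 100, 64) == 0) {
            printf("allocated %p, aligned %p\n", c.alloc_addr, c.align_addr);
            free(c.alloc_addr);
        }
        return 0;
    }
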
/drivers/infiniband/hw/ipath/
ipath_mr.c 191 struct ib_umem_chunk *chunk; local
206 list_for_each_entry(chunk, &umem->chunk_list, list)
207 n += chunk->nents;
227 list_for_each_entry(chunk, &umem->chunk_list, list) {
228 for (i = 0; i < chunk->nents; i++) {
231 vaddr = page_address(sg_page(&chunk->page_list[i]));
/drivers/infiniband/hw/qib/
qib_mr.c 201 struct ib_umem_chunk *chunk; local
216 list_for_each_entry(chunk, &umem->chunk_list, list)
217 n += chunk->nents;
238 list_for_each_entry(chunk, &umem->chunk_list, list) {
239 for (i = 0; i < chunk->nents; i++) {
242 vaddr = page_address(sg_page(&chunk->page_list[i]));
/drivers/net/wireless/wl12xx/
boot.c 105 u8 *p, *chunk; local
119 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
120 if (!chunk) {
121 wl1271_error("allocation for firmware upload chunk failed");
129 /* 10.1 set partition limit and chunk num */
144 /* 10.3 upload the chunk */
147 memcpy(chunk, p, CHUNK_SIZE);
148 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
150 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
155 /* 10.4 upload the last chunk */
[all...]
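
The wl12xx loader streams firmware through a fixed-size bounce buffer (step 10.3 in the driver's comments), then sends the remainder as one short transfer (step 10.4). A sketch with an illustrative chunk size; write_sk is a stub standing in for wl1271_write():

    #include <stdio.h>
    #include <string.h>

    #define CHUNK_SIZE_SK 512  /* illustrative, not the driver's CHUNK_SIZE */

    static void write_sk(unsigned addr, const unsigned char *buf, size_t len)
    {
        (void)buf;  /* a real implementation would push buf to the device */
        printf("write %zu bytes at 0x%x\n", len, addr);
    }

    static void upload_fw_sk(const unsigned char *fw, size_t fw_len,
                             unsigned addr)
    {
        unsigned char chunk[CHUNK_SIZE_SK];
        size_t done = 0;

        while (done + CHUNK_SIZE_SK <= fw_len) {   /* full chunks */
            memcpy(chunk, fw + done, CHUNK_SIZE_SK);
            write_sk(addr + done, chunk, CHUNK_SIZE_SK);
            done += CHUNK_SIZE_SK;
        }
        if (done < fw_len) {                       /* last, short chunk */
            memcpy(chunk, fw + done, fw_len - done);
            write_sk(addr + done, chunk, fw_len - done);
        }
    }

    int main(void)
    {
        unsigned char fw[1300] = { 0 };
        upload_fw_sk(fw, sizeof(fw), 0x40000);
        return 0;
    }
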
/drivers/crypto/
n2_core.c 677 struct n2_crypto_chunk chunk; member in struct:n2_request_context
880 struct n2_crypto_chunk *chunk; local
893 chunk = &rctx->chunk;
894 INIT_LIST_HEAD(&chunk->entry);
896 chunk->iv_paddr = 0UL;
897 chunk->arr_len = 0;
898 chunk->dest_paddr = 0UL;
916 if (chunk->arr_len != 0) {
920 chunk
[all...]
/drivers/net/wireless/libertas/
if_sdio.c 344 u16 size, type, chunk; local
368 chunk = sdio_align_size(card->func, size);
370 ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk);
374 chunk = card->buffer[0] | (card->buffer[1] << 8);
378 (int)type, (int)chunk);
380 if (chunk > size) {
382 (int)chunk, (int)size);
387 if (chunk < size) {
389 (int)chunk, (int)size);
394 ret = if_sdio_handle_cmd(card, card->buffer + 4, chunk
[all...]

Completed in 466 milliseconds
