Searched refs:cache (Results 1 - 25 of 127) sorted by relevance


/drivers/acpi/acpica/
utcache.c
3 * Module Name: utcache - local cache allocation routines
55 * PARAMETERS: cache_name - Ascii name for the cache
57 * max_depth - Maximum depth of the cache (in objects)
58 * return_cache - Where the new cache object is returned
62 * DESCRIPTION: Create a cache object
70 struct acpi_memory_list *cache; local
78 /* Create the cache object */
80 cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
81 if (!cache) {
85 /* Populate the cache object */
108 acpi_os_purge_cache(struct acpi_memory_list * cache) argument
154 acpi_os_delete_cache(struct acpi_memory_list * cache) argument
188 acpi_os_release_object(struct acpi_memory_list * cache, void *object) argument
243 acpi_os_acquire_object(struct acpi_memory_list *cache) argument
[all...]
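
Aside: the utcache.c excerpt above shows ACPICA's pattern of a named, depth-bounded object cache: acquire pops a pre-zeroed object off a free list, release pushes it back until max_depth is reached, and both fall through to the real allocator otherwise. A minimal userspace sketch of that pattern (names here are illustrative, not the ACPICA API; object_size is assumed to be at least sizeof(void *)):

#include <stdlib.h>
#include <string.h>

/* Illustrative depth-bounded object cache: a free list of fixed-size
 * objects, linked through their first word. */
struct obj_cache {
    const char *name;       /* ascii name, as in cache_name above */
    size_t object_size;
    unsigned depth;         /* objects currently on the free list */
    unsigned max_depth;     /* cap, as in max_depth above */
    void *free_list;
};

static void *cache_acquire(struct obj_cache *c)
{
    if (c->free_list) {
        void *obj = c->free_list;

        c->free_list = *(void **)obj;   /* unlink the head */
        c->depth--;
        memset(obj, 0, c->object_size);
        return obj;
    }
    return calloc(1, c->object_size);   /* miss: fall back to the allocator */
}

static void cache_release(struct obj_cache *c, void *obj)
{
    if (c->depth >= c->max_depth) {     /* cache full: really free it */
        free(obj);
        return;
    }
    *(void **)obj = c->free_list;       /* link the object back in */
    c->free_list = obj;
    c->depth++;
}

acpi_os_purge_cache() in the excerpt walks the same free list and frees every entry.
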
uttrack.c
82 * PARAMETERS: cache_name - Ascii name for the cache
84 * return_cache - Where the new cache object is returned
96 struct acpi_memory_list *cache; local
98 cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
99 if (!cache) {
103 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
105 cache->list_name = list_name;
106 cache->object_size = object_size;
108 *return_cache = cache;
610 /* Ignore allocated objects that are in a cache */
[all...]
/drivers/md/
dm-cache-target.c
10 #include "dm-cache-metadata.h"
20 #define DM_MSG_PREFIX "cache"
23 "A percentage of time allocated for copying to and/or from cache");
31 * cblock: index of a cache block
32 * promotion: movement of a block from origin to cache
33 * demotion: movement of a block from cache to origin
34 * migration: movement of a block between the origin and cache device,
104 * The block size of the device holding cache data must be
111 * FIXME: the cache is read/write for the time being.
121 dirty. If you lose the cache device
178 struct cache { struct
298 struct cache *cache; member in struct:per_bio_data
305 struct cache *cache; member in struct:dm_cache_migration
334 wake_worker(struct cache *cache) argument
341 alloc_prison_cell(struct cache *cache) argument
347 free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) argument
352 prealloc_data_structs(struct cache *cache, struct prealloc *p) argument
375 prealloc_free_structs(struct cache *cache, struct prealloc *p) argument
450 bio_detain(struct cache *cache, dm_oblock_t oblock, struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, cell_free_fn free_fn, void *free_context, struct dm_bio_prison_cell **cell_result) argument
466 get_cell(struct cache *cache, dm_oblock_t oblock, struct prealloc *structs, struct dm_bio_prison_cell **cell_result) argument
487 is_dirty(struct cache *cache, dm_cblock_t b) argument
492 set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) argument
500 clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) argument
511 block_size_is_power_of_two(struct cache *cache) argument
527 set_discard(struct cache *cache, dm_oblock_t b) argument
538 clear_discard(struct cache *cache, dm_oblock_t b) argument
547 is_discarded(struct cache *cache, dm_oblock_t b) argument
559 is_discarded_oblock(struct cache *cache, dm_oblock_t b) argument
573 load_stats(struct cache *cache) argument
584 save_stats(struct cache *cache) argument
621 get_per_bio_data_size(struct cache *cache) argument
647 remap_to_origin(struct cache *cache, struct bio *bio) argument
652 remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) argument
669 check_if_tick_bio_needed(struct cache *cache, struct bio *bio) argument
684 remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, dm_oblock_t oblock) argument
693 remap_to_cache_dirty(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) argument
704 get_bio_block(struct cache *cache, struct bio *bio) argument
716 bio_triggers_commit(struct cache *cache, struct bio *bio) argument
725 inc_ds(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) argument
737 issue(struct cache *cache, struct bio *bio) argument
756 inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) argument
762 defer_writethrough_bio(struct cache *cache, struct bio *bio) argument
801 remap_to_origin_then_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) argument
825 inc_nr_migrations(struct cache *cache) argument
830 dec_nr_migrations(struct cache *cache) argument
840 __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) argument
848 cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) argument
862 struct cache *cache = mg->cache; local
869 struct cache *cache = mg->cache; local
895 struct cache *cache = mg->cache; local
931 struct cache *cache = mg->cache; local
969 struct cache *cache = mg->cache; local
985 struct cache *cache = mg->cache; local
1014 struct cache *cache = mg->cache; local
1048 bio_writes_complete_block(struct cache *cache, struct bio *bio) argument
1063 struct cache *cache = mg->cache; local
1090 process_migrations(struct cache *cache, struct list_head *head, void (*fn)(struct dm_cache_migration *)) argument
1114 struct cache *cache = mg->cache; local
1123 queue_quiesced_migrations(struct cache *cache, struct list_head *work) argument
1136 check_for_quiesced_migrations(struct cache *cache, struct per_bio_data *pb) argument
1157 promote(struct cache *cache, struct prealloc *structs, dm_oblock_t oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *cell) argument
1180 writeback(struct cache *cache, struct prealloc *structs, dm_oblock_t oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *cell) argument
1203 demote_then_promote(struct cache *cache, struct prealloc *structs, dm_oblock_t old_oblock, dm_oblock_t new_oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *old_ocell, struct dm_bio_prison_cell *new_ocell) argument
1233 invalidate(struct cache *cache, struct prealloc *structs, dm_oblock_t oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *cell) argument
1259 defer_bio(struct cache *cache, struct bio *bio) argument
1270 process_flush_bio(struct cache *cache, struct bio *bio) argument
1301 process_discard_bio(struct cache *cache, struct bio *bio) argument
1316 spare_migration_bandwidth(struct cache *cache) argument
1323 inc_hit_counter(struct cache *cache, struct bio *bio) argument
1329 inc_miss_counter(struct cache *cache, struct bio *bio) argument
1335 process_bio(struct cache *cache, struct prealloc *structs, struct bio *bio) argument
1449 need_commit_due_to_time(struct cache *cache) argument
1455 commit_if_needed(struct cache *cache) argument
1470 process_deferred_bios(struct cache *cache) argument
1511 process_deferred_flush_bios(struct cache *cache, bool submit_bios) argument
1531 process_deferred_writethrough_bios(struct cache *cache) argument
1551 writeback_some_dirty_blocks(struct cache *cache) argument
1586 process_invalidation_request(struct cache *cache, struct invalidation_request *req) argument
1619 process_invalidation_requests(struct cache *cache) argument
1636 is_quiescing(struct cache *cache) argument
1641 ack_quiescing(struct cache *cache) argument
1649 wait_for_quiescing_ack(struct cache *cache) argument
1654 start_quiescing(struct cache *cache) argument
1660 stop_quiescing(struct cache *cache) argument
1666 wait_for_migrations(struct cache *cache) argument
1671 stop_worker(struct cache *cache) argument
1677 requeue_deferred_io(struct cache *cache) argument
1690 more_work(struct cache *cache) argument
1708 struct cache *cache = container_of(ws, struct cache, worker); local
1746 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); local
1762 struct cache *cache = container_of(cb, struct cache, callbacks); local
1776 destroy(struct cache *cache) argument
1828 struct cache *cache = ti->private; local
2118 process_config_option(struct cache *cache, const char *key, const char *value) argument
2133 set_config_value(struct cache *cache, const char *key, const char *value) argument
2146 set_config_values(struct cache *cache, int argc, const char **argv) argument
2167 create_cache_policy(struct cache *cache, struct cache_args *ca, char **error) argument
2189 struct cache *cache; local
2378 copy_ctr_args(struct cache *cache, int argc, const char **argv) argument
2406 struct cache *cache = NULL; local
2436 __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) argument
2553 struct cache *cache = ti->private; local
2566 struct cache *cache = ti->private; local
2584 write_dirty_bitset(struct cache *cache) argument
2598 write_discard_bitset(struct cache *cache) argument
2622 sync_metadata(struct cache *cache) argument
2654 struct cache *cache = ti->private; local
2669 struct cache *cache = context; local
2686 struct cache *cache = context; local
2696 get_cache_dev_size(struct cache *cache) argument
2703 can_resize(struct cache *cache, dm_cblock_t new_size) argument
2723 resize_cache_dev(struct cache *cache, dm_cblock_t new_size) argument
2741 struct cache *cache = ti->private; local
2789 struct cache *cache = ti->private; local
2815 struct cache *cache = ti->private; local
2908 parse_cblock_range(struct cache *cache, const char *str, struct cblock_range *result) argument
2945 validate_cblock_range(struct cache *cache, struct cblock_range *range) argument
2969 request_invalidation(struct cache *cache, struct cblock_range *range) argument
2988 process_invalidate_cblocks_message(struct cache *cache, unsigned count, const char **cblock_ranges) argument
3030 struct cache *cache = ti->private; local
3048 struct cache *cache = ti->private; local
3067 struct cache *cache = ti->private; local
3077 set_discard_limits(struct cache *cache, struct queue_limits *limits) argument
3088 struct cache *cache = ti->private; local
[all...]
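
Aside: the dm-cache-target.c excerpt defines the vocabulary (oblock/cblock, promotion, demotion, migration) and hints, via block_size_is_power_of_two() and get_bio_block(), that the origin-block computation avoids a 64-bit division whenever the block size is a power of two. A small sketch of that computation under assumed field names (the real struct cache layout is not shown above):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct cache_geom {
    sector_t sectors_per_block;
    int sectors_per_block_shift;    /* -1 when block size is not a power of two */
};

static bool geom_block_size_is_power_of_two(const struct cache_geom *g)
{
    return g->sectors_per_block_shift >= 0;
}

/* Map a bio's start sector to its origin block number (oblock). */
static uint64_t get_block(const struct cache_geom *g, sector_t bio_sector)
{
    if (geom_block_size_is_power_of_two(g))
        return bio_sector >> g->sectors_per_block_shift;   /* cheap shift */
    return bio_sector / g->sectors_per_block;              /* 64-bit division */
}
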
Makefile
14 dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
15 dm-cache-mq-y += dm-cache-policy-mq.o
16 dm-cache-cleaner-y += dm-cache-policy-cleaner.o
54 obj-$(CONFIG_DM_CACHE) += dm-cache.o
55 obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
[all...]
/drivers/base/regmap/
regcache-flat.c
2 * Register cache access API - flat caching support
22 unsigned int *cache; local
24 map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
26 if (!map->cache)
29 cache = map->cache;
32 cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
39 kfree(map->cache);
40 map->cache = NULL;
48 unsigned int *cache = map->cache; local
58 unsigned int *cache = map->cache; local
[all...]
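
Aside: regcache-flat.c is the simplest regmap cache type visible here: one array slot per register, allocated up to max_register and seeded from reg_defaults, as lines 24-32 show. A self-contained sketch of the same idea (struct and function names are made up for illustration):

#include <stddef.h>
#include <stdlib.h>

struct reg_default { unsigned int reg, def; };

struct flat_cache {
    unsigned int *vals;             /* one slot per register number */
    unsigned int max_register;
};

static int flat_cache_init(struct flat_cache *fc, unsigned int max_register,
                           const struct reg_default *defaults, size_t ndefaults)
{
    fc->vals = calloc(max_register + 1, sizeof(*fc->vals));
    if (!fc->vals)
        return -1;
    fc->max_register = max_register;
    for (size_t i = 0; i < ndefaults; i++)
        fc->vals[defaults[i].reg] = defaults[i].def;    /* seed the defaults */
    return 0;
}

static unsigned int flat_cache_read(const struct flat_cache *fc, unsigned int reg)
{
    return fc->vals[reg];   /* O(1) lookup, O(max_register) memory */
}

The O(max_register) memory cost is exactly what the rbtree and LZO variants below trade against.
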
regcache-rbtree.c
2 * Register cache access API - rbtree caching support
69 struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
140 struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
205 map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
206 if (!map->cache)
209 rbtree_ctx = map->cache;
235 rbtree_ctx = map->cache;
251 kfree(map->cache);
252 map->cache = NULL;
383 rbtree_ctx = map->cache;
[all...]
regcache-lzo.c
2 * Register cache access API - LZO caching support
142 map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
144 if (!map->cache)
146 lzo_blocks = map->cache;
149 * allocate a bitmap to be used when syncing the cache with
209 lzo_blocks = map->cache;
215 * the pointer to the bitmap used for syncing the cache
230 map->cache = NULL;
248 lzo_blocks = map->cache;
262 /* fetch the value from the cache */
[all...]
regcache.c
2 * Register cache access API
41 dev_warn(map->dev, "No cache defaults, reading back from HW\n");
43 /* Bypass the cache access till data read from HW*/
125 map->cache = NULL;
146 /* Some devices such as PMICs don't have cache defaults,
148 * crafting the cache defaults by hand.
159 dev_dbg(map->dev, "Initializing %s cache\n",
187 dev_dbg(map->dev, "Destroying %s cache\n",
194 * regcache_read: Fetch the value of a given register from the cache.
225 * regcache_write: Set the value of a given register in the cache
511 u8 *cache = base; local
516 u16 *cache = base; local
521 u32 *cache = base; local
544 const u8 *cache = base; local
548 const u16 *cache = base; local
552 const u32 *cache = base; local
[all...]
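
Aside: the regcache.c excerpt ends with pairs of u8/u16/u32 "cache" locals cast from one base pointer, which is how the core reads and writes cached values for devices of different register widths. A sketch of that dispatch (the signatures are assumptions, not the regmap internals):

#include <stdint.h>

static unsigned int cache_get_val(const void *base, unsigned int idx,
                                  unsigned int word_size)
{
    switch (word_size) {
    case 1: return ((const uint8_t *)base)[idx];
    case 2: return ((const uint16_t *)base)[idx];
    case 4: return ((const uint32_t *)base)[idx];
    }
    return 0;       /* unsupported width */
}

static void cache_set_val(void *base, unsigned int idx, unsigned int val,
                          unsigned int word_size)
{
    switch (word_size) {
    case 1: ((uint8_t *)base)[idx] = (uint8_t)val;   break;
    case 2: ((uint16_t *)base)[idx] = (uint16_t)val; break;
    case 4: ((uint32_t *)base)[idx] = val;           break;
    }
}
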
/drivers/staging/lustre/lustre/fld/
fld_cache.c
62 * create fld cache.
67 struct fld_cache *cache; local
72 OBD_ALLOC_PTR(cache);
73 if (cache == NULL)
76 INIT_LIST_HEAD(&cache->fci_entries_head);
77 INIT_LIST_HEAD(&cache->fci_lru);
79 cache->fci_cache_count = 0;
80 rwlock_init(&cache->fci_lock);
82 strlcpy(cache->fci_name, name,
83 sizeof(cache->fci_name));
100 fld_cache_fini(struct fld_cache *cache) argument
125 fld_cache_entry_delete(struct fld_cache *cache, struct fld_cache_entry *node) argument
137 fld_fix_new_list(struct fld_cache *cache) argument
201 fld_cache_entry_add(struct fld_cache *cache, struct fld_cache_entry *f_new, struct list_head *pos) argument
216 fld_cache_shrink(struct fld_cache *cache) argument
247 fld_cache_flush(struct fld_cache *cache) argument
260 fld_cache_punch_hole(struct fld_cache *cache, struct fld_cache_entry *f_curr, struct fld_cache_entry *f_new) argument
299 fld_cache_overlap_handle(struct fld_cache *cache, struct fld_cache_entry *f_curr, struct fld_cache_entry *f_new) argument
381 fld_cache_insert_nolock(struct fld_cache *cache, struct fld_cache_entry *f_new) argument
429 fld_cache_insert(struct fld_cache *cache, const struct lu_seq_range *range) argument
448 fld_cache_delete_nolock(struct fld_cache *cache, const struct lu_seq_range *range) argument
471 fld_cache_delete(struct fld_cache *cache, const struct lu_seq_range *range) argument
480 fld_cache_entry_lookup_nolock(struct fld_cache *cache, struct lu_seq_range *range) argument
504 fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range) argument
517 fld_cache_lookup(struct fld_cache *cache, const u64 seq, struct lu_seq_range *range) argument
[all...]
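
Aside: fld_cache.c keeps its range entries on two lists at once, a sorted entries list and an LRU list, with fld_cache_shrink() evicting from the cold end once fci_cache_count passes a threshold. A minimal sketch of that shrink step with an intrusive list (type names are illustrative; the sentinel must be initialized with prev == next == &c->lru):

#include <stddef.h>

struct lru_entry {
    struct lru_entry *prev, *next;  /* intrusive doubly linked LRU list */
};

struct range_cache {
    struct lru_entry lru;           /* lru.next = hottest, lru.prev = coldest */
    unsigned count;                 /* mirrors fci_cache_count */
    unsigned threshold;
};

static void lru_del(struct lru_entry *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

static void cache_shrink(struct range_cache *c,
                         void (*free_entry)(struct lru_entry *))
{
    while (c->count > c->threshold) {
        struct lru_entry *cold = c->lru.prev;   /* least recently used */

        lru_del(cold);
        free_entry(cold);
        c->count--;
    }
}
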
fld_internal.h
77 * fld cache entries are sorted on range->lsr_start field. */
125 /* 4M of FLD cache will not hurt client a lot. */
128 /* 1M of FLD cache will not hurt client a lot. */
153 void fld_cache_fini(struct fld_cache *cache);
155 void fld_cache_flush(struct fld_cache *cache);
157 int fld_cache_insert(struct fld_cache *cache,
163 int fld_cache_insert_nolock(struct fld_cache *cache,
165 void fld_cache_delete(struct fld_cache *cache,
167 void fld_cache_delete_nolock(struct fld_cache *cache,
169 int fld_cache_lookup(struct fld_cache *cache,
[all...]
/drivers/infiniband/core/
cache.c
77 struct ib_gid_cache *cache; local
84 read_lock_irqsave(&device->cache.lock, flags);
86 cache = device->cache.gid_cache[port_num - start_port(device)];
88 if (index < 0 || index >= cache->table_len)
91 *gid = cache->table[index];
93 read_unlock_irqrestore(&device->cache.lock, flags);
104 struct ib_gid_cache *cache; local
113 read_lock_irqsave(&device->cache.lock, flags);
116 cache
139 struct ib_pkey_cache *cache; local
166 struct ib_pkey_cache *cache; local
207 struct ib_pkey_cache *cache; local
[all...]
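
Aside: the infiniband cache.c excerpt shows the reader side of a per-device GID/PKey cache: take cache.lock for reading, bounds-check the index, and copy the entry out so the caller never keeps a pointer into the table. A userspace sketch with a pthread rwlock (struct names are stand-ins):

#include <pthread.h>

struct gid { unsigned char raw[16]; };

struct gid_table {
    pthread_rwlock_t lock;
    int table_len;
    struct gid *table;
};

static int cached_gid_get(struct gid_table *t, int index, struct gid *out)
{
    int ret = 0;

    pthread_rwlock_rdlock(&t->lock);
    if (index < 0 || index >= t->table_len)
        ret = -1;                   /* out of range, as at line 88 above */
    else
        *out = t->table[index];     /* copy while the lock is held */
    pthread_rwlock_unlock(&t->lock);
    return ret;
}
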
/drivers/video/fbdev/
sh_mobile_meram.c
110 * @regs: Registers cache
114 * @cache_unit: Bytes to cache per ICB
133 struct sh_mobile_meram_icb *cache; member in struct:sh_mobile_meram_fb_plane
145 * @regs: Registers cache
213 * LCDC cache planes allocation, init, cleanup and free
227 plane->cache = &priv->icbs[idx];
239 __set_bit(plane->cache->index, &priv->used_icb);
255 __clear_bit(plane->cache->index, &priv->used_icb);
269 struct sh_mobile_meram_fb_cache *cache,
273 struct sh_mobile_meram_icb *icb = cache
268 meram_set_next_addr(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_cache *cache, unsigned long base_addr_y, unsigned long base_addr_c) argument
296 meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, struct sh_mobile_meram_fb_cache *cache, unsigned long *icb_addr_y, unsigned long *icb_addr_c) argument
432 struct sh_mobile_meram_fb_cache *cache; local
471 struct sh_mobile_meram_fb_cache *cache; local
529 struct sh_mobile_meram_fb_cache *cache = data; local
556 struct sh_mobile_meram_fb_cache *cache = data; local
[all...]
/drivers/power/
bq27x00_battery.c
110 struct bq27x00_reg_cache cache; member in struct:bq27x00_device_info
437 struct bq27x00_reg_cache cache = {0, }; local
443 cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, flags_1b);
444 if ((cache.flags & 0xff) == 0xff)
446 cache.flags = -1;
447 if (cache.flags >= 0) {
449 && (cache.flags & BQ27000_FLAG_CI)) {
451 cache.capacity = -ENODATA;
452 cache.energy = -ENODATA;
453 cache
[all...]
/drivers/block/
ps3vram.c
83 struct ps3vram_cache cache; member in struct:ps3vram_priv
317 struct ps3vram_cache *cache = &priv->cache; local
319 if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))
323 cache->tags[entry].address);
324 if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size,
325 cache->tags[entry].address, DMA_PAGE_SIZE,
326 cache->page_size / DMA_PAGE_SIZE) < 0) {
329 entry * cache->page_size, cache
339 struct ps3vram_cache *cache = &priv->cache; local
359 struct ps3vram_cache *cache = &priv->cache; local
373 struct ps3vram_cache *cache = &priv->cache; local
[all...]
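
Aside: ps3vram.c is a classic write-back cache: each cache page carries a tag with the backing VRAM address and a CACHE_PAGE_DIRTY flag, and eviction uploads the page only when the flag is set (lines 319-326). A sketch of that step, with upload() standing in for the device DMA call:

#include <stddef.h>
#include <stdint.h>

#define CACHE_PAGE_DIRTY 0x1

struct cache_tag {
    unsigned int flags;
    uint64_t address;   /* backing (VRAM) address of this cache page */
};

static int writeback_entry(struct cache_tag *tags, int entry, size_t page_size,
                           int (*upload)(uint64_t dst, int entry, size_t len))
{
    if (!(tags[entry].flags & CACHE_PAGE_DIRTY))
        return 0;                            /* clean: nothing to write back */
    if (upload(tags[entry].address, entry, page_size) < 0)
        return -1;
    tags[entry].flags &= ~CACHE_PAGE_DIRTY;  /* in sync with the backing store */
    return 0;
}
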
/drivers/infiniband/hw/mlx5/
mr.c
59 struct mlx5_mr_cache *cache = &dev->cache; local
61 if (order < cache->ent[0].order)
64 return order - cache->ent[0].order;
71 struct mlx5_mr_cache *cache = &dev->cache; local
73 struct mlx5_cache_ent *ent = &cache->ent[c];
105 cache->last_add = jiffies;
123 struct mlx5_mr_cache *cache = &dev->cache; local
174 struct mlx5_mr_cache *cache = &dev->cache; local
327 someone_adding(struct mlx5_mr_cache *cache) argument
342 struct mlx5_mr_cache *cache = &dev->cache; local
397 struct mlx5_mr_cache *cache = &dev->cache; local
441 struct mlx5_mr_cache *cache = &dev->cache; local
465 struct mlx5_mr_cache *cache = &dev->cache; local
492 struct mlx5_mr_cache *cache = &dev->cache; local
551 struct mlx5_mr_cache *cache = &dev->cache; local
[all...]
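
Aside: in the mlx5 mr.c excerpt, cache entries are bucketed by allocation order, and lines 61-64 map an order to a bucket index by subtracting ent[0].order, with undersized requests served from bucket 0. That mapping in isolation (the upper-bound handling here is an assumption):

static int order2idx(int order, int base_order, int nents)
{
    if (order < base_order)
        return 0;                   /* smallest bucket also serves tiny orders */
    if (order - base_order >= nents)
        return -1;                  /* too large to be served from the cache */
    return order - base_order;
}
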
/drivers/gpu/drm/nouveau/core/engine/fifo/
nv50.h
13 struct nouveau_gpuobj *cache; member in struct:nv50_fifo_base
/drivers/md/bcache/
bcache.h
7 * Bcache mostly works with cache sets, cache devices, and backing devices.
9 * Support for multiple cache devices hasn't quite been finished off yet, but
10 * it's about 95% plumbed through. A cache set and its cache devices is sort of
12 * about individual cache devices, the main abstraction is the cache set.
14 * Multiple cache devices is intended to give us the ability to mirror dirty
18 * cache set. When you register a newly formatted backing device it'll come up
20 a cache set
395 struct cache { struct
501 struct cache *cache[MAX_CACHES_PER_SET]; member in struct:cache_set
[all...]
alloc.c
49 * bch_bucket_alloc() allocates a single bucket from a specific cache.
52 * out of a cache set.
74 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
86 struct cache *ca;
129 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
139 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
152 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
178 static void invalidate_buckets_lru(struct cache *ca)
215 static void invalidate_buckets_fifo(struct cache *ca)
238 static void invalidate_buckets_random(struct cache *ca)
[all...]
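
Aside: bcache tracks a small generation number per bucket; bch_inc_gen() in the excerpt bumps it when a bucket is invalidated, which instantly marks as stale every pointer that recorded the old generation. A sketch of that trick (field names assumed; the comparison uses signed 8-bit arithmetic so the counter can wrap):

#include <stdbool.h>
#include <stdint.h>

struct bucket { uint8_t gen; };

static uint8_t inc_gen(struct bucket *b)
{
    return ++b->gen;    /* every pointer carrying the old gen is now stale */
}

static bool ptr_stale(const struct bucket *b, uint8_t ptr_gen)
{
    return (int8_t)(b->gen - ptr_gen) > 0;  /* wrap-safe modulo-256 compare */
}
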
/drivers/xen/
features.c
9 #include <linux/cache.h>
/drivers/gpio/
gpio-mcp23s08.c
69 u16 cache[11]; member in struct:mcp23s08
196 if ((n + reg) > sizeof(mcp->cache))
238 if ((n + reg) > sizeof(mcp->cache))
275 mcp->cache[MCP_IODIR] |= (1 << offset);
276 status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
293 mcp->cache[MCP_GPIO] = status;
302 unsigned olat = mcp->cache[MCP_OLAT];
308 mcp->cache[MCP_OLAT] = olat;
332 mcp->cache[MCP_IODIR] &= ~mask;
333 status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
[all...]
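
Aside: gpio-mcp23s08.c keeps a shadow copy of the expander's registers in mcp->cache[], so direction and output changes are a read-modify-write on the cached word followed by a single bus write, never a read over I2C/SPI (lines 275-276 and 332-333). A sketch of that shadow-register pattern, with write_reg() standing in for mcp->ops->write():

#include <stdint.h>

enum { REG_IODIR, REG_OLAT, NREGS };

struct expander {
    uint16_t cache[NREGS];                  /* shadow of the chip's registers */
    int (*write_reg)(int reg, uint16_t val);
};

static int direction_input(struct expander *e, unsigned int offset)
{
    e->cache[REG_IODIR] |= (1u << offset);  /* update the shadow first */
    return e->write_reg(REG_IODIR, e->cache[REG_IODIR]);
}

static int direction_output(struct expander *e, unsigned int offset)
{
    e->cache[REG_IODIR] &= ~(1u << offset);
    return e->write_reg(REG_IODIR, e->cache[REG_IODIR]);
}
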
/drivers/net/wireless/cw1200/
txrx.c
48 /* TX policy cache implementation */
216 static int tx_policy_find(struct tx_policy_cache *cache, argument
220 * the cache.
225 list_for_each_entry(it, &cache->used, link) {
227 return it - cache->cache;
230 list_for_each_entry(it, &cache->free, link) {
232 return it - cache->cache;
237 static inline void tx_policy_use(struct tx_policy_cache *cache, argument
244 tx_policy_release(struct tx_policy_cache *cache, struct tx_policy_cache_entry *entry) argument
256 struct tx_policy_cache *cache = &priv->tx_policy_cache; local
286 struct tx_policy_cache *cache = &priv->tx_policy_cache; local
304 struct tx_policy_cache *cache = &priv->tx_policy_cache; local
343 struct tx_policy_cache *cache = &priv->tx_policy_cache; local
357 struct tx_policy_cache *cache = &priv->tx_policy_cache; local
[all...]
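
Aside: tx_policy_find() in the cw1200 excerpt scans the used list and then the free list and returns "it - cache->cache": the entries live in one flat array, so an entry's cache index falls out of pointer arithmetic. A simplified sketch without the list plumbing:

#include <string.h>

#define CACHE_SIZE 8

struct policy { unsigned char rates[4]; };

struct policy_cache {
    struct policy cache[CACHE_SIZE];
    int used[CACHE_SIZE];           /* nonzero if the slot holds a live policy */
};

static int policy_find(struct policy_cache *pc, const struct policy *wanted)
{
    for (struct policy *it = pc->cache; it < pc->cache + CACHE_SIZE; it++) {
        if (pc->used[it - pc->cache] &&
            !memcmp(it->rates, wanted->rates, sizeof(it->rates)))
            return (int)(it - pc->cache);   /* index from the pointer */
    }
    return -1;
}
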
/drivers/acpi/apei/
ghes.c
528 struct ghes_estatus_cache *cache; local
534 cache = rcu_dereference(ghes_estatus_caches[i]);
535 if (cache == NULL)
537 if (len != cache->estatus_len)
539 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
542 atomic_inc(&cache->count);
544 if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
558 struct ghes_estatus_cache *cache; local
568 cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
569 if (!cache) {
582 ghes_estatus_cache_free(struct ghes_estatus_cache *cache) argument
594 struct ghes_estatus_cache *cache; local
606 struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache; local
[all...]
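
Aside: the ghes.c excerpt caches recently reported error records to throttle duplicates: the lookup matches on length first, then payload, bumps a hit counter, and treats the entry as a hit only while it is younger than GHES_ESTATUS_IN_CACHE_MAX_NSEC. The real code walks the slots under RCU; this single-threaded sketch keeps only the matching logic:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct report_slot {
    const void *data;       /* cached record, NULL if the slot is empty */
    size_t len;
    uint64_t time_in_ns;
    unsigned count;         /* duplicates seen, as with cache->count */
};

static bool report_is_cached(struct report_slot *slots, int nslots,
                             const void *data, size_t len,
                             uint64_t now_ns, uint64_t window_ns)
{
    for (int i = 0; i < nslots; i++) {
        struct report_slot *s = &slots[i];

        if (!s->data || s->len != len)      /* cheap reject on length */
            continue;
        if (memcmp(s->data, data, len))
            continue;
        s->count++;
        if (now_ns - s->time_in_ns < window_ns)
            return true;                    /* fresh duplicate: suppress it */
    }
    return false;
}
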
/drivers/gpu/drm/shmobile/
shmob_drm_crtc.h
30 void *cache; member in struct:shmob_drm_crtc
/drivers/macintosh/
windfarm_smu_sat.c
33 /* If the cache is older than 800ms we'll refetch it */
40 unsigned long last_read; /* jiffies when cache last updated */
41 u8 cache[16]; member in struct:wf_sat
126 /* refresh the cache */
131 err = i2c_smbus_read_i2c_block_data(sat->i2c, 0x3f, 16, sat->cache);
140 DBG(" %.2x", sat->cache[i]);
165 val = ((sat->cache[i] << 8) + sat->cache[i+1]) << sens->shift;
169 val = (val * ((sat->cache[i] << 8) + sat->cache[
[all...]
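
Aside: windfarm_smu_sat.c refreshes its 16-byte sensor block at most every 800 ms, stamping last_read and serving all sensors from sat->cache[] in between. A sketch of that age-gated refresh, with read_block() standing in for the i2c_smbus_read_i2c_block_data() call:

#include <stdint.h>

struct sensor_cache {
    uint8_t cache[16];
    uint64_t last_read_ms;      /* plays the role of the jiffies-based last_read */
};

static int cache_refresh(struct sensor_cache *s, uint64_t now_ms,
                         int (*read_block)(uint8_t out[16]))
{
    if (now_ms - s->last_read_ms < 800)
        return 0;               /* still fresh: serve the cached bytes */
    if (read_block(s->cache) < 0)
        return -1;
    s->last_read_ms = now_ms;
    return 0;
}
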
/drivers/hwmon/
applesmc.c
142 struct applesmc_entry *cache; /* cached key entries */ member in struct:applesmc_registers
342 struct applesmc_entry *cache = &smcreg.cache[index]; local
347 if (cache->valid)
348 return cache;
352 if (cache->valid)
362 memcpy(cache->key, key, 4);
363 cache->len = info[0];
364 memcpy(cache->type, &info[1], 4);
365 cache
[all...]
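
Aside: applesmc.c fills its key-entry cache lazily: a warm entry returns immediately on cache->valid, and the slow path re-checks valid after taking the lock so two racing readers only query the SMC once. A sketch of that double-checked fill (the lockless fast-path read is a deliberate benign race, as in the original; read_key_info() is a stand-in for the SMC query):

#include <pthread.h>
#include <stdbool.h>

struct key_entry {
    bool valid;
    char key[5];
    unsigned char len;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static const struct key_entry *get_entry(struct key_entry *cache, int index,
                                         int (*read_key_info)(int, struct key_entry *))
{
    struct key_entry *e = &cache[index];

    if (e->valid)
        return e;               /* fast path: warm entry, no lock taken */
    pthread_mutex_lock(&cache_lock);
    if (!e->valid && read_key_info(index, e) == 0)
        e->valid = true;        /* re-checked under the lock */
    pthread_mutex_unlock(&cache_lock);
    return e->valid ? e : NULL;
}
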

Completed in 6622 milliseconds
