Lines Matching refs:mc

81 void delete_MC_Chunk (MC_Chunk* mc);
99 static void add_to_freed_queue ( MC_Chunk* mc )
102 const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);
109 mc->next = NULL;
110 freed_list_end[l] = freed_list_start[l] = mc;
113 if (mc->szB >= MC_(clo_freelist_vol)) {
114 mc->next = freed_list_start[l];
115 freed_list_start[l] = mc;
117 mc->next = NULL;
118 freed_list_end[l]->next = mc;
119 freed_list_end[l] = mc;
122 VG_(free_queue_volume) += (Long)mc->szB;
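
Lines 99-122 (add_to_freed_queue) implement the deferred-free queue of what appears to be Valgrind memcheck's malloc wrappers: a freed block is parked rather than recycled, on one of two FIFO lists chosen by size (index 0 for blocks of at least clo_freelist_big_blocks bytes, index 1 for the rest), and a block that alone meets the whole clo_freelist_vol budget is pushed at the head so it is the first to go when the queue is trimmed. A minimal standalone sketch of that policy, with a plain C Chunk and hard-coded thresholds standing in for MC_Chunk and the clo_freelist_* options:

    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; struct Chunk* next; } Chunk;

    static Chunk*    freed_head[2];           /* [0] = big blocks, [1] = the rest */
    static Chunk*    freed_tail[2];
    static long long freed_volume;            /* bytes currently parked */

    static size_t big_threshold = 1u << 20;   /* stand-in for --freelist-big-blocks */
    static size_t vol_limit     = 20u << 20;  /* stand-in for --freelist-vol */

    static void enqueue_freed(Chunk* c)
    {
       const int l = (c->szB >= big_threshold ? 0 : 1);

       if (freed_head[l] == NULL) {           /* queue empty: c is both ends */
          c->next = NULL;
          freed_head[l] = freed_tail[l] = c;
       } else if (c->szB >= vol_limit) {      /* exceeds the whole budget: put at
                                                 the head so it is released first */
          c->next = freed_head[l];
          freed_head[l] = c;
       } else {                               /* normal case: FIFO append */
          c->next = NULL;
          freed_tail[l]->next = c;
          freed_tail[l] = c;
       }
       freed_volume += (long long)c->szB;
    }
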
175 MC_Chunk* mc;
176 mc = freed_list_start[i];
177 while (mc) {
178 if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
180 return mc;
181 mc = mc->next;
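
Lines 175-181 are the companion lookup: both freed lists are scanned so that an address in an error report can be described as lying inside a recently free'd block. VG_(addr_is_in_block) also counts the block's redzones (its fourth argument, cut off in the hit above), hence the rzB slack in this sketch, which reuses the Chunk stand-in from above:

    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; struct Chunk* next; } Chunk;

    /* Return the parked chunk whose block (payload plus redzones)
       contains address a, or NULL if none does. */
    static Chunk* freed_list_lookup(Chunk* head, const char* a, size_t rzB)
    {
       for (Chunk* c = head; c != NULL; c = c->next)
          if (a >= c->data - rzB && a < c->data + c->szB + rzB)
             return c;
       return NULL;
    }
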
193 MC_Chunk* mc = VG_(allocEltPA)(MC_(chunk_poolalloc));
194 mc->data = p;
195 mc->szB = szB;
196 mc->allockind = kind;
198 case 2: mc->where[1] = 0; // fallback to 1
199 case 1: mc->where[0] = 0; // fallback to 0
203 MC_(set_allocated_at) (tid, mc);
211 the mc->data field isn't visible to the leak checker. If memory
214 if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
217 return mc;
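
In create_MC_Chunk (lines 193-217) the descriptor comes from a pool allocator (VG_(allocEltPA), line 193) rather than the general heap, and only as many where[] stack slots are cleared as the current --keep-stacktraces mode uses; the check at line 214 then panics unless the descriptor itself is noaccess from the client's point of view, so its mc->data field cannot masquerade as a stray live pointer when the leak checker scans memory. A sketch of the fallthrough initialisation, with a simplified Chunk:

    #include <stddef.h>

    typedef struct { char* data; size_t szB; int allockind; void* where[2]; } Chunk;

    /* Clear exactly as many stack-trace slots as the current
       --keep-stacktraces mode needs; the switch deliberately falls
       through, as at lines 198-199. */
    static void init_where(Chunk* c, int n_where_pointers)
    {
       switch (n_where_pointers) {
          case 2: c->where[1] = NULL; /* falls through */
          case 1: c->where[0] = NULL; /* falls through */
          case 0: break;
       }
    }
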
221 void delete_MC_Chunk (MC_Chunk* mc)
223 VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
226 // True if mc is in the given block list.
227 static Bool in_block_list (VgHashTable block_list, MC_Chunk* mc)
229 MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
231 tl_assert (found_mc->data == mc->data);
241 if (found_mc->szB != mc->szB
242 || found_mc->allockind != mc->allockind)
244 tl_assert (found_mc == mc);
250 // True if mc is a live block (not yet freed).
251 static Bool live_block (MC_Chunk* mc)
253 if (mc->allockind == MC_AllocCustom) {
257 if ( in_block_list (mp->chunks, mc) )
264 return in_block_list ( MC_(malloc_list), mc );
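
in_block_list (lines 227-244) looks a chunk up by its payload address and asserts that the hit really is the same descriptor (same data, szB, and allockind). live_block (lines 251-264) builds on it: a custom-allocated block may be tracked by a client mempool instead of the malloc list, so every pool is searched before falling back. A sketch, with linked lists standing in for Valgrind's hash tables:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; int allockind;
                           struct Chunk* next; } Chunk;
    typedef struct Pool  { Chunk* chunks; struct Pool* next; } Pool;

    enum { ALLOC_MALLOC, ALLOC_CUSTOM };

    static bool in_block_list(Chunk* head, const Chunk* c)
    {
       for (Chunk* p = head; p != NULL; p = p->next)
          if (p->data == c->data)      /* tables are keyed by payload address */
             return true;
       return false;
    }

    /* Live means: tracked by some mempool (custom allocs), or still
       present in the malloc list. */
    static bool live_block(Pool* pools, Chunk* malloc_list, const Chunk* c)
    {
       if (c->allockind == ALLOC_CUSTOM)
          for (Pool* mp = pools; mp != NULL; mp = mp->next)
             if (in_block_list(mp->chunks, c))
                return true;
       return in_block_list(malloc_list, c);
    }
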
267 ExeContext* MC_(allocated_at) (MC_Chunk* mc)
271 case KS_alloc: return mc->where[0];
273 case KS_alloc_then_free: return (live_block(mc) ?
274 mc->where[0] : VG_(null_ExeContext) ());
275 case KS_alloc_and_free: return mc->where[0];
280 ExeContext* MC_(freed_at) (MC_Chunk* mc)
285 case KS_free: return (mc->where[0] ?
286 mc->where[0] : VG_(null_ExeContext) ());
287 case KS_alloc_then_free: return (live_block(mc) ?
288 VG_(null_ExeContext) () : mc->where[0]);
289 case KS_alloc_and_free: return (mc->where[1] ?
290 mc->where[1] : VG_(null_ExeContext) ());
295 void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
305 mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
308 void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
319 mc->where[pos] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
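
Taken together, lines 267-319 pin down what the two where[] slots mean under each --keep-stacktraces mode; a slot that does not apply yields VG_(null_ExeContext)(). Reconstructed from the accessors and setters above (the pos in line 319 is evidently 1 only for KS_alloc_and_free):

    mode                  where[0]                             where[1]
    KS_alloc              allocation stack                     unused
    KS_free               free stack (null until freed)        unused
    KS_alloc_then_free    allocation stack, overwritten by     unused
                          the free stack when the block dies
    KS_alloc_and_free     allocation stack                     free stack
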
370 MC_Chunk* mc;
393 mc = create_MC_Chunk (tid, p, szB, kind);
394 VG_(HT_add_node)( table, mc );
399 UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
458 void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
462 if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
464 VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
469 MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );
472 MC_(set_freed_at) (tid, mc);
474 add_to_freed_queue ( mc );
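
die_and_free_mem (lines 458-474) is the common retirement path: optionally scribble the --free-fill byte over the payload (never for client-custom blocks), make the payload plus both redzones inaccessible, record the freeing stack, and park the chunk on the freed queue instead of releasing it. A standalone sketch, with trivial stubs for the memcheck primitives used at lines 462-474:

    #include <stddef.h>
    #include <string.h>

    typedef struct Chunk { char* data; size_t szB; int allockind; } Chunk;
    enum { ALLOC_CUSTOM = 2 };

    static int free_fill = -1;   /* stand-in for --free-fill (-1: disabled) */

    /* Hypothetical stubs for the real primitives. */
    static void make_mem_noaccess(char* a, size_t len) { (void)a; (void)len; }
    static void set_freed_at(Chunk* c)                 { (void)c; }
    static void enqueue_freed(Chunk* c)                { (void)c; }

    static void die_and_free_mem(Chunk* c, size_t rzB)
    {
       /* Poison the payload, except for client-custom blocks. */
       if (free_fill != -1 && c->allockind != ALLOC_CUSTOM)
          memset(c->data, free_fill, c->szB);

       /* Payload and both redzones become inaccessible. */
       make_mem_noaccess(c->data - rzB, c->szB + 2*rzB);

       set_freed_at(c);     /* remember the freeing stack */
       enqueue_freed(c);    /* defer reuse via the freed queue */
    }
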
483 void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
491 VG_(HT_add_node)( MC_(malloc_list), mc );
492 MC_(record_freemismatch_error) ( tid, mc );
493 if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
499 MC_Chunk* mc;
503 mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
504 if (mc == NULL) {
508 if (kind != mc->allockind) {
509 tl_assert(p == mc->data);
510 record_freemismatch_error ( tid, mc );
512 die_and_free_mem ( tid, mc, rzB );
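
Lines 483-512 show the free path: the chunk is removed from malloc_list keyed by the client pointer; NULL means a bad or double free, and a kind mismatch (e.g. memory from new[] released with free()) is reported via the small dance at lines 491-493, which re-inserts the chunk so the error printer can describe it as an allocated block, then removes it again. Either way the block is then retired. A sketch, with hypothetical stand-ins for the hash table and error reporters:

    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; int allockind; } Chunk;
    typedef struct Table Table;      /* opaque stand-in for VgHashTable */

    Chunk* ht_remove(Table* t, void* key);
    void   report_free_error(void* p);
    void   report_freemismatch_error(Chunk* c);
    void   die_and_free_mem(Chunk* c, size_t rzB);

    static void handle_free(Table* malloc_list, void* p, int kind, size_t rzB)
    {
       Chunk* c = ht_remove(malloc_list, p);
       if (c == NULL) {                  /* never allocated, or freed twice */
          report_free_error(p);
          return;
       }
       if (kind != c->allockind)         /* e.g. new[] released via free() */
          report_freemismatch_error(c);
       die_and_free_mem(c, rzB);         /* retire the block either way */
    }
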
581 // Now insert the new mc (with a new 'data' field) into malloc_list.
650 MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
654 return ( mc ? mc->szB : 0 );
664 MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
665 if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
675 mc->szB = newSizeB;
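
Lines 650-675 cover the size bookkeeping: a usable-size query is a plain lookup returning mc->szB or 0 (lines 650-654), and an in-place resize request is honoured only when the caller's idea of the old size matches the recorded one and the new size is nonzero; then only the bookkeeping changes. A sketch of that guard:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; } Chunk;

    static bool resize_in_place(Chunk* c, size_t oldSizeB, size_t newSizeB)
    {
       if (c == NULL || c->szB != oldSizeB || newSizeB == 0)
          return false;        /* caller reports the error */
       c->szB = newSizeB;      /* same address, new length */
       return true;
    }
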
716 mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
737 MC_Chunk* mc;
757 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
760 MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
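
Lines 716-760 belong to mempool creation and destruction: destroying a pool retires the memory of every chunk it still tracks, marking payload plus both redzones inaccessible. A sketch, with a list walk standing in for the VG_(HT_Next) iteration at line 757:

    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; struct Chunk* next; } Chunk;

    static void make_mem_noaccess(char* a, size_t len) { (void)a; (void)len; }

    static void destroy_pool_chunks(Chunk* chunks, size_t rzB)
    {
       for (Chunk* c = chunks; c != NULL; c = c->next)
          make_mem_noaccess(c->data - rzB, c->szB + 2*rzB);
    }
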
886 MC_Chunk* mc;
901 mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
902 if (mc == NULL) {
910 pool, addr, mc->szB + 0UL);
913 die_and_free_mem ( tid, mc, mp->rzB );
921 MC_Chunk* mc;
950 mc = (MC_Chunk*) chunks[i];
952 lo = mc->data;
953 hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
970 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
971 MC_(record_free_error)(tid, (Addr)mc->data);
976 die_and_free_mem ( tid, mc, mp->rzB );
985 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
986 MC_(record_free_error)(tid, (Addr)mc->data);
992 if (mc->data < addr) {
993 min = mc->data;
997 lo = mc->data;
1000 if (mc->data + mc->szB > addr + szB) {
1001 max = mc->data + mc->szB;
1005 hi = mc->data + mc->szB;
1020 mc->data = lo;
1021 mc->szB = (UInt) (hi - lo);
1022 VG_(HT_add_node)( mp->chunks, mc );
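
Lines 886-1022 cover chunk retirement from pools. mempool_free (lines 901-913) removes a single chunk by address and retires it through die_and_free_mem. mempool_trim (lines 950-1022) then walks every chunk: one wholly inside the surviving extent [addr, addr+szB) is kept, one wholly outside is freed, and one straddling a boundary is clipped to the intersection, the cut-off parts (min..lo and hi..max) made noaccess, and the chunk re-inserted with data = lo and szB = hi - lo (lines 1020-1022). A standalone sketch of that interval arithmetic:

    #include <stddef.h>

    /* Intersect a chunk [c_lo, c_lo + c_sz) with the surviving extent
       [t_lo, t_lo + t_sz); the caller has established that the ranges
       overlap. Mirrors the min/lo/hi/max computation of lines 992-1021. */
    static void clip_to_extent(size_t c_lo, size_t c_sz,
                               size_t t_lo, size_t t_sz,
                               size_t* keep_lo, size_t* keep_sz)
    {
       size_t c_end = c_lo + c_sz, t_end = t_lo + t_sz;
       size_t lo = (c_lo  > t_lo ) ? c_lo  : t_lo;    /* max of the starts */
       size_t hi = (c_end < t_end) ? c_end : t_end;   /* min of the ends   */
       *keep_lo = lo;
       *keep_sz = hi - lo;
    }
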
1057 MC_Chunk* mc;
1074 mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
1075 if (mc == NULL) {
1080 mc->data = addrB;
1081 mc->szB = szB;
1082 VG_(HT_add_node)( mp->chunks, mc );
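
mempool_change (lines 1074-1082): because the chunk table is keyed by the payload address, moving or resizing a tracked allocation is remove + mutate + re-insert. A sketch with hypothetical hash-table stand-ins:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Chunk { char* data; size_t szB; } Chunk;
    typedef struct Table Table;             /* opaque hash-table stand-in */

    Chunk* ht_remove(Table* t, void* key);  /* keyed by payload address */
    void   ht_add(Table* t, Chunk* c);

    static bool pool_change(Table* chunks, char* addrA, char* addrB, size_t szB)
    {
       Chunk* c = ht_remove(chunks, addrA);
       if (c == NULL)
          return false;      /* unknown address: caller reports an error */
       c->data = addrB;
       c->szB  = szB;
       ht_add(chunks, c);
       return true;
    }
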
1105 MC_Chunk* mc;
1116 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1118 nbytes += (ULong)mc->szB;
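
Finally, lines 1105-1118 compute the heap statistics: every chunk still in malloc_list is a live block, and the byte total is simply the sum of their szB fields. A sketch, with a list walk standing in for the VG_(HT_Next) iteration at line 1116:

    #include <stddef.h>

    typedef struct Chunk { size_t szB; struct Chunk* next; } Chunk;

    static unsigned long long total_live_bytes(const Chunk* malloc_list)
    {
       unsigned long long nbytes = 0;
       for (const Chunk* c = malloc_list; c != NULL; c = c->next)
          nbytes += c->szB;
       return nbytes;
    }
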