1/*-------------------------------------------------------------------------
2 * Vulkan CTS Framework
3 * --------------------
4 *
5 * Copyright (c) 2015 Google Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 *      http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Memory allocation callback utilities.
22 *//*--------------------------------------------------------------------*/
23
24#include "vkAllocationCallbackUtil.hpp"
25#include "tcuFormatUtil.hpp"
26#include "tcuTestLog.hpp"
27#include "deSTLUtil.hpp"
28#include "deMemory.h"
29
30#include <map>
31
32namespace vk
33{
34
35// System default allocator
36
37static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
38{
39	if (size > 0)
40		return deAlignedMalloc(size, (deUint32)alignment);
41	else
42		return DE_NULL;
43}
44
// Default free function; counterpart of systemAllocate()/systemReallocate().
// NOTE(review): pMem is forwarded unchecked — presumably deAlignedFree(NULL) is a no-op; confirm.
static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
{
	deAlignedFree(pMem);
}
49
// Default reallocation function backed by deAlignedRealloc().
// NOTE(review): alignment is passed as size_t here while systemAllocate truncates
// it to deUint32 — presumably the two de* helpers take different types; confirm.
static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
{
	return deAlignedRealloc(pOriginal, size, alignment);
}
54
// Internal allocation notification for the system allocator: intentionally a no-op.
static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}
58
// Internal free notification for the system allocator: intentionally a no-op.
static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}
62
// Process-wide pass-through allocator built on the de* aligned-memory helpers above.
static const VkAllocationCallbacks s_systemAllocator =
{
	DE_NULL,								// pUserData
	systemAllocate,							// pfnAllocation
	systemReallocate,						// pfnReallocation
	systemFree,								// pfnFree
	systemInternalAllocationNotification,	// pfnInternalAllocation
	systemInternalFreeNotification,			// pfnInternalFree
};
72
// Returns the default system allocator. The returned pointer refers to a
// static object and therefore stays valid for the lifetime of the program.
const VkAllocationCallbacks* getSystemAllocator (void)
{
	return &s_systemAllocator;
}
77
78// AllocationCallbacks
79
80static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
81{
82	return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
83}
84
85static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
86{
87	return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
88}
89
90static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
91{
92	reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
93}
94
95static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
96{
97	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
98}
99
100static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
101{
102	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
103}
104
105static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
106{
107	const VkAllocationCallbacks callbacks =
108	{
109		reinterpret_cast<void*>(object),
110		allocationCallback,
111		reallocationCallback,
112		freeCallback,
113		internalAllocationNotificationCallback,
114		internalFreeNotificationCallback
115	};
116	return callbacks;
117}
118
// Wires m_callbacks so that the Vulkan-facing callback table forwards every
// call back into this object's virtual interface.
AllocationCallbacks::AllocationCallbacks (void)
	: m_callbacks(makeCallbacks(this))
{
}
123
// No resources to release; out-of-line definition only.
AllocationCallbacks::~AllocationCallbacks (void)
{
}
127
128// AllocationCallbackRecord
129
130AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
131{
132	AllocationCallbackRecord record;
133
134	record.type							= TYPE_ALLOCATION;
135	record.data.allocation.size			= size;
136	record.data.allocation.alignment	= alignment;
137	record.data.allocation.scope		= scope;
138	record.data.allocation.returnedPtr	= returnedPtr;
139
140	return record;
141}
142
143AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
144{
145	AllocationCallbackRecord record;
146
147	record.type								= TYPE_REALLOCATION;
148	record.data.reallocation.original		= original;
149	record.data.reallocation.size			= size;
150	record.data.reallocation.alignment		= alignment;
151	record.data.reallocation.scope			= scope;
152	record.data.reallocation.returnedPtr	= returnedPtr;
153
154	return record;
155}
156
157AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
158{
159	AllocationCallbackRecord record;
160
161	record.type				= TYPE_FREE;
162	record.data.free.mem	= mem;
163
164	return record;
165}
166
167AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
168{
169	AllocationCallbackRecord record;
170
171	record.type								= TYPE_INTERNAL_ALLOCATION;
172	record.data.internalAllocation.size		= size;
173	record.data.internalAllocation.type		= type;
174	record.data.internalAllocation.scope	= scope;
175
176	return record;
177}
178
179AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
180{
181	AllocationCallbackRecord record;
182
183	record.type								= TYPE_INTERNAL_FREE;
184	record.data.internalAllocation.size		= size;
185	record.data.internalAllocation.type		= type;
186	record.data.internalAllocation.scope	= scope;
187
188	return record;
189}
190
191// ChainedAllocator
192
// Wraps an existing callback table; every request is forwarded to it.
// nextAllocator must remain valid for the lifetime of this object.
ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
	: m_nextAllocator(nextAllocator)
{
}
197
// Non-owning: does not release or touch m_nextAllocator.
ChainedAllocator::~ChainedAllocator (void)
{
}
201
202void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
203{
204	return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
205}
206
207void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
208{
209	return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
210}
211
212void ChainedAllocator::free (void* mem)
213{
214	m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
215}
216
217void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
218{
219	m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
220}
221
222void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
223{
224	m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
225}
226
227// AllocationCallbackRecorder
228
// Records every callback invocation while forwarding to the given allocator.
// callCountHint is passed to the record container — presumably a capacity
// hint for the expected number of callback calls; confirm against m_records' type.
AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
	: ChainedAllocator	(allocator)
	, m_records			(callCountHint)
{
}
234
// Recorded entries are discarded with m_records; nothing else to release.
AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
{
}
238
239void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
240{
241	void* const	ptr	= ChainedAllocator::allocate(size, alignment, allocationScope);
242
243	m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));
244
245	return ptr;
246}
247
248void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
249{
250	void* const	ptr	= ChainedAllocator::reallocate(original, size, alignment, allocationScope);
251
252	m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));
253
254	return ptr;
255}
256
257void AllocationCallbackRecorder::free (void* mem)
258{
259	ChainedAllocator::free(mem);
260
261	m_records.append(AllocationCallbackRecord::free(mem));
262}
263
264void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
265{
266	ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);
267
268	m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
269}
270
271void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
272{
273	ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);
274
275	m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
276}
277
278// DeterministicFailAllocator
279
// Allocator that lets the first numPassingAllocs counted (re)allocations
// through and fails (returns null for) every one after that. Counting is
// suppressed while the mode is MODE_DO_NOT_COUNT; initialMode selects the
// starting behavior and can later be changed with setMode().
DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, deUint32 numPassingAllocs, Mode initialMode)
	: ChainedAllocator	(allocator)
	, m_numPassingAllocs(numPassingAllocs)
	, m_mode			(initialMode)
	, m_allocationNdx	(0)
{
}
287
// Nothing to release.
DeterministicFailAllocator::~DeterministicFailAllocator (void)
{
}
291
// Switches counting on or off at runtime (e.g. to ignore allocations made
// during setup). Does not reset the allocation counter.
void DeterministicFailAllocator::setMode (Mode mode)
{
	m_mode = mode;
}
296
297void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
298{
299	if ((m_mode == MODE_DO_NOT_COUNT) ||
300		(deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs))
301		return ChainedAllocator::allocate(size, alignment, allocationScope);
302	else
303		return DE_NULL;
304}
305
306void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
307{
308	if ((m_mode == MODE_DO_NOT_COUNT) ||
309		(deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs))
310		return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
311	else
312		return DE_NULL;
313}
314
315// Utils
316
// Zero-initializes the per-(type, scope) internal allocation totals; the
// vectors (liveAllocations, violations) start out empty by default.
AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
{
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}
321
322void AllocationCallbackValidationResults::clear (void)
323{
324	liveAllocations.clear();
325	violations.clear();
326	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
327}
328
329namespace
330{
331
// Bookkeeping entry used during validation: the callback record that last
// produced a given pointer, and whether that allocation is currently live.
struct AllocationSlot
{
	AllocationCallbackRecord	record;		// last alloc/realloc record for this pointer
	bool						isLive;		// true until the pointer is freed/reallocated away

	AllocationSlot (void)
		: isLive	(false)
	{}

	AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
		: record	(record_)
		, isLive	(isLive_)
	{}
};
346
347size_t getAlignment (const AllocationCallbackRecord& record)
348{
349	if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
350		return record.data.allocation.alignment;
351	else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
352		return record.data.reallocation.alignment;
353	else
354	{
355		DE_ASSERT(false);
356		return 0;
357	}
358}
359
360} // anonymous
361
/*--------------------------------------------------------------------*//*!
 * \brief Replays recorded callbacks and checks allocator-usage validity.
 *
 * Walks the recorder's records in order, tracking the live/freed state of
 * every pointer ever returned. Detected rule violations (double free, free
 * of unknown pointer, realloc of freed pointer, invalid scope/alignment,
 * negative internal totals, ...) are appended to results->violations.
 * Allocations still live after the replay are collected into
 * results->liveAllocations, and internal allocation/free notifications are
 * summed into results->internalAllocationTotal per (type, scope).
 *
 * \note results must be empty (freshly constructed or clear()ed).
 *//*--------------------------------------------------------------------*/
void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
{
	std::vector<AllocationSlot>		allocations;	// one slot per distinct returned pointer
	std::map<void*, size_t>			ptrToSlotIndex;	// pointer -> index into allocations

	DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());

	for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
		 callbackIter != recorder.getRecordsEnd();
		 ++callbackIter)
	{
		const AllocationCallbackRecord&		record	= *callbackIter;

		// Validate scope (internal alloc and internal free share the same payload)
		{
			const VkSystemAllocationScope* const	scopePtr	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION			? &record.data.allocation.scope
																: record.type == AllocationCallbackRecord::TYPE_REALLOCATION		? &record.data.reallocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION	? &record.data.internalAllocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE		? &record.data.internalAllocation.scope
																: DE_NULL;

			if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
		}

		// Validate alignment: must be a power of two for (re)allocations
		if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
			record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		{
			if (!deIsPowerOfTwoSize(getAlignment(record)))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
		}

		// Validate actual allocation behavior
		switch (record.type)
		{
			case AllocationCallbackRecord::TYPE_ALLOCATION:
			{
				// Null return (failed allocation) is not tracked.
				if (record.data.allocation.returnedPtr)
				{
					if (!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr))
					{
						ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
					else
					{
						// Pointer was seen before; allocator may legally reuse an address
						// after it has been freed.
						const size_t		slotNdx		= ptrToSlotIndex[record.data.allocation.returnedPtr];
						if (!allocations[slotNdx].isLive)
						{
							allocations[slotNdx].isLive = true;
							allocations[slotNdx].record = record;
						}
						else
						{
							// we should not have multiple live allocations with the same pointer
							DE_ASSERT(false);
						}
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_REALLOCATION:
			{
				if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
				{
					const size_t		origSlotNdx		= ptrToSlotIndex[record.data.reallocation.original];
					AllocationSlot&		origSlot		= allocations[origSlotNdx];

					DE_ASSERT(record.data.reallocation.original != DE_NULL);

					if (record.data.reallocation.size > 0)
					{
						// Alignment must match the original allocation's alignment.
						if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));

						if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
						{
							// Reallocated in place.
							if (!origSlot.isLive)
							{
								results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
								origSlot.isLive	= true; // Mark live to suppress further errors
							}

							// Just update slot record
							allocations[origSlotNdx].record = record;
						}
						else
						{
							// Moved to a new address: original slot dies, new slot becomes live.
							if (record.data.reallocation.returnedPtr)
							{
								allocations[origSlotNdx].isLive = false;
								if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
								{
									ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
									allocations.push_back(AllocationSlot(record, true));
								}
								else
								{
									const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
									if (!allocations[slotNdx].isLive)
									{
										allocations[slotNdx].isLive = true;
										allocations[slotNdx].record = record;
									}
									else
									{
										// we should not have multiple live allocations with the same pointer
										DE_ASSERT(false);
									}
								}
							}
							// else original ptr remains valid and live
						}
					}
					else
					{
						// Zero-size realloc acts as a free of the original pointer.
						DE_ASSERT(!record.data.reallocation.returnedPtr);

						origSlot.isLive = false;
					}
				}
				else
				{
					// Original pointer (if any) was never returned by this allocator.
					if (record.data.reallocation.original)
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));

					// realloc(NULL, size) behaves like a fresh allocation.
					if (record.data.reallocation.returnedPtr)
					{
						DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
						ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_FREE:
			{
				if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
				{
					if (de::contains(ptrToSlotIndex, record.data.free.mem))
					{
						const size_t	slotNdx		= ptrToSlotIndex[record.data.free.mem];

						if (allocations[slotNdx].isLive)
							allocations[slotNdx].isLive = false;
						else
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
					}
					else
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
			case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			{
				if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
				{
					// Track a running byte total per (type, scope); a free that would
					// drive the total negative is a violation.
					size_t* const		totalAllocSizePtr	= &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
					const size_t		size				= record.data.internalAllocation.size;

					if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
					{
						if (*totalAllocSizePtr < size)
						{
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
							*totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
						}
						else
							*totalAllocSizePtr -= size;
					}
					else
						*totalAllocSizePtr += size;
				}
				else
					results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));

				break;
			}

			default:
				DE_ASSERT(false);
		}
	}

	DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));

	// Collect live allocations
	for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
		 slotIter != allocations.end();
		 ++slotIter)
	{
		if (slotIter->isLive)
			results->liveAllocations.push_back(slotIter->record);
	}
}
565
/*--------------------------------------------------------------------*//*!
 * \brief Logs validation results and reports overall success.
 *
 * Writes every violation, every disallowed live allocation, and every
 * non-zero internal allocation total to the test log.
 *
 * \param allowedLiveAllocScopeBits	bitmask (1u << scope) of allocation
 *									scopes that may legitimately remain
 *									live; anything else counts as a leak.
 * \return true if there were no violations and no leaks.
 *//*--------------------------------------------------------------------*/
bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
{
	using tcu::TestLog;

	size_t	numLeaks	= 0;

	if (!results.violations.empty())
	{
		for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
		{
			log << TestLog::Message << "VIOLATION " << (violationNdx+1)
													<< ": " << results.violations[violationNdx]
													<< " (" << results.violations[violationNdx].record << ")"
				<< TestLog::EndMessage;
		}

		log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
	}

	// Verify live allocations
	for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
	{
		const AllocationCallbackRecord&		record	= results.liveAllocations[liveNdx];
		// Live records can only be allocations or reallocations; anything else
		// maps to the out-of-range scope and trips the assert below.
		const VkSystemAllocationScope		scope	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION		? record.data.allocation.scope
													: record.type == AllocationCallbackRecord::TYPE_REALLOCATION	? record.data.reallocation.scope
													: VK_SYSTEM_ALLOCATION_SCOPE_LAST;

		DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));

		if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
		{
			log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
			numLeaks += 1;
		}
	}

	// Verify internal allocations
	for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
	{
		for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
		{
			const VkInternalAllocationType	type			= (VkInternalAllocationType)internalAllocTypeNdx;
			const VkSystemAllocationScope	scope			= (VkSystemAllocationScope)scopeNdx;
			const size_t					totalAllocated	= results.internalAllocationTotal[type][scope];

			if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
				totalAllocated > 0)
			{
				log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
										<< " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
					<< TestLog::EndMessage;
				numLeaks += 1;
			}
		}
	}

	if (numLeaks > 0)
		log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;

	return results.violations.empty() && numLeaks == 0;
}
627
628bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
629{
630	AllocationCallbackValidationResults	validationResults;
631
632	validateAllocationCallbacks(recorder, &validationResults);
633
634	return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
635}
636
637size_t getLiveSystemAllocationTotal (const AllocationCallbackValidationResults& validationResults)
638{
639	size_t	allocationTotal	= 0;
640
641	DE_ASSERT(validationResults.violations.empty());
642
643	for (std::vector<AllocationCallbackRecord>::const_iterator alloc = validationResults.liveAllocations.begin();
644		 alloc != validationResults.liveAllocations.end();
645		 ++alloc)
646	{
647		DE_ASSERT(alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ||
648				  alloc->type == AllocationCallbackRecord::TYPE_REALLOCATION);
649
650		const size_t	size		= (alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ? alloc->data.allocation.size : alloc->data.reallocation.size);
651		const size_t	alignment	= (alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ? alloc->data.allocation.alignment : alloc->data.reallocation.alignment);
652
653		allocationTotal += size + alignment - (alignment > 0 ? 1 : 0);
654	}
655
656	for (int internalAllocationTypeNdx = 0; internalAllocationTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocationTypeNdx)
657	{
658		for (int internalAllocationScopeNdx = 0; internalAllocationScopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++internalAllocationScopeNdx)
659			allocationTotal += validationResults.internalAllocationTotal[internalAllocationTypeNdx][internalAllocationScopeNdx];
660	}
661
662	return allocationTotal;
663}
664
// Pretty-prints a callback record for logging; pointers are shown in hex.
std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
{
	switch (record.type)
	{
		case AllocationCallbackRecord::TYPE_ALLOCATION:
			str << "ALLOCATION: size=" << record.data.allocation.size
				<< ", alignment=" << record.data.allocation.alignment
				<< ", scope=" << record.data.allocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_REALLOCATION:
			str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
				<< ", size=" << record.data.reallocation.size
				<< ", alignment=" << record.data.reallocation.alignment
				<< ", scope=" << record.data.reallocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_FREE:
			str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
			break;

		// Both internal record types share the internalAllocation payload.
		case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
		case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
				<< ": size=" << record.data.internalAllocation.size
				<< ", type=" << record.data.internalAllocation.type
				<< ", scope=" << record.data.internalAllocation.scope;
			break;

		default:
			DE_ASSERT(false);
	}

	return str;
}
702
// Pretty-prints a violation as a human-readable message; the asserts document
// which record types each violation reason can legally carry.
std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
{
	switch (violation.reason)
	{
		case AllocationCallbackViolation::REASON_DOUBLE_FREE:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
			str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
			break;
		}

		case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
			str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
			str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
			str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
			break;
		}

		case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
			str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
					  violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
			str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
		{
			str << "Invalid allocation scope";
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
		{
			str << "Invalid alignment";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
		{
			str << "Reallocation with different alignment";
			break;
		}

		default:
			DE_ASSERT(false);
	}

	return str;
}
774
775} // vk
776