// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 28#include "v8.h" 29 30#include "log-utils.h" 31 32namespace v8 { 33namespace internal { 34 35#ifdef ENABLE_LOGGING_AND_PROFILING 36 37LogDynamicBuffer::LogDynamicBuffer( 38 int block_size, int max_size, const char* seal, int seal_size) 39 : block_size_(block_size), 40 max_size_(max_size - (max_size % block_size_)), 41 seal_(seal), 42 seal_size_(seal_size), 43 blocks_(max_size_ / block_size_ + 1), 44 write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) { 45 ASSERT(BlocksCount() > 0); 46 AllocateBlock(0); 47 for (int i = 1; i < BlocksCount(); ++i) { 48 blocks_[i] = NULL; 49 } 50} 51 52 53LogDynamicBuffer::~LogDynamicBuffer() { 54 for (int i = 0; i < BlocksCount(); ++i) { 55 DeleteArray(blocks_[i]); 56 } 57} 58 59 60int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) { 61 if (buf_size == 0) return 0; 62 int read_pos = from_pos; 63 int block_read_index = BlockIndex(from_pos); 64 int block_read_pos = PosInBlock(from_pos); 65 int dest_buf_pos = 0; 66 // Read until dest_buf is filled, or write_pos_ encountered. 
67 while (read_pos < write_pos_ && dest_buf_pos < buf_size) { 68 const int read_size = Min(write_pos_ - read_pos, 69 Min(buf_size - dest_buf_pos, block_size_ - block_read_pos)); 70 memcpy(dest_buf + dest_buf_pos, 71 blocks_[block_read_index] + block_read_pos, read_size); 72 block_read_pos += read_size; 73 dest_buf_pos += read_size; 74 read_pos += read_size; 75 if (block_read_pos == block_size_) { 76 block_read_pos = 0; 77 ++block_read_index; 78 } 79 } 80 return dest_buf_pos; 81} 82 83 84int LogDynamicBuffer::Seal() { 85 WriteInternal(seal_, seal_size_); 86 is_sealed_ = true; 87 return 0; 88} 89 90 91int LogDynamicBuffer::Write(const char* data, int data_size) { 92 if (is_sealed_) { 93 return 0; 94 } 95 if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) { 96 return WriteInternal(data, data_size); 97 } else { 98 return Seal(); 99 } 100} 101 102 103int LogDynamicBuffer::WriteInternal(const char* data, int data_size) { 104 int data_pos = 0; 105 while (data_pos < data_size) { 106 const int write_size = 107 Min(data_size - data_pos, block_size_ - block_write_pos_); 108 memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos, 109 write_size); 110 block_write_pos_ += write_size; 111 data_pos += write_size; 112 if (block_write_pos_ == block_size_) { 113 block_write_pos_ = 0; 114 AllocateBlock(++block_index_); 115 } 116 } 117 write_pos_ += data_size; 118 return data_size; 119} 120 121 122bool Log::is_stopped_ = false; 123Log::WritePtr Log::Write = NULL; 124FILE* Log::output_handle_ = NULL; 125LogDynamicBuffer* Log::output_buffer_ = NULL; 126// Must be the same message as in Logger::PauseProfiler. 
127const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n"; 128Mutex* Log::mutex_ = NULL; 129char* Log::message_buffer_ = NULL; 130 131 132void Log::Init() { 133 mutex_ = OS::CreateMutex(); 134 message_buffer_ = NewArray<char>(kMessageBufferSize); 135} 136 137 138void Log::OpenStdout() { 139 ASSERT(!IsEnabled()); 140 output_handle_ = stdout; 141 Write = WriteToFile; 142 Init(); 143} 144 145 146void Log::OpenFile(const char* name) { 147 ASSERT(!IsEnabled()); 148 output_handle_ = OS::FOpen(name, OS::LogFileOpenMode); 149 Write = WriteToFile; 150 Init(); 151} 152 153 154void Log::OpenMemoryBuffer() { 155 ASSERT(!IsEnabled()); 156 output_buffer_ = new LogDynamicBuffer( 157 kDynamicBufferBlockSize, kMaxDynamicBufferSize, 158 kDynamicBufferSeal, StrLength(kDynamicBufferSeal)); 159 Write = WriteToMemory; 160 Init(); 161} 162 163 164void Log::Close() { 165 if (Write == WriteToFile) { 166 if (output_handle_ != NULL) fclose(output_handle_); 167 output_handle_ = NULL; 168 } else if (Write == WriteToMemory) { 169 delete output_buffer_; 170 output_buffer_ = NULL; 171 } else { 172 ASSERT(Write == NULL); 173 } 174 Write = NULL; 175 176 DeleteArray(message_buffer_); 177 message_buffer_ = NULL; 178 179 delete mutex_; 180 mutex_ = NULL; 181 182 is_stopped_ = false; 183} 184 185 186int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) { 187 if (Write != WriteToMemory) return 0; 188 ASSERT(output_buffer_ != NULL); 189 ASSERT(from_pos >= 0); 190 ASSERT(max_size >= 0); 191 int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size); 192 ASSERT(actual_size <= max_size); 193 if (actual_size == 0) return 0; 194 195 // Find previous log line boundary. 
196 char* end_pos = dest_buf + actual_size - 1; 197 while (end_pos >= dest_buf && *end_pos != '\n') --end_pos; 198 actual_size = static_cast<int>(end_pos - dest_buf + 1); 199 ASSERT(actual_size <= max_size); 200 return actual_size; 201} 202 203 204LogMessageBuilder::WriteFailureHandler 205 LogMessageBuilder::write_failure_handler = NULL; 206 207 208LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) { 209 ASSERT(Log::message_buffer_ != NULL); 210} 211 212 213void LogMessageBuilder::Append(const char* format, ...) { 214 Vector<char> buf(Log::message_buffer_ + pos_, 215 Log::kMessageBufferSize - pos_); 216 va_list args; 217 va_start(args, format); 218 AppendVA(format, args); 219 va_end(args); 220 ASSERT(pos_ <= Log::kMessageBufferSize); 221} 222 223 224void LogMessageBuilder::AppendVA(const char* format, va_list args) { 225 Vector<char> buf(Log::message_buffer_ + pos_, 226 Log::kMessageBufferSize - pos_); 227 int result = v8::internal::OS::VSNPrintF(buf, format, args); 228 229 // Result is -1 if output was truncated. 230 if (result >= 0) { 231 pos_ += result; 232 } else { 233 pos_ = Log::kMessageBufferSize; 234 } 235 ASSERT(pos_ <= Log::kMessageBufferSize); 236} 237 238 239void LogMessageBuilder::Append(const char c) { 240 if (pos_ < Log::kMessageBufferSize) { 241 Log::message_buffer_[pos_++] = c; 242 } 243 ASSERT(pos_ <= Log::kMessageBufferSize); 244} 245 246 247void LogMessageBuilder::Append(String* str) { 248 AssertNoAllocation no_heap_allocation; // Ensure string stay valid. 
249 int length = str->length(); 250 for (int i = 0; i < length; i++) { 251 Append(static_cast<char>(str->Get(i))); 252 } 253} 254 255 256void LogMessageBuilder::AppendAddress(Address addr) { 257 static Address last_address_ = NULL; 258 AppendAddress(addr, last_address_); 259 last_address_ = addr; 260} 261 262 263void LogMessageBuilder::AppendAddress(Address addr, Address bias) { 264 if (!FLAG_compress_log) { 265 Append("0x%" V8PRIxPTR, addr); 266 } else if (bias == NULL) { 267 Append("%" V8PRIxPTR, addr); 268 } else { 269 uintptr_t delta; 270 char sign; 271 if (addr >= bias) { 272 delta = addr - bias; 273 sign = '+'; 274 } else { 275 delta = bias - addr; 276 sign = '-'; 277 } 278 Append("%c%" V8PRIxPTR, sign, delta); 279 } 280} 281 282 283void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) { 284 AssertNoAllocation no_heap_allocation; // Ensure string stay valid. 285 int len = str->length(); 286 if (len > 0x1000) 287 len = 0x1000; 288 if (show_impl_info) { 289 Append(str->IsAsciiRepresentation() ? 
'a' : '2'); 290 if (StringShape(str).IsExternal()) 291 Append('e'); 292 if (StringShape(str).IsSymbol()) 293 Append('#'); 294 Append(":%i:", str->length()); 295 } 296 for (int i = 0; i < len; i++) { 297 uc32 c = str->Get(i); 298 if (c > 0xff) { 299 Append("\\u%04x", c); 300 } else if (c < 32 || c > 126) { 301 Append("\\x%02x", c); 302 } else if (c == ',') { 303 Append("\\,"); 304 } else if (c == '\\') { 305 Append("\\\\"); 306 } else { 307 Append("%lc", c); 308 } 309 } 310} 311 312 313void LogMessageBuilder::AppendStringPart(const char* str, int len) { 314 if (pos_ + len > Log::kMessageBufferSize) { 315 len = Log::kMessageBufferSize - pos_; 316 ASSERT(len >= 0); 317 if (len == 0) return; 318 } 319 Vector<char> buf(Log::message_buffer_ + pos_, 320 Log::kMessageBufferSize - pos_); 321 OS::StrNCpy(buf, str, len); 322 pos_ += len; 323 ASSERT(pos_ <= Log::kMessageBufferSize); 324} 325 326 327bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) { 328 return compressor->Store(Vector<const char>(Log::message_buffer_, pos_)); 329} 330 331 332bool LogMessageBuilder::RetrieveCompressedPrevious( 333 LogRecordCompressor* compressor, const char* prefix) { 334 pos_ = 0; 335 if (prefix[0] != '\0') Append(prefix); 336 Vector<char> prev_record(Log::message_buffer_ + pos_, 337 Log::kMessageBufferSize - pos_); 338 const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record); 339 if (!has_prev) return false; 340 pos_ += prev_record.length(); 341 return true; 342} 343 344 345void LogMessageBuilder::WriteToLogFile() { 346 ASSERT(pos_ <= Log::kMessageBufferSize); 347 const int written = Log::Write(Log::message_buffer_, pos_); 348 if (written != pos_ && write_failure_handler != NULL) { 349 write_failure_handler(); 350 } 351} 352 353 354// Formatting string for back references to the whole line. E.g. "#2" means 355// "the second line above". 
const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";

// Formatting string for back references. E.g. "#2:10" means
// "the second line above, start from char 10 (0-based)".
const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";


// Releases every record copy still held in the circular buffer.
LogRecordCompressor::~LogRecordCompressor() {
  for (int i = 0; i < buffer_.length(); ++i) {
    buffer_[i].Dispose();
  }
}


// Returns the number of decimal digits in 'number' (0..9999 only);
// used to size printed backward references exactly.
static int GetNumberLength(int number) {
  ASSERT(number >= 0);
  ASSERT(number < 10000);
  if (number < 10) return 1;
  if (number < 100) return 2;
  if (number < 1000) return 3;
  return 4;
}


// Length in characters of a printed backward reference: "#<distance>"
// when pos == 0, otherwise "#<distance>:<pos>".
int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
  // See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
  return pos == 0 ? GetNumberLength(distance) + 1
      : GetNumberLength(distance) + GetNumberLength(pos) + 2;
}


// Prints a backward reference into 'dest' using the line form when
// pos == 0 and the line:offset form otherwise.
void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
                                                 int distance,
                                                 int pos) {
  if (pos == 0) {
    OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
  } else {
    OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
  }
}


// Stores a copy of 'record' in the circular buffer, becoming the new
// current record. Returns false (and stores nothing) when the record is
// identical to the current one.
bool LogRecordCompressor::Store(const Vector<const char>& record) {
  // Check if the record is the same as the last stored one.
  if (curr_ != -1) {
    Vector<const char>& curr = buffer_[curr_];
    if (record.length() == curr.length()
        && strncmp(record.start(), curr.start(), record.length()) == 0) {
      return false;
    }
  }
  // buffer_ is circular.
  prev_ = curr_++;
  curr_ %= buffer_.length();
  // Keep an owned copy: the caller's vector may be reused.
  Vector<char> record_copy = Vector<char>::New(record.length());
  memcpy(record_copy.start(), record.start(), record.length());
  // Dispose the slot being overwritten before installing the copy.
  buffer_[curr_].Dispose();
  buffer_[curr_] =
      Vector<const char>(record_copy.start(), record_copy.length());
  return true;
}


// Writes the previous record into 'prev_record', compressed by replacing
// its longest common suffix with an earlier buffered record by a backward
// reference ("#distance[:pos]"). Returns false when there is no previous
// record; emits the record verbatim when no profitable match exists.
bool LogRecordCompressor::RetrievePreviousCompressed(
    Vector<char>* prev_record) {
  if (prev_ == -1) return false;

  int index = prev_;
  // Distance from prev_.
  int distance = 0;
  // Best compression result among records in the buffer.
  struct {
    intptr_t truncated_len;  // length of the suffix a backref would replace
    int distance;            // how many records back the match lives
    int copy_from_pos;       // offset of the matching suffix in that record
    int backref_size;        // printed size of the backward reference
  } best = {-1, 0, 0, 0};
  Vector<const char>& prev = buffer_[prev_];
  const char* const prev_start = prev.start();
  const char* const prev_end = prev.start() + prev.length();
  do {
    // We're moving backwards until we reach the current record.
    // Remember that buffer_ is circular.
    if (--index == -1) index = buffer_.length() - 1;
    ++distance;
    if (index == curr_) break;

    Vector<const char>& data = buffer_[index];
    // A NULL start means this slot was never filled; older slots are
    // empty too, so stop scanning.
    if (data.start() == NULL) break;
    const char* const data_end = data.start() + data.length();
    const char* prev_ptr = prev_end;
    const char* data_ptr = data_end;
    // Compare strings backwards, stop on the last matching character.
    while (prev_ptr != prev_start && data_ptr != data.start()
          && *(prev_ptr - 1) == *(data_ptr - 1)) {
      --prev_ptr;
      --data_ptr;
    }
    const intptr_t truncated_len = prev_end - prev_ptr;
    const int copy_from_pos = static_cast<int>(data_ptr - data.start());
    // Check if the length of compressed tail is enough.
    // Skip matches where the backward reference would be as long as (or
    // longer than) the text it replaces.
    if (truncated_len <= kMaxBackwardReferenceSize
        && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
      continue;
    }

    // Record compression results.
    if (truncated_len > best.truncated_len) {
      best.truncated_len = truncated_len;
      best.distance = distance;
      best.copy_from_pos = copy_from_pos;
      best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
    }
  } while (true);

  if (best.distance == 0) {
    // Can't compress the previous record. Return as is.
    ASSERT(prev_record->length() >= prev.length());
    memcpy(prev_record->start(), prev.start(), prev.length());
    prev_record->Truncate(prev.length());
  } else {
    // Copy the uncompressible part unchanged.
    const intptr_t unchanged_len = prev.length() - best.truncated_len;
    // + 1 for '\0'.
    ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
    memcpy(prev_record->start(), prev.start(), unchanged_len);
    // Append the backward reference.
    Vector<char> backref(
        prev_record->start() + unchanged_len, best.backref_size + 1);
    PrintBackwardReference(backref, best.distance, best.copy_from_pos);
    ASSERT(strlen(backref.start()) - best.backref_size == 0);
    prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
  }
  return true;
}

#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal