lsan_common.cc revision c519335c2d6d32acaac32c0595f08a05081567e7
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
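// disable_counter tracks nested __lsan_disable() / __lsan_enable() calls in
// the current thread; DisabledInThisThread() reports whether leak detection
// is currently disabled for it.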
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
    ParseFlag(options, &f->suppressions, "suppressions");
  }
}

SuppressionContext *suppression_ctx;

void InitializeSuppressions() {
  CHECK(!suppression_ctx);
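  // Construct the context with placement new in a static buffer, so that
  // initialization does not depend on dynamic allocation.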
  ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
  suppression_ctx = new(placeholder_) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
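  // __lsan_default_suppressions() is a weak user-overridable hook; taking its
  // address tells us whether the user has actually defined it.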
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

void InitCommonLsan() {
  InitializeFlags();
  InitializeSuppressions();
  InitializePlatformSpecificModules();
}

class Decorator: private __sanitizer::AnsiColorDecorator {
 public:
  Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
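  // (These have bits 47 and above clear, so the shift must yield zero.)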
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
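  // Round the scan start up to the required pointer alignment.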
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %zu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %zu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %zu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
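  // Worklist flood fill: pop a chunk, scan it for pointers, and let
  // ScanRangeForPointers() push any newly tagged chunks back onto the
  // frontier.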
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize, 0);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
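    // A nonzero resolution truncates the stack trace before it is added, so
    // that leaks whose truncated traces coincide are merged into one record.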
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

// ForEachChunkCallback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
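  // __lsan_is_turned_off() is a weak user-overridable hook, hence the address
  // check before calling it.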
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
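  // Take the allocator and thread registry locks before stopping the world,
  // so that no suspended thread can be holding them while the callback walks
  // the heap and the thread list.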
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  uptr have_unsuppressed = param.leak_report.ApplySuppressions();
  if (have_unsuppressed) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.PrintLargest(flags()->max_leaks);
  }
  if (have_unsuppressed || (flags()->verbosity >= 1)) {
    PrintMatchedSuppressions();
    param.leak_report.PrintSummary();
  }
  if (have_unsuppressed && flags()->exitcode)
    internal__exit(flags()->exitcode);
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num =
      getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
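    // A leak suppression matches if it covers the function, the source file
    // or the module of any frame resolved for this address.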
    Suppression *s;
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
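    // The stored PCs are return addresses; step back to the call instruction
    // before symbolizing.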
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
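  // Coalesce with an existing leak that has the same stack trace and leak
  // kind; the linear scan is bounded by kMaxLeaksConsidered.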
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked, /* is_suppressed */ false };
  leaks_.push_back(leak);
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
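  // Direct leaks are ordered before indirect ones; within each kind, leaks
  // with a larger total size come first.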
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::PrintLargest(uptr num_leaks_to_print) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) unsuppressed_count++;
  if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
    Printf("The %zu largest leak(s):\n", num_leaks_to_print);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_printed = 0;
  Decorator d;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    Printf("%s", d.Leak());
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    Printf("%s", d.End());
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
    leaks_printed++;
    if (leaks_printed == num_leaks_to_print) break;
  }
  if (leaks_printed < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_printed;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  const int kMaxSummaryLength = 128;
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), kMaxSummaryLength,
                    "LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).",
                    bytes, allocations);
  __sanitizer_report_error_summary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    } else {
      unsuppressed_count++;
    }
  }
  return unsuppressed_count;
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"