Add statistics about free space to microdump format.

When a crash occurs as a result of an allocation failure, it is useful
to know approximately which regions of the virtual address space remain
available, so that we can tell whether the crash should be attributed to
memory fragmentation or to some other cause.

BUG=525938
R=primiano@chromium.org

Review URL: https://codereview.chromium.org/1796803003 .
Primiano Tucci 2016-05-23 16:06:26 +01:00
parent e35167de75
commit adca10c8ff

@@ -32,9 +32,9 @@
#include "client/linux/microdump_writer/microdump_writer.h"
#include <sys/utsname.h>
#include <algorithm>
#include <limits>
#include "client/linux/dump_writer_common/thread_info.h"
#include "client/linux/dump_writer_common/ucontext_reader.h"
@@ -44,6 +44,7 @@
#include "client/linux/minidump_writer/linux_ptrace_dumper.h"
#include "common/linux/file_id.h"
#include "common/linux/linux_libc_support.h"
#include "common/memory.h"
namespace {
@@ -61,6 +62,66 @@ using google_breakpad::UContextReader;
const size_t kLineBufferSize = 2048;
// Clamp |src| to the representable range of Dst instead of letting the
// conversion wrap around.
template <typename Dst, typename Src>
Dst saturated_cast(Src src) {
  if (src >= std::numeric_limits<Dst>::max())
    return std::numeric_limits<Dst>::max();
  if (src <= std::numeric_limits<Dst>::min())
    return std::numeric_limits<Dst>::min();
  return static_cast<Dst>(src);
}
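// Usage sketch (hypothetical values, not part of this change):
//   saturated_cast<uint8_t>(300) == 255  // clamped to max
//   saturated_cast<uint8_t>(-1)  == 0    // clamped to min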
int Log2Floor(uint64_t n) {
  // Copied from chromium src/base/bits.h
  if (n == 0)
    return -1;
  int log = 0;
  uint64_t value = n;
  for (int i = 5; i >= 0; --i) {
    int shift = (1 << i);
    uint64_t x = value >> shift;
    if (x != 0) {
      value = x;
      log += shift;
    }
  }
  assert(value == 1u);
  return log;
}
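// Behavior sketch (values worked out by hand, not asserted anywhere here):
//   Log2Floor(0) == -1, Log2Floor(1) == 0, Log2Floor(4096) == 12,
//   Log2Floor(4097) == 12 (floor, not rounding).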
bool MappingsAreAdjacent(const MappingInfo& a, const MappingInfo& b) {
  // Because of load biasing, we can end up with a situation where two
  // mappings actually overlap. So we define adjacency to also include the
  // case where b's start address lies within a's address range (including
  // starting immediately after a).
  // Because load biasing only ever moves the start address backwards, the
  // end address should still increase.
  return a.start_addr <= b.start_addr && a.start_addr + a.size >= b.start_addr;
}
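// Example (hypothetical addresses): for a = [0x1000, 0x3000), a following
// mapping b starting anywhere in [0x1000, 0x3000] counts as adjacent;
// b starting at 0x3001 leaves a one-byte hole and does not.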
bool MappingLessThan(const MappingInfo* a, const MappingInfo* b) {
  // Return true if mapping a is before mapping b.
  // For the same reason (load biasing) we compare end addresses, which -
  // unlike start addresses - will not have been modified.
  return a->start_addr + a->size < b->start_addr + b->size;
}
size_t NextOrderedMapping(
    const google_breakpad::wasteful_vector<MappingInfo*>& mappings,
    size_t curr) {
  // Find the mapping that directly follows mappings[curr].
  // If no such mapping exists, return |invalid| to indicate this.
  const size_t invalid = std::numeric_limits<size_t>::max();
  size_t best = invalid;
  for (size_t next = 0; next < mappings.size(); ++next) {
    if (MappingLessThan(mappings[curr], mappings[next]) &&
        (best == invalid || MappingLessThan(mappings[next], mappings[best]))) {
      best = next;
    }
  }
  return best;
}
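// Note (observation, not part of the change): each call is a linear scan,
// so visiting every mapping via NextOrderedMapping is O(n^2) overall. That
// is acceptable here because the microdump is written once, at crash time,
// and the adjacent-run fast path in DumpFreeSpace skips most of the calls.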
class MicrodumpWriter {
 public:
  MicrodumpWriter(const ExceptionHandler::CrashContext* context,
@@ -98,6 +159,9 @@ class MicrodumpWriter {
    DumpProductInformation();
    DumpOSInformation();
    DumpGPUInformation();
#if !defined(__LP64__)
    DumpFreeSpace();
#endif
    success = DumpCrashingThread();
    if (success)
      success = DumpMappings();
@@ -391,6 +455,81 @@
    LogCommitLine();
  }
#if !defined(__LP64__)
  void DumpFreeSpace() {
    const google_breakpad::wasteful_vector<MappingInfo*>& mappings =
        dumper_->mappings();
    if (mappings.size() == 0) return;

    // This is complicated by the fact that mappings is not in order. It
    // should be mostly in order, however the mapping that contains the entry
    // point for the process is always at the front of the vector.

    static const int HBITS = sizeof(size_t) * 8;
    size_t hole_histogram[HBITS];
    my_memset(hole_histogram, 0, sizeof(hole_histogram));
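    // Bucket i counts holes whose size lies in [2^i, 2^(i+1)), i.e.
    // i == Log2Floor(hole_sz) for each hole recorded below.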
    // Find the lowest address mapping.
    size_t curr = 0;
    for (size_t i = 1; i < mappings.size(); ++i) {
      if (mappings[i]->start_addr < mappings[curr]->start_addr) curr = i;
    }

    uintptr_t lo_addr = mappings[curr]->start_addr;

    size_t hole_cnt = 0;
    size_t hole_max = 0;
    size_t hole_sum = 0;
    while (true) {
      // Skip to the end of an adjacent run of mappings. This is an
      // optimization for the fact that mappings is mostly sorted.
      while (curr != mappings.size() - 1 &&
             MappingsAreAdjacent(*mappings[curr], *mappings[curr + 1])) {
        ++curr;
      }

      size_t next = NextOrderedMapping(mappings, curr);
      if (next == std::numeric_limits<size_t>::max())
        break;

      uintptr_t hole_lo = mappings[curr]->start_addr + mappings[curr]->size;
      uintptr_t hole_hi = mappings[next]->start_addr;

      if (hole_hi > hole_lo) {
        size_t hole_sz = hole_hi - hole_lo;
        int log2_hole_sz = Log2Floor(hole_sz);
        hole_sum += hole_sz;
        hole_max = std::max(hole_sz, hole_max);
        ++hole_cnt;
        ++hole_histogram[log2_hole_sz];
      }

      curr = next;
    }
    uintptr_t hi_addr = mappings[curr]->start_addr + mappings[curr]->size;

    LogAppend("H ");
    LogAppend(lo_addr);
    LogAppend(" ");
    LogAppend(hi_addr);
    LogAppend(" ");
    LogAppend(saturated_cast<uint16_t>(hole_cnt));
    LogAppend(" ");
    LogAppend(hole_max);
    LogAppend(" ");
    LogAppend(hole_sum);
    for (unsigned int i = 0; i < HBITS; ++i) {
      if (!hole_histogram[i]) continue;
      LogAppend(" ");
      LogAppend(saturated_cast<uint8_t>(i));
      LogAppend(":");
      LogAppend(saturated_cast<uint8_t>(hole_histogram[i]));
    }
    LogCommitLine();
  }
#endif
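  // Shape of the line emitted above (field order follows the LogAppend
  // calls; the exact numeric formatting is whatever LogAppend produces):
  //   H <lo_addr> <hi_addr> <hole_cnt> <hole_max> <hole_sum> [<log2>:<count>]*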
  // Write information about the mappings in effect.
  bool DumpMappings() {
    // First write all the mappings from the dumper