/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
/*--- accessibility (A) and validity (V) status of each byte. ---*/
/*--- mc_main.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2013 Julian Seward
jseward@acm.org
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h" // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_rangemap.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "mc_include.h"
#include "memcheck.h" /* for client requests */
/* Set to 1 to do a little more sanity checking */
#define VG_DEBUG_MEMORY 0
#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
static void ocache_sarp_Set_Origins ( Addr, UWord, UInt ); /* fwds */
static void ocache_sarp_Clear_Origins ( Addr, UWord ); /* fwds */
/*------------------------------------------------------------*/
/*--- Fast-case knobs ---*/
/*------------------------------------------------------------*/
// Comment these out to disable the fast cases (don't just set them to zero).
#define PERF_FAST_LOADV 1
#define PERF_FAST_STOREV 1
#define PERF_FAST_SARP 1
#define PERF_FAST_STACK 1
#define PERF_FAST_STACK2 1
/* Change this to 1 to enable assertions on origin tracking cache fast
paths */
#define OC_ENABLE_ASSERTIONS 0
/*------------------------------------------------------------*/
/*--- Comments on the origin tracking implementation ---*/
/*------------------------------------------------------------*/
/* See detailed comment entitled
AN OVERVIEW OF THE ORIGIN TRACKING IMPLEMENTATION
which is contained further on in this file. */
/*------------------------------------------------------------*/
/*--- V bits and A bits ---*/
/*------------------------------------------------------------*/
/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
thinks the corresponding value bit is defined. And every memory byte
has an A bit, which tracks whether Memcheck thinks the program can access
it safely (ie. it's mapped, and has at least one of the RWX permission bits
set). So every N-bit register is shadowed with N V bits, and every memory
byte is shadowed with 8 V bits and one A bit.
In the implementation, we use two forms of compression (compressed V bits
and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
for memory.
Memcheck also tracks extra information about each heap block that is
allocated, for detecting memory leaks and other purposes.
*/
/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation. ---*/
/*------------------------------------------------------------*/
/* All reads and writes are checked against a memory map (a.k.a. shadow
memory), which records the state of all memory in the process.
On 32-bit machines the memory map is organised as follows.
The top 16 bits of an address are used to index into a top-level
map table, containing 65536 entries. Each entry is a pointer to a
second-level map, which records the accessibility and validity
permissions for the 65536 bytes indexed by the lower 16 bits of the
address. Each byte is represented by two bits (details are below). So
each second-level map contains 16384 bytes. This two-level arrangement
conveniently divides the 4G address space into 64k lumps, each of size
64k bytes.
All entries in the primary (top-level) map must point to a valid
secondary (second-level) map. Since many of the 64kB chunks will
have the same status for every bit -- ie. noaccess (for unused
address space) or entirely addressable and defined (for code segments) --
there are three distinguished secondary maps, which indicate 'noaccess',
'undefined' and 'defined'. For these uniform 64kB chunks, the primary
map entry points to the relevant distinguished map. In practice,
typically more than half of the addressable memory is represented with
the 'undefined' or 'defined' distinguished secondary map, so it gives a
good saving. It also lets us set the V+A bits of large address regions
quickly in set_address_range_perms().
On 64-bit machines it's more complicated. If we followed the same basic
scheme we'd have a four-level table which would require too many memory
accesses. So instead the top-level map table has 2^20 entries (indexed
using bits 16..35 of the address); this covers the bottom 64GB. Any
accesses above 64GB are handled with a slow, sparse auxiliary table.
Valgrind's address space manager tries very hard to keep things below
this 64GB barrier so that performance doesn't suffer too much.
Note that this file has a lot of different functions for reading and
writing shadow memory. Only a couple are strictly necessary (eg.
get_vabits2 and set_vabits2); most are just specialised for specific
common cases to improve performance.
Aside: the V+A bits are less precise than they could be -- we have no way
of marking memory as read-only. It would be great if we could add an
extra state VA_BITSn_READONLY. But then we'd have 5 different states,
which requires 2.3 bits to hold, and there's no way to do that elegantly
-- we'd have to double up to 4 bits of metadata per byte, which doesn't
seem worth it.
*/
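/* Worked example (addresses are illustrative): for a 32-bit access to
address 0x40561234, the top 16 bits (0x4056) index primary_map[],
yielding the secondary map that covers 0x40560000 .. 0x4056ffff; the
low 16 bits (0x1234) then select the V+A bits for that byte within the
secondary (see SM_OFF below). If the whole 64KB chunk is, say, unmapped,
primary_map[0x4056] simply points at the distinguished 'noaccess'
secondary. */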
/* --------------- Basic configuration --------------- */
/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
#if VG_WORDSIZE == 4
/* cover the entire address space */
# define N_PRIMARY_BITS 16
#else
/* Just handle the first 64G fast and the rest via auxiliary
primaries. If you change this, Memcheck will assert at startup.
See the definition of UNALIGNED_OR_HIGH for extensive comments. */
# define N_PRIMARY_BITS 20
#endif
/* Do not change this. */
#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
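/* For illustration: with the 64-bit setting above, N_PRIMARY_MAP is 2^20
and each entry covers 64KB, so the primary map spans 2^20 * 2^16 = 2^36
bytes and MAX_PRIMARY_ADDRESS is 2^36 - 1 (the 64GB mentioned above).
On 32-bit builds the figures are 2^16 entries and MAX_PRIMARY_ADDRESS ==
0xFFFFFFFF, ie. the whole 4GB address space. */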
/* --------------- Secondary maps --------------- */
// Each byte of memory conceptually has an A bit, which indicates its
// addressability, and 8 V bits, which indicate its definedness.
//
// But because very few bytes are partially defined, we can use a nice
// compression scheme to reduce the size of shadow memory. Each byte of
// memory has 2 bits which indicate its state (ie. V+A bits):
//
// 00: noaccess (unaddressable but treated as fully defined)
// 01: undefined (addressable and fully undefined)
// 10: defined (addressable and fully defined)
// 11: partdefined (addressable and partially defined)
//
// In the "partdefined" case, we use a secondary table to store the V bits.
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk. Hence the name
// "vabits8". This lets us get the V+A bits for four bytes at a time
// easily (without having to do any shifting and/or masking), and that is a
// very common operation. (Note that although each vabits8 chunk
// is 8 bits in size, it represents 32 bits of memory.)
//
// The representation is "inverse" little-endian... each 4 bytes of
// memory is represented by a 1 byte value, where:
//
// - the status of byte (a+0) is held in bits [1..0]
// - the status of byte (a+1) is held in bits [3..2]
// - the status of byte (a+2) is held in bits [5..4]
// - the status of byte (a+3) is held in bits [7..6]
//
// It's "inverse" because endianness normally describes a mapping from
// value bits to memory addresses; in this case the mapping is inverted.
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
//
// But note that we don't compress the V bits stored in registers; they
// need to be explicit to make the shadow operations possible. Therefore
// when moving values between registers and memory we need to convert
// between the expanded in-register format and the compressed in-memory
// format. This isn't so difficult, it just requires careful attention in a
// few places.
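// Worked example (values follow directly from the encoding above): if
// byte (a+0) is defined, (a+1) is undefined and (a+2), (a+3) are
// noaccess, then the vabits8 value for that 4-byte group is
//    (00b << 6) | (00b << 4) | (01b << 2) | 10b  ==  0x06.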
// These represent eight bits of memory.
#define VA_BITS2_NOACCESS 0x0 // 00b
#define VA_BITS2_UNDEFINED 0x1 // 01b
#define VA_BITS2_DEFINED 0x2 // 10b
#define VA_BITS2_PARTDEFINED 0x3 // 11b
// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS 0x0 // 00_00b
#define VA_BITS4_UNDEFINED 0x5 // 01_01b
#define VA_BITS4_DEFINED 0xa // 10_10b
// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS 0x00 // 00_00_00_00b
#define VA_BITS8_UNDEFINED 0x55 // 01_01_01_01b
#define VA_BITS8_DEFINED 0xaa // 10_10_10_10b
// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS 0x0000 // 00_00_00_00b x 2
#define VA_BITS16_UNDEFINED 0x5555 // 01_01_01_01b x 2
#define VA_BITS16_DEFINED 0xaaaa // 10_10_10_10b x 2
#define SM_CHUNKS 16384
#define SM_OFF(aaa) (((aaa) & 0xffff) >> 2)
#define SM_OFF_16(aaa) (((aaa) & 0xffff) >> 3)
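// For example, for a == 0x40561234: SM_OFF(a) == 0x1234 >> 2 == 0x48d,
// selecting the vabits8 chunk for bytes 0x40561234 .. 0x40561237, and
// SM_OFF_16(a) == 0x1234 >> 3 == 0x246, selecting the 16-bit chunk (two
// adjacent vabits8 bytes) covering the 8 bytes 0x40561230 .. 0x40561237.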
// Paranoia: it's critical for performance that the requested inlining
// occurs. So try extra hard.
#define INLINE inline __attribute__((always_inline))
static INLINE Addr start_of_this_sm ( Addr a ) {
return (a & (~SM_MASK));
}
static INLINE Bool is_start_of_sm ( Addr a ) {
return (start_of_this_sm(a) == a);
}
typedef
struct {
UChar vabits8[SM_CHUNKS];
}
SecMap;
// 3 distinguished secondary maps, one for no-access, one for
// accessible but undefined, and one for accessible and defined.
// Distinguished secondaries may never be modified.
#define SM_DIST_NOACCESS 0
#define SM_DIST_UNDEFINED 1
#define SM_DIST_DEFINED 2
static SecMap sm_distinguished[3];
static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}
// Forward declaration
static void update_SM_counts(SecMap* oldSM, SecMap* newSM);
/* dist_sm points to one of our three distinguished secondaries. Make
a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
SecMap* new_sm;
tl_assert(dist_sm == &sm_distinguished[0]
|| dist_sm == &sm_distinguished[1]
|| dist_sm == &sm_distinguished[2]);
new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
if (new_sm == NULL)
VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
sizeof(SecMap) );
VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
update_SM_counts(dist_sm, new_sm);
return new_sm;
}
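/* For example, when a range that only partially covers a 64KB chunk is
marked undefined and that chunk's entry still points at a distinguished
secondary, set_address_range_perms() (or get_secmap_for_writing()) calls
this to give the chunk its own private, writable SecMap before modifying
it. */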
/* --------------- Stats --------------- */
static Int n_issued_SMs = 0;
static Int n_deissued_SMs = 0;
static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
static Int n_undefined_SMs = 0;
static Int n_defined_SMs = 0;
static Int n_non_DSM_SMs = 0;
static Int max_noaccess_SMs = 0;
static Int max_undefined_SMs = 0;
static Int max_defined_SMs = 0;
static Int max_non_DSM_SMs = 0;
/* # searches initiated in auxmap_L1, and # base cmps required */
static ULong n_auxmap_L1_searches = 0;
static ULong n_auxmap_L1_cmps = 0;
/* # of searches that missed in auxmap_L1 and therefore had to
be handed to auxmap_L2. And the number of nodes inserted. */
static ULong n_auxmap_L2_searches = 0;
static ULong n_auxmap_L2_nodes = 0;
static Int n_sanity_cheap = 0;
static Int n_sanity_expensive = 0;
static Int n_secVBit_nodes = 0;
static Int max_secVBit_nodes = 0;
static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
else { n_non_DSM_SMs --;
n_deissued_SMs ++; }
if (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
else if (newSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs ++;
else { n_non_DSM_SMs ++;
n_issued_SMs ++; }
if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
}
/* --------------- Primary maps --------------- */
/* The main primary map. This covers some initial part of the address
space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];
/* An entry in the auxiliary primary map. base must be a 64k-aligned
value, and sm points at the relevant secondary map. As with the
main primary map, the secondary may be either a real secondary, or
one of the three distinguished secondaries. DO NOT CHANGE THIS
LAYOUT: the first word has to be the key for OSet fast lookups.
*/
typedef
struct {
Addr base;
SecMap* sm;
}
AuxMapEnt;
/* Tunable parameter: How big is the L1 queue? */
#define N_AUXMAP_L1 24
/* Tunable parameter: How far along the L1 queue to insert
entries resulting from L2 lookups? */
#define AUXMAP_L1_INSERT_IX 12
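/* The L1 array below is a small self-organising cache in front of the L2
OSet: a hit at slot 0 is returned directly, a hit at any later slot swaps
the entry one place towards the front, and entries found only in L2 are
(re)inserted at slot AUXMAP_L1_INSERT_IX. A 64KB chunk that is looked up
repeatedly therefore migrates one slot per hit until it reaches the
front. */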
static struct {
Addr base;
AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
}
auxmap_L1[N_AUXMAP_L1];
static OSet* auxmap_L2 = NULL;
static void init_auxmap_L1_L2 ( void )
{
Int i;
for (i = 0; i < N_AUXMAP_L1; i++) {
auxmap_L1[i].base = 0;
auxmap_L1[i].ent = NULL;
}
tl_assert(0 == offsetof(AuxMapEnt,base));
tl_assert(sizeof(Addr) == sizeof(void*));
auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/ offsetof(AuxMapEnt,base),
/*fastCmp*/ NULL,
VG_(malloc), "mc.iaLL.1", VG_(free) );
}
/* Check representation invariants; if OK return NULL; else a
descriptive bit of text. Also return the number of
non-distinguished secondary maps referred to from the auxiliary
primary maps. */
static const HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
Word i, j;
/* On a 32-bit platform, the L2 and L1 tables should
both remain empty forever.
On a 64-bit platform:
In the L2 table:
all .base & 0xFFFF == 0
all .base > MAX_PRIMARY_ADDRESS
In the L1 table:
all .base & 0xFFFF == 0
all (.base > MAX_PRIMARY_ADDRESS
.base & 0xFFFF == 0
and .ent points to an AuxMapEnt with the same .base)
or
(.base == 0 and .ent == NULL)
*/
*n_secmaps_found = 0;
if (sizeof(void*) == 4) {
/* 32-bit platform */
if (VG_(OSetGen_Size)(auxmap_L2) != 0)
return "32-bit: auxmap_L2 is non-empty";
for (i = 0; i < N_AUXMAP_L1; i++)
if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
return "32-bit: auxmap_L1 is non-empty";
} else {
/* 64-bit platform */
UWord elems_seen = 0;
AuxMapEnt *elem, *res;
AuxMapEnt key;
/* L2 table */
VG_(OSetGen_ResetIter)(auxmap_L2);
while ( (elem = VG_(OSetGen_Next)(auxmap_L2)) ) {
elems_seen++;
if (0 != (elem->base & (Addr)0xFFFF))
return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
if (elem->base <= MAX_PRIMARY_ADDRESS)
return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
if (elem->sm == NULL)
return "64-bit: .sm in _L2 is NULL";
if (!is_distinguished_sm(elem->sm))
(*n_secmaps_found)++;
}
if (elems_seen != n_auxmap_L2_nodes)
return "64-bit: disagreement on number of elems in _L2";
/* Check L1-L2 correspondence */
for (i = 0; i < N_AUXMAP_L1; i++) {
if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
continue;
if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
if (auxmap_L1[i].ent == NULL)
return "64-bit: .ent is NULL in auxmap_L1";
if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
return "64-bit: _L1 and _L2 bases are inconsistent";
/* Look it up in auxmap_L2. */
key.base = auxmap_L1[i].base;
key.sm = 0;
res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
if (res == NULL)
return "64-bit: _L1 .base not found in _L2";
if (res != auxmap_L1[i].ent)
return "64-bit: _L1 .ent disagrees with _L2 entry";
}
/* Check L1 contains no duplicates */
for (i = 0; i < N_AUXMAP_L1; i++) {
if (auxmap_L1[i].base == 0)
continue;
for (j = i+1; j < N_AUXMAP_L1; j++) {
if (auxmap_L1[j].base == 0)
continue;
if (auxmap_L1[j].base == auxmap_L1[i].base)
return "64-bit: duplicate _L1 .base entries";
}
}
}
return NULL; /* ok */
}
static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
{
Word i;
tl_assert(ent);
tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
for (i = N_AUXMAP_L1-1; i > rank; i--)
auxmap_L1[i] = auxmap_L1[i-1];
auxmap_L1[rank].base = ent->base;
auxmap_L1[rank].ent = ent;
}
static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
AuxMapEnt key;
AuxMapEnt* res;
Word i;
tl_assert(a > MAX_PRIMARY_ADDRESS);
a &= ~(Addr)0xFFFF;
/* First search the front-cache, which is a self-organising
list containing the most popular entries. */
if (LIKELY(auxmap_L1[0].base == a))
return auxmap_L1[0].ent;
if (LIKELY(auxmap_L1[1].base == a)) {
Addr t_base = auxmap_L1[0].base;
AuxMapEnt* t_ent = auxmap_L1[0].ent;
auxmap_L1[0].base = auxmap_L1[1].base;
auxmap_L1[0].ent = auxmap_L1[1].ent;
auxmap_L1[1].base = t_base;
auxmap_L1[1].ent = t_ent;
return auxmap_L1[0].ent;
}
n_auxmap_L1_searches++;
for (i = 0; i < N_AUXMAP_L1; i++) {
if (auxmap_L1[i].base == a) {
break;
}
}
tl_assert(i >= 0 && i <= N_AUXMAP_L1);
n_auxmap_L1_cmps += (ULong)(i+1);
if (i < N_AUXMAP_L1) {
if (i > 0) {
Addr t_base = auxmap_L1[i-1].base;
AuxMapEnt* t_ent = auxmap_L1[i-1].ent;
auxmap_L1[i-1].base = auxmap_L1[i-0].base;
auxmap_L1[i-1].ent = auxmap_L1[i-0].ent;
auxmap_L1[i-0].base = t_base;
auxmap_L1[i-0].ent = t_ent;
i--;
}
return auxmap_L1[i].ent;
}
n_auxmap_L2_searches++;
/* First see if we already have it. */
key.base = a;
key.sm = 0;
res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
if (res)
insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
return res;
}
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
AuxMapEnt *nyu, *res;
/* First see if we already have it. */
res = maybe_find_in_auxmap( a );
if (LIKELY(res))
return res;
/* Ok, there's no entry in the secondary map, so we'll have
to allocate one. */
a &= ~(Addr)0xFFFF;
nyu = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
tl_assert(nyu);
nyu->base = a;
nyu->sm = &sm_distinguished[SM_DIST_NOACCESS];
VG_(OSetGen_Insert)( auxmap_L2, nyu );
insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
n_auxmap_L2_nodes++;
return nyu;
}
/* --------------- SecMap fundamentals --------------- */
// In all these, 'low' means it's definitely in the main primary map,
// 'high' means it's definitely in the auxiliary table.
static INLINE SecMap** get_secmap_low_ptr ( Addr a )
{
UWord pm_off = a >> 16;
# if VG_DEBUG_MEMORY >= 1
tl_assert(pm_off < N_PRIMARY_MAP);
# endif
return &primary_map[ pm_off ];
}
static INLINE SecMap** get_secmap_high_ptr ( Addr a )
{
AuxMapEnt* am = find_or_alloc_in_auxmap(a);
return &am->sm;
}
static INLINE SecMap** get_secmap_ptr ( Addr a )
{
return ( a <= MAX_PRIMARY_ADDRESS
? get_secmap_low_ptr(a)
: get_secmap_high_ptr(a));
}
static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
{
return *get_secmap_low_ptr(a);
}
static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
{
return *get_secmap_high_ptr(a);
}
static INLINE SecMap* get_secmap_for_writing_low(Addr a)
{
SecMap** p = get_secmap_low_ptr(a);
if (UNLIKELY(is_distinguished_sm(*p)))
*p = copy_for_writing(*p);
return *p;
}
static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
{
SecMap** p = get_secmap_high_ptr(a);
if (UNLIKELY(is_distinguished_sm(*p)))
*p = copy_for_writing(*p);
return *p;
}
/* Produce the secmap for 'a', either from the primary map or by
ensuring there is an entry for it in the aux primary map. The
secmap may be a distinguished one as the caller will only want to
be able to read it.
*/
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
return ( a <= MAX_PRIMARY_ADDRESS
? get_secmap_for_reading_low (a)
: get_secmap_for_reading_high(a) );
}
/* Produce the secmap for 'a', either from the primary map or by
ensuring there is an entry for it in the aux primary map. The
secmap may not be a distinguished one, since the caller will want
to be able to write it. If it is a distinguished secondary, make a
writable copy of it, install it, and return the copy instead. (COW
semantics).
*/
static INLINE SecMap* get_secmap_for_writing ( Addr a )
{
return ( a <= MAX_PRIMARY_ADDRESS
? get_secmap_for_writing_low (a)
: get_secmap_for_writing_high(a) );
}
/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
allocate one if one doesn't already exist. This is used by the
leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
if (a <= MAX_PRIMARY_ADDRESS) {
return get_secmap_for_reading_low(a);
} else {
AuxMapEnt* am = maybe_find_in_auxmap(a);
return am ? am->sm : NULL;
}
}
/* --------------- Fundamental functions --------------- */
static INLINE
void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
{
UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
*vabits8 &= ~(0x3 << shift); // mask out the two old bits
*vabits8 |= (vabits2 << shift); // mask in the two new bits
}
static INLINE
void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
{
UInt shift;
tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
shift = (a & 2) << 1; // shift by 0 or 4
*vabits8 &= ~(0xf << shift); // mask out the four old bits
*vabits8 |= (vabits4 << shift); // mask in the four new bits
}
static INLINE
UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
{
UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
vabits8 >>= shift; // shift the two bits to the bottom
return 0x3 & vabits8; // mask out the rest
}
static INLINE
UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
{
UInt shift;
tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
shift = (a & 2) << 1; // shift by 0 or 4
vabits8 >>= shift; // shift the four bits to the bottom
return 0xf & vabits8; // mask out the rest
}
// Note that these four are only used in slow cases. The fast cases do
// clever things like combine the auxmap check (in
// get_secmap_for_{reading,writing}) with alignment checks.
// *** WARNING! ***
// Any time this function is called, if it is possible that vabits2
// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
// sec-V-bits table must also be set!
static INLINE
void set_vabits2 ( Addr a, UChar vabits2 )
{
SecMap* sm = get_secmap_for_writing(a);
UWord sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
}
static INLINE
UChar get_vabits2 ( Addr a )
{
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off = SM_OFF(a);
UChar vabits8 = sm->vabits8[sm_off];
return extract_vabits2_from_vabits8(a, vabits8);
}
// *** WARNING! ***
// Any time this function is called, if it is possible that any of the
// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
// corresponding entry(s) in the sec-V-bits table must also be set!
static INLINE
UChar get_vabits8_for_aligned_word32 ( Addr a )
{
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off = SM_OFF(a);
UChar vabits8 = sm->vabits8[sm_off];
return vabits8;
}
static INLINE
void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
{
SecMap* sm = get_secmap_for_writing(a);
UWord sm_off = SM_OFF(a);
sm->vabits8[sm_off] = vabits8;
}
// Forward declarations
static UWord get_sec_vbits8(Addr a);
static void set_sec_vbits8(Addr a, UWord vbits8);
// Returns False if there was an addressability error.
static INLINE
Bool set_vbits8 ( Addr a, UChar vbits8 )
{
Bool ok = True;
UChar vabits2 = get_vabits2(a);
if ( VA_BITS2_NOACCESS != vabits2 ) {
// Addressable. Convert in-register format to in-memory format.
// Also remove any existing sec V bit entry for the byte if no
// longer necessary.
if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_DEFINED; }
else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
else { vabits2 = VA_BITS2_PARTDEFINED;
set_sec_vbits8(a, vbits8); }
set_vabits2(a, vabits2);
} else {
// Unaddressable! Do nothing -- when writing to unaddressable
// memory it acts as a black hole, and the V bits can never be seen
// again. So we don't have to write them at all.
ok = False;
}
return ok;
}
// Returns False if there was an addressability error. In that case, we put
// all defined bits into vbits8.
static INLINE
Bool get_vbits8 ( Addr a, UChar* vbits8 )
{
Bool ok = True;
UChar vabits2 = get_vabits2(a);
// Convert the in-memory format to in-register format.
if ( VA_BITS2_DEFINED == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
else if ( VA_BITS2_NOACCESS == vabits2 ) {
*vbits8 = V_BITS8_DEFINED; // Make V bits defined!
ok = False;
} else {
tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
*vbits8 = get_sec_vbits8(a);
}
return ok;
}
/* --------------- Secondary V bit table ------------ */
// This table holds the full V bit pattern for partially-defined bytes
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
// memory.
//
// Note: the nodes in this table can become stale. Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed. This is because checking for
// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any nodes that no longer correspond to a PDB.
// If more than a certain proportion of nodes survive, we increase the
// table size so that GCs occur less often.
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// if we had no GC -- while handling well the case where a node becomes
// stale but shortly afterwards is rewritten with a PDB and so becomes
// non-stale again (which happens quite often, eg. in perf/bz2). If we just
// remove all stale nodes as soon as possible, we just end up re-adding a
// lot of them again later. The "sufficiently stale" approach avoids
// this. (If a program has many live PDBs, performance will just suck,
// there's no way around that.)
//
// Further comments, JRS 14 Feb 2012. It turns out that the policy of
// holding on to stale entries for 2 GCs before discarding them can lead
// to massive space leaks. So we're changing to an arrangement where
// lines are evicted as soon as they are observed to be stale during a
// GC. This also has a side benefit of allowing the sufficiently_stale
// field to be removed from the SecVBitNode struct, reducing its size by
// 8 bytes, which is a substantial space saving considering that the
// struct was previously 32 or so bytes, on a 64 bit target.
//
// In order to try and mitigate the problem that the "sufficiently stale"
// heuristic was designed to avoid, the table size is allowed to drift
// up ("DRIFTUP") slowly to 80000, even if the residency is low. This
// means that nodes will exist in the table longer on average, and hopefully
// will be deleted and re-added less frequently.
//
// The previous scaling up mechanism (now called STEPUP) is retained:
// if residency exceeds 50%, the table is scaled up, although by a
// factor sqrt(2) rather than 2 as before. This effectively doubles the
// frequency of GCs when there are many PDBs and reduces the tendency of
// stale PDBs to reside for long periods in the table.
static OSet* secVBitTable;
// Stats
static ULong sec_vbits_new_nodes = 0;
static ULong sec_vbits_updates = 0;
// This must be a power of two; this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off: if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the number of total nodes. In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not. So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE 16
// We make the table bigger by a factor of STEPUP_GROWTH_FACTOR if
// more than this many nodes survive a GC.
#define STEPUP_SURVIVOR_PROPORTION 0.5
#define STEPUP_GROWTH_FACTOR 1.414213562
// If the above heuristic doesn't apply, then we may make the table
// slightly bigger, by a factor of DRIFTUP_GROWTH_FACTOR, if more than
// this many nodes survive a GC, _and_ the total table size does
// not exceed a fixed limit. The numbers are somewhat arbitrary, but
// work tolerably well on long Firefox runs. The scaleup ratio of 1.5%
// gradually but effectively reduces residency and increases the time
// between GCs for programs with small numbers of PDBs. The 80000 limit
// effectively limits the table size to around 2MB for programs with
// small numbers of PDBs, whilst giving a reasonably long lifetime to
// entries, to try and reduce the costs resulting from deleting and
// re-adding of entries.
#define DRIFTUP_SURVIVOR_PROPORTION 0.15
#define DRIFTUP_GROWTH_FACTOR 1.015
#define DRIFTUP_MAX_SIZE 80000
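// Illustrative arithmetic: each node holds an 8-byte key plus
// BYTES_PER_SEC_VBIT_NODE (16) V-bit bytes, plus OSet overhead, so the
// 80000-node ceiling corresponds to roughly the 2MB mentioned above.
// Starting from the initial limit of 1000 (secVBitLimit below), a STEPUP
// multiplies the limit by ~1.414 and a DRIFTUP by only 1.015, so many
// low-residency GCs are needed to drift up to the ceiling.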
// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size. It can change.
static Int secVBitLimit = 1000;
// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;
typedef
struct {
Addr a;
UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
}
SecVBitNode;
static OSet* createSecVBitTable(void)
{
OSet* newSecVBitTable;
newSecVBitTable = VG_(OSetGen_Create_With_Pool)
( offsetof(SecVBitNode, a),
NULL, // use fast comparisons
VG_(malloc), "mc.cSVT.1 (sec VBit table)",
VG_(free),
1000,
sizeof(SecVBitNode));
return newSecVBitTable;
}
static void gcSecVBitTable(void)
{
OSet* secVBitTable2;
SecVBitNode* n;
Int i, n_nodes = 0, n_survivors = 0;
GCs_done++;
// Create the new table.
secVBitTable2 = createSecVBitTable();
// Traverse the table, moving fresh nodes into the new table.
VG_(OSetGen_ResetIter)(secVBitTable);
while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
// Keep node if any of its bytes are non-stale. Using
// get_vabits2() for the lookup is not very efficient, but I don't
// think it matters.
for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
// Found a non-stale byte, so keep =>
// Insert a copy of the node into the new table.
SecVBitNode* n2 =
VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
*n2 = *n;
VG_(OSetGen_Insert)(secVBitTable2, n2);
break;
}
}
}
// Get the before and after sizes.
n_nodes = VG_(OSetGen_Size)(secVBitTable);
n_survivors = VG_(OSetGen_Size)(secVBitTable2);
// Destroy the old table, and put the new one in its place.
VG_(OSetGen_Destroy)(secVBitTable);
secVBitTable = secVBitTable2;
if (VG_(clo_verbosity) > 1) {
HChar percbuf[7];
VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)\n",
n_nodes, n_survivors, percbuf);
}
// Increase table size if necessary.
if ((Double)n_survivors
> ((Double)secVBitLimit * STEPUP_SURVIVOR_PROPORTION)) {
secVBitLimit = (Int)((Double)secVBitLimit * (Double)STEPUP_GROWTH_FACTOR);
if (VG_(clo_verbosity) > 1)
VG_(message)(Vg_DebugMsg,
"memcheck GC: %d new table size (stepup)\n",
secVBitLimit);
}
else
if (secVBitLimit < DRIFTUP_MAX_SIZE
&& (Double)n_survivors
> ((Double)secVBitLimit * DRIFTUP_SURVIVOR_PROPORTION)) {
secVBitLimit = (Int)((Double)secVBitLimit * (Double)DRIFTUP_GROWTH_FACTOR);
if (VG_(clo_verbosity) > 1)
VG_(message)(Vg_DebugMsg,
"memcheck GC: %d new table size (driftup)\n",
secVBitLimit);
}
}
static UWord get_sec_vbits8(Addr a)
{
Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
Int amod = a % BYTES_PER_SEC_VBIT_NODE;
SecVBitNode* n = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
UChar vbits8;
tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
// Shouldn't be fully defined or fully undefined -- those cases shouldn't
// make it to the secondary V bits table.
vbits8 = n->vbits8[amod];
tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
return vbits8;
}
static void set_sec_vbits8(Addr a, UWord vbits8)
{
Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
Int i, amod = a % BYTES_PER_SEC_VBIT_NODE;
SecVBitNode* n = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
// Shouldn't be fully defined or fully undefined -- those cases shouldn't
// make it to the secondary V bits table.
tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
if (n) {
n->vbits8[amod] = vbits8; // update
sec_vbits_updates++;
} else {
// Do a table GC if necessary. Nb: do this before creating and
// inserting the new node, to avoid erroneously GC'ing the new node.
if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
gcSecVBitTable();
}
// New node: assign the specific byte, make the rest invalid (they
// should never be read as-is, but be cautious).
n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
n->a = aAligned;
for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
n->vbits8[i] = V_BITS8_UNDEFINED;
}
n->vbits8[amod] = vbits8;
// Insert the new node.
VG_(OSetGen_Insert)(secVBitTable, n);
sec_vbits_new_nodes++;
n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
if (n_secVBit_nodes > max_secVBit_nodes)
max_secVBit_nodes = n_secVBit_nodes;
}
}
/* --------------- Endianness helpers --------------- */
/* Returns the offset in memory of the byteno-th least significant byte
in a wordszB-sized word, given the specified endianness. */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
UWord byteno ) {
return bigendian ? (wordszB-1-byteno) : byteno;
}
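/* For example, with wordszB == 4: on a little-endian target bytenos
0,1,2,3 map to offsets 0,1,2,3, while on a big-endian target they map to
offsets 3,2,1,0. Either way byteno 0 denotes the least significant byte
of the word. */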
/* --------------- Ignored address ranges --------------- */
/* Denotes the address-error-reportability status for address ranges:
IAR_NotIgnored: the usual case -- report errors in this range
IAR_CommandLine: don't report errors -- from command line setting
IAR_ClientReq: don't report errors -- from client request
*/
typedef
enum { IAR_INVALID=99,
IAR_NotIgnored,
IAR_CommandLine,
IAR_ClientReq }
IARKind;
static const HChar* showIARKind ( IARKind iark )
{
switch (iark) {
case IAR_INVALID: return "INVALID";
case IAR_NotIgnored: return "NotIgnored";
case IAR_CommandLine: return "CommandLine";
case IAR_ClientReq: return "ClientReq";
default: return "???";
}
}
// RangeMap<IARKind>
static RangeMap* gIgnoredAddressRanges = NULL;
static void init_gIgnoredAddressRanges ( void )
{
if (LIKELY(gIgnoredAddressRanges != NULL))
return;
gIgnoredAddressRanges = VG_(newRangeMap)( VG_(malloc), "mc.igIAR.1",
VG_(free), IAR_NotIgnored );
tl_assert(gIgnoredAddressRanges != NULL);
}
INLINE Bool MC_(in_ignored_range) ( Addr a )
{
if (LIKELY(gIgnoredAddressRanges == NULL))
return False;
UWord how = IAR_INVALID;
UWord key_min = ~(UWord)0;
UWord key_max = (UWord)0;
VG_(lookupRangeMap)(&key_min, &key_max, &how, gIgnoredAddressRanges, a);
tl_assert(key_min <= a && a <= key_max);
switch (how) {
case IAR_NotIgnored: return False;
case IAR_CommandLine: return True;
case IAR_ClientReq: return True;
default: break; /* invalid */
}
VG_(tool_panic)("MC_(in_ignore_range)");
/*NOTREACHED*/
}
/* Parse two Addr separated by a dash, or fail. */
static Bool parse_range ( const HChar** ppc, Addr* result1, Addr* result2 )
{
Bool ok = VG_(parse_Addr) (ppc, result1);
if (!ok)
return False;
if (**ppc != '-')
return False;
(*ppc)++;
ok = VG_(parse_Addr) (ppc, result2);
if (!ok)
return False;
return True;
}
/* Parse a set of ranges separated by commas into 'ignoreRanges', or
fail. If they are valid, add them to the global set of ignored
ranges. */
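/* The accepted syntax is a comma-separated list of dash-separated address
pairs, eg. "0x50000000-0x5fffffff,0x70000000-0x70ffffff" (addresses here
are purely illustrative), with each start address no greater than its
end address. This is the parser behind the --ignore-ranges= command line
option. */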
static Bool parse_ignore_ranges ( const HChar* str0 )
{
init_gIgnoredAddressRanges();
const HChar* str = str0;
const HChar** ppc = &str;
while (1) {
Addr start = ~(Addr)0;
Addr end = (Addr)0;
Bool ok = parse_range(ppc, &start, &end);
if (!ok)
return False;
if (start > end)
return False;
VG_(bindRangeMap)( gIgnoredAddressRanges, start, end, IAR_CommandLine );
if (**ppc == 0)
return True;
if (**ppc != ',')
return False;
(*ppc)++;
}
/*NOTREACHED*/
return False;
}
/* Add or remove [start, +len) from the set of ignored ranges. */
static Bool modify_ignore_ranges ( Bool addRange, Addr start, Addr len )
{
init_gIgnoredAddressRanges();
const Bool verbose = (VG_(clo_verbosity) > 1);
if (len == 0) {
return False;
}
if (addRange) {
VG_(bindRangeMap)(gIgnoredAddressRanges,
start, start+len-1, IAR_ClientReq);
if (verbose)
VG_(dmsg)("memcheck: modify_ignore_ranges: add %p %p\n",
(void*)start, (void*)(start+len-1));
} else {
VG_(bindRangeMap)(gIgnoredAddressRanges,
start, start+len-1, IAR_NotIgnored);
if (verbose)
VG_(dmsg)("memcheck: modify_ignore_ranges: del %p %p\n",
(void*)start, (void*)(start+len-1));
}
if (verbose) {
VG_(dmsg)("memcheck: now have %ld ranges:\n",
VG_(sizeRangeMap)(gIgnoredAddressRanges));
Word i;
for (i = 0; i < VG_(sizeRangeMap)(gIgnoredAddressRanges); i++) {
UWord val = IAR_INVALID;
UWord key_min = ~(UWord)0;
UWord key_max = (UWord)0;
VG_(indexRangeMap)( &key_min, &key_max, &val,
gIgnoredAddressRanges, i );
VG_(dmsg)("memcheck: [%ld] %016llx-%016llx %s\n",
i, (ULong)key_min, (ULong)key_max, showIARKind(val));
}
}
return True;
}
/* --------------- Load/store slow cases. --------------- */
static
__attribute__((noinline))
void mc_LOADV_128_or_256_slow ( /*OUT*/ULong* res,
Addr a, SizeT nBits, Bool bigendian )
{
ULong pessim[4]; /* only used when p-l-ok=yes */
SSizeT szB = nBits / 8;
SSizeT szL = szB / 8; /* Size in Longs (64-bit units) */
SSizeT i, j; /* Must be signed. */
SizeT n_addrs_bad = 0;
Addr ai;
UChar vbits8;
Bool ok;
/* Code below assumes load size is a power of two and at least 64
bits. */
tl_assert((szB & (szB-1)) == 0 && szL > 0);
/* If this triggers, you probably just need to increase the size of
the pessim array. */
tl_assert(szL <= sizeof(pessim) / sizeof(pessim[0]));
for (j = 0; j < szL; j++) {
pessim[j] = V_BITS64_DEFINED;
res[j] = V_BITS64_UNDEFINED;
}
/* Make up a result V word, which contains the loaded data for
valid addresses and Defined for invalid addresses. Iterate over
the bytes in the word, from the most significant down to the
least. The vbits to return are calculated into res[]. Also
compute the pessimising value to be used when
--partial-loads-ok=yes. n_addrs_bad is redundant (the relevant
info can be gleaned from the pessim array) but is used as a
cross-check. */
for (j = szL-1; j >= 0; j--) {
ULong vbits64 = V_BITS64_UNDEFINED;
ULong pessim64 = V_BITS64_DEFINED;
UWord long_index = byte_offset_w(szL, bigendian, j);
for (i = 8-1; i >= 0; i--) {
PROF_EVENT(29, "mc_LOADV_128_or_256_slow(loop)");
ai = a + 8*long_index + byte_offset_w(8, bigendian, i);
ok = get_vbits8(ai, &vbits8);
vbits64 <<= 8;
vbits64 |= vbits8;
if (!ok) n_addrs_bad++;
pessim64 <<= 8;
pessim64 |= (ok ? V_BITS8_DEFINED : V_BITS8_UNDEFINED);
}
res[long_index] = vbits64;
pessim[long_index] = pessim64;
}
/* In the common case, all the addresses involved are valid, so we
just return the computed V bits and have done. */
if (LIKELY(n_addrs_bad == 0))
return;
/* If there's no possibility of getting a partial-loads-ok
exemption, report the error and quit. */
if (!MC_(clo_partial_loads_ok)) {
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
return;
}
/* The partial-loads-ok exemption might apply. Find out if it
does. If so, don't report an addressing error, but do return
Undefined for the bytes that are out of range, so as to avoid
false negatives. If it doesn't apply, just report an addressing
error in the usual way. */
/* Some code steps along byte strings in aligned chunks
even when there is only a partially defined word at the end (eg,
optimised strlen). This is allowed by the memory model of
modern machines, since an aligned load cannot span two pages and
thus cannot "partially fault".
Therefore, a load from a partially-addressable place is allowed
if all of the following hold:
- the command-line flag is set [by default, it isn't]
- it's an aligned load
- at least one of the addresses in the word *is* valid
Since this suppresses the addressing error, we avoid false
negatives by marking bytes undefined when they come from an
invalid address.
*/
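/* For instance, a 16-byte aligned 128-bit load whose last few bytes lie
just past the end of a heap block and are therefore marked noaccess:
with --partial-loads-ok=yes no address error is reported, but those
bytes of the result are forced to Undefined via pessim[], so any later
use of them is still reported. */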
/* "at least one of the addresses is invalid" */
ok = False;
for (j = 0; j < szL; j++)
ok |= pessim[j] != V_BITS64_DEFINED;
tl_assert(ok);
if (0 == (a & (szB - 1)) && n_addrs_bad < szB) {
/* Exemption applies. Use the previously computed pessimising
value and return the combined result, but don't flag an
addressing error. The pessimising value is Defined for valid
addresses and Undefined for invalid addresses. */
/* for assumption that doing bitwise or implements UifU */
tl_assert(V_BIT_UNDEFINED == 1 && V_BIT_DEFINED == 0);
/* (really need "UifU" here...)
vbits[j] UifU= pessim[j] (is pessimised by it, iow) */
for (j = szL-1; j >= 0; j--)
res[j] |= pessim[j];
return;
}
/* Exemption doesn't apply. Flag an addressing error in the normal
way. */
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
}
static
__attribute__((noinline))
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
PROF_EVENT(30, "mc_LOADVn_slow");
/* ------------ BEGIN semi-fast cases ------------ */
/* These deal quickly-ish with the common auxiliary primary map
cases on 64-bit platforms. They are merely a speedup hack; they can be
omitted without loss of correctness/functionality. Note that in
both cases the "sizeof(void*) == 8" causes these cases to be
folded out by compilers on 32-bit platforms. These are derived
from LOADV64 and LOADV32.
*/
if (LIKELY(sizeof(void*) == 8
&& nBits == 64 && VG_IS_8_ALIGNED(a))) {
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off16 = SM_OFF_16(a);
UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
if (LIKELY(vabits16 == VA_BITS16_DEFINED))
return V_BITS64_DEFINED;
if (LIKELY(vabits16 == VA_BITS16_UNDEFINED))
return V_BITS64_UNDEFINED;
/* else fall into the slow case */
}
if (LIKELY(sizeof(void*) == 8
&& nBits == 32 && VG_IS_4_ALIGNED(a))) {
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off = SM_OFF(a);
UWord vabits8 = sm->vabits8[sm_off];
if (LIKELY(vabits8 == VA_BITS8_DEFINED))
return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
if (LIKELY(vabits8 == VA_BITS8_UNDEFINED))
return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
/* else fall into slow case */
}
/* ------------ END semi-fast cases ------------ */
ULong vbits64 = V_BITS64_UNDEFINED; /* result */
ULong pessim64 = V_BITS64_DEFINED; /* only used when p-l-ok=yes */
SSizeT szB = nBits / 8;
SSizeT i; /* Must be signed. */
SizeT n_addrs_bad = 0;
Addr ai;
UChar vbits8;
Bool ok;
tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
/* Make up a 64-bit result V word, which contains the loaded data
for valid addresses and Defined for invalid addresses. Iterate
over the bytes in the word, from the most significant down to
the least. The vbits to return are calculated into vbits64.
Also compute the pessimising value to be used when
--partial-loads-ok=yes. n_addrs_bad is redundant (the relevant
info can be gleaned from pessim64) but is used as a
cross-check. */
for (i = szB-1; i >= 0; i--) {
PROF_EVENT(31, "mc_LOADVn_slow(loop)");
ai = a + byte_offset_w(szB, bigendian, i);
ok = get_vbits8(ai, &vbits8);
vbits64 <<= 8;
vbits64 |= vbits8;
if (!ok) n_addrs_bad++;
pessim64 <<= 8;
pessim64 |= (ok ? V_BITS8_DEFINED : V_BITS8_UNDEFINED);
}
/* In the common case, all the addresses involved are valid, so we
just return the computed V bits and have done. */
if (LIKELY(n_addrs_bad == 0))
return vbits64;
/* If there's no possibility of getting a partial-loads-ok
exemption, report the error and quit. */
if (!MC_(clo_partial_loads_ok)) {
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
return vbits64;
}
/* The partial-loads-ok exemption might apply. Find out if it
does. If so, don't report an addressing error, but do return
Undefined for the bytes that are out of range, so as to avoid
false negatives. If it doesn't apply, just report an addressing
error in the usual way. */
/* Some code steps along byte strings in aligned word-sized chunks
even when there is only a partially defined word at the end (eg,
optimised strlen). This is allowed by the memory model of
modern machines, since an aligned load cannot span two pages and
thus cannot "partially fault". Despite such behaviour being
declared undefined by ANSI C/C++.
Therefore, a load from a partially-addressable place is allowed
if all of the following hold:
- the command-line flag is set [by default, it isn't]
- it's a word-sized, word-aligned load
- at least one of the addresses in the word *is* valid
Since this suppresses the addressing error, we avoid false
negatives by marking bytes undefined when they come from an
invalid address.
*/
/* "at least one of the addresses is invalid" */
tl_assert(pessim64 != V_BITS64_DEFINED);
if (szB == VG_WORDSIZE && VG_IS_WORD_ALIGNED(a)
&& n_addrs_bad < VG_WORDSIZE) {
/* Exemption applies. Use the previously computed pessimising
value for vbits64 and return the combined result, but don't
flag an addressing error. The pessimising value is Defined
for valid addresses and Undefined for invalid addresses. */
/* for assumption that doing bitwise or implements UifU */
tl_assert(V_BIT_UNDEFINED == 1 && V_BIT_DEFINED == 0);
/* (really need "UifU" here...)
vbits64 UifU= pessim64 (is pessimised by it, iow) */
vbits64 |= pessim64;
return vbits64;
}
/* Also, it appears that gcc generates string-stepping code in
32-bit chunks on 64 bit platforms. So, also grant an exception
for this case. Note that the first clause of the conditional
(VG_WORDSIZE == 8) is known at compile time, so the whole clause
will get folded out in 32 bit builds. */
if (VG_WORDSIZE == 8
&& VG_IS_4_ALIGNED(a) && nBits == 32 && n_addrs_bad < 4) {
tl_assert(V_BIT_UNDEFINED == 1 && V_BIT_DEFINED == 0);
/* (really need "UifU" here...)
vbits64 UifU= pessim64 (is pessimised by it, iow) */
vbits64 |= pessim64;
/* Mark the upper 32 bits as undefined, just to be on the safe
side. */
vbits64 |= (((ULong)V_BITS32_UNDEFINED) << 32);
return vbits64;
}
/* Exemption doesn't apply. Flag an addressing error in the normal
way. */
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
return vbits64;
}
static
__attribute__((noinline))
void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
SizeT szB = nBits / 8;
SizeT i, n_addrs_bad = 0;
UChar vbits8;
Addr ai;
Bool ok;
PROF_EVENT(35, "mc_STOREVn_slow");
/* ------------ BEGIN semi-fast cases ------------ */
/* These deal quickly-ish with the common auxiliary primary map
cases on 64-bit platforms. They are merely a speedup hack; they can be
omitted without loss of correctness/functionality. Note that in
both cases the "sizeof(void*) == 8" causes these cases to be
folded out by compilers on 32-bit platforms. The logic below
is somewhat similar to some cases extensively commented in
MC_(helperc_STOREV8).
*/
if (LIKELY(sizeof(void*) == 8
&& nBits == 64 && VG_IS_8_ALIGNED(a))) {
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off16 = SM_OFF_16(a);
UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
if (LIKELY( !is_distinguished_sm(sm) &&
(VA_BITS16_DEFINED == vabits16 ||
VA_BITS16_UNDEFINED == vabits16) )) {
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressable. */
// Convert full V-bits in register to compact 2-bit form.
if (LIKELY(V_BITS64_DEFINED == vbytes)) {
((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
return;
} else if (V_BITS64_UNDEFINED == vbytes) {
((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
return;
}
/* else fall into the slow case */
}
/* else fall into the slow case */
}
if (LIKELY(sizeof(void*) == 8
&& nBits == 32 && VG_IS_4_ALIGNED(a))) {
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off = SM_OFF(a);
UWord vabits8 = sm->vabits8[sm_off];
if (LIKELY( !is_distinguished_sm(sm) &&
(VA_BITS8_DEFINED == vabits8 ||
VA_BITS8_UNDEFINED == vabits8) )) {
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressable. */
// Convert full V-bits in register to compact 2-bit form.
if (LIKELY(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
sm->vabits8[sm_off] = VA_BITS8_DEFINED;
return;
} else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
return;
}
/* else fall into the slow case */
}
/* else fall into the slow case */
}
/* ------------ END semi-fast cases ------------ */
tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
/* Dump vbytes in memory, iterating from least to most significant
byte. At the same time establish addressability of the location. */
for (i = 0; i < szB; i++) {
PROF_EVENT(36, "mc_STOREVn_slow(loop)");
ai = a + byte_offset_w(szB, bigendian, i);
vbits8 = vbytes & 0xff;
ok = set_vbits8(ai, vbits8);
if (!ok) n_addrs_bad++;
vbytes >>= 8;
}
/* If an address error has happened, report it. */
if (n_addrs_bad > 0)
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}
/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges. ---*/
/*------------------------------------------------------------*/
static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
UWord dsm_num )
{
UWord sm_off, sm_off16;
UWord vabits2 = vabits16 & 0x3;
SizeT lenA, lenB, len_to_next_secmap;
Addr aNext;
SecMap* sm;
SecMap** sm_ptr;
SecMap* example_dsm;
PROF_EVENT(150, "set_address_range_perms");
/* Check the V+A bits make sense. */
tl_assert(VA_BITS16_NOACCESS == vabits16 ||
VA_BITS16_UNDEFINED == vabits16 ||
VA_BITS16_DEFINED == vabits16);
// This code should never write PDBs; ensure this. (See comment above
// set_vabits2().)
tl_assert(VA_BITS2_PARTDEFINED != vabits2);
if (lenT == 0)
return;
if (lenT > 256 * 1024 * 1024) {
if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
const HChar* s = "unknown???";
if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
"large range [0x%lx, 0x%lx) (%s)\n",
a, a + lenT, s);
}
}
#ifndef PERF_FAST_SARP
/*------------------ debug-only case ------------------ */
{
// Endianness doesn't matter here because all bytes are being set to
// the same value.
// Nb: We don't have to worry about updating the sec-V-bits table
// after these set_vabits2() calls because this code never writes
// VA_BITS2_PARTDEFINED values.
SizeT i;
for (i = 0; i < lenT; i++) {
set_vabits2(a + i, vabits2);
}
return;
}
#endif
/*------------------ standard handling ------------------ */
/* Get the distinguished secondary that we might want
to use (part of the space-compression scheme). */
example_dsm = &sm_distinguished[dsm_num];
// We have to handle ranges covering various combinations of partial and
// whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
// Cases marked with a '*' are common.
//
// TYPE PARTS USED
// ---- ----------
// * one partial sec-map (p) 1
// - one whole sec-map (P) 2
//
// * two partial sec-maps (pp) 1,3
// - one partial, one whole sec-map (pP) 1,2
// - one whole, one partial sec-map (Pp) 2,3
// - two whole sec-maps (PP) 2,2
//
// * one partial, one whole, one partial (pPp) 1,2,3
// - one partial, two whole (pPP) 1,2,2
// - two whole, one partial (PPp) 2,2,3
// - three whole (PPP) 2,2,2
//
// * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
// - one partial, N-1 whole (pP...PP) 1,2...2,2
// - N-1 whole, one partial (PP...Pp) 2,2...2,3
// - N whole (PP...PP) 2,2...2,2
// Break up total length (lenT) into two parts: length in the first
// sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
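// Worked example (values are illustrative): for a == 0x50001234 and
// lenT == 0x30000, aNext == 0x50010000, so lenA == 0xedcc (the tail of
// the first sec-map) and lenB == 0x21234. Part 1 handles the 0xedcc
// bytes, Part 2 points two whole sec-maps (0x20000 bytes) at the
// distinguished map, and Part 3 handles the remaining 0x1234 bytes --
// ie. the pP...Pp pattern above.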
aNext = start_of_this_sm(a) + SM_SIZE;
len_to_next_secmap = aNext - a;
if ( lenT <= len_to_next_secmap ) {
// Range entirely within one sec-map. Covers almost all cases.
PROF_EVENT(151, "set_address_range_perms-single-secmap");
lenA = lenT;
lenB = 0;
} else if (is_start_of_sm(a)) {
// Range spans at least one whole sec-map, and starts at the beginning
// of a sec-map; skip to Part 2.
PROF_EVENT(152, "set_address_range_perms-startof-secmap");
lenA = 0;
lenB = lenT;
goto part2;
} else {
// Range spans two or more sec-maps, first one is partial.
PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
lenA = len_to_next_secmap;
lenB = lenT - lenA;
}
//------------------------------------------------------------------------
// Part 1: Deal with the first sec_map. Most of the time the range will be
// entirely within a sec_map and this part alone will suffice. Also,
// doing it this way lets us avoid repeatedly testing for the crossing of
// a sec-map boundary within these loops.
//------------------------------------------------------------------------
// If it's distinguished, make it undistinguished if necessary.
sm_ptr = get_secmap_ptr(a);
if (is_distinguished_sm(*sm_ptr)) {
if (*sm_ptr == example_dsm) {
// Sec-map already has the V+A bits that we want, so skip.
PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
a = aNext;
lenA = 0;
} else {
PROF_EVENT(155, "set_address_range_perms-dist-sm1");
*sm_ptr = copy_for_writing(*sm_ptr);
}
}
sm = *sm_ptr;
// 1 byte steps
while (True) {
if (VG_IS_8_ALIGNED(a)) break;
if (lenA < 1) break;
PROF_EVENT(156, "set_address_range_perms-loop1a");
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
lenA -= 1;
}
// 8-aligned, 8 byte steps
while (True) {
if (lenA < 8) break;
PROF_EVENT(157, "set_address_range_perms-loop8a");
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = vabits16;
a += 8;
lenA -= 8;
}
// 1 byte steps
while (True) {
if (lenA < 1) break;
PROF_EVENT(158, "set_address_range_perms-loop1b");
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
lenA -= 1;
}
// We've finished the first sec-map. Is that it?
if (lenB == 0)
return;
//------------------------------------------------------------------------
// Part 2: Fast-set entire sec-maps at a time.
//------------------------------------------------------------------------
part2:
// 64KB-aligned, 64KB steps.
// Nb: we can reach here with lenB < SM_SIZE
tl_assert(0 == lenA);
while (True) {
if (lenB < SM_SIZE) break;
tl_assert(is_start_of_sm(a));
PROF_EVENT(159, "set_address_range_perms-loop64K");
sm_ptr = get_secmap_ptr(a);
if (!is_distinguished_sm(*sm_ptr)) {
PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
// Free the non-distinguished sec-map that we're replacing. This
// case happens moderately often, enough to be worthwhile.
SysRes sres = VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
tl_assert2(! sr_isError(sres), "SecMap valgrind munmap failure\n");
}
update_SM_counts(*sm_ptr, example_dsm);
// Make the sec-map entry point to the example DSM
*sm_ptr = example_dsm;
lenB -= SM_SIZE;
a += SM_SIZE;
}
// We've finished the whole sec-maps. Is that it?
if (lenB == 0)
return;
//------------------------------------------------------------------------
// Part 3: Finish off the final partial sec-map, if necessary.
//------------------------------------------------------------------------
tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);
// If it's distinguished, make it undistinguished if necessary.
sm_ptr = get_secmap_ptr(a);
if (is_distinguished_sm(*sm_ptr)) {
if (*sm_ptr == example_dsm) {
// Sec-map already has the V+A bits that we want, so stop.
PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
return;
} else {
PROF_EVENT(162, "set_address_range_perms-dist-sm2");
*sm_ptr = copy_for_writing(*sm_ptr);
}
}
sm = *sm_ptr;
// 8-aligned, 8 byte steps
while (True) {
if (lenB < 8) break;
PROF_EVENT(163, "set_address_range_perms-loop8b");
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = vabits16;
a += 8;
lenB -= 8;
}
// 1 byte steps
while (True) {
if (lenB < 1) return;
PROF_EVENT(164, "set_address_range_perms-loop1c");
sm_off = SM_OFF(a);
insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
a += 1;
lenB -= 1;
}
}
/* --- Set permissions for arbitrary address ranges --- */
void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
PROF_EVENT(40, "MC_(make_mem_noaccess)");
DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
ocache_sarp_Clear_Origins ( a, len );
}
static void make_mem_undefined ( Addr a, SizeT len )
{
PROF_EVENT(41, "make_mem_undefined");
DEBUG("make_mem_undefined(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
void MC_(make_mem_undefined_w_otag) ( Addr a, SizeT len, UInt otag )
{
PROF_EVENT(43, "MC_(make_mem_undefined_w_otag)");
DEBUG("MC_(make_mem_undefined_w_otag)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
ocache_sarp_Set_Origins ( a, len, otag );
}
static
void make_mem_undefined_w_tid_and_okind ( Addr a, SizeT len,
ThreadId tid, UInt okind )
{
UInt ecu;
ExeContext* here;
/* VG_(record_ExeContext) checks for validity of tid, and asserts
if it is invalid. So no need to do it here. */
tl_assert(okind <= 3);
here = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
tl_assert(here);
ecu = VG_(get_ECU_from_ExeContext)(here);
tl_assert(VG_(is_plausible_ECU)(ecu));
MC_(make_mem_undefined_w_otag) ( a, len, ecu | okind );
}
static
void mc_new_mem_w_tid_make_ECU ( Addr a, SizeT len, ThreadId tid )
{
make_mem_undefined_w_tid_and_okind ( a, len, tid, MC_OKIND_UNKNOWN );
}
static
void mc_new_mem_w_tid_no_ECU ( Addr a, SizeT len, ThreadId tid )
{
MC_(make_mem_undefined_w_otag) ( a, len, MC_OKIND_UNKNOWN );
}
void MC_(make_mem_defined) ( Addr a, SizeT len )
{
PROF_EVENT(42, "MC_(make_mem_defined)");
DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
if (UNLIKELY( MC_(clo_mc_level) == 3 ))
ocache_sarp_Clear_Origins ( a, len );
}
/* For each byte in [a,a+len), if the byte is addressable, make it be
defined, but if it isn't addressable, leave it alone. In other
words, a version of MC_(make_mem_defined) that doesn't mess with
addressability. Low-performance implementation. */
static void make_mem_defined_if_addressable ( Addr a, SizeT len )
{
SizeT i;
UChar vabits2;
DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
for (i = 0; i < len; i++) {
vabits2 = get_vabits2( a+i );
if (LIKELY(VA_BITS2_NOACCESS != vabits2)) {
set_vabits2(a+i, VA_BITS2_DEFINED);
if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
MC_(helperc_b_store1)( a+i, 0 ); /* clear the origin tag */
}
}
}
}
/* Similarly (needed for mprotect handling ..) */
static void make_mem_defined_if_noaccess ( Addr a, SizeT len )
{
SizeT i;
UChar vabits2;
DEBUG("make_mem_defined_if_noaccess(%p, %llu)\n", a, (ULong)len);
for (i = 0; i < len; i++) {
vabits2 = get_vabits2( a+i );
if (LIKELY(VA_BITS2_NOACCESS == vabits2)) {
set_vabits2(a+i, VA_BITS2_DEFINED);
if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
MC_(helperc_b_store1)( a+i, 0 ); /* clear the origin tag */
}
}
}
}
/* --- Block-copy permissions (needed for implementing realloc() and
sys_mremap). --- */
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
{
SizeT i, j;
UChar vabits2, vabits8;
Bool aligned, nooverlap;
DEBUG("MC_(copy_address_range_state)\n");
PROF_EVENT(50, "MC_(copy_address_range_state)");
if (len == 0 || src == dst)
return;
aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
nooverlap = src+len <= dst || dst+len <= src;
if (nooverlap && aligned) {
/* Vectorised fast case, when no overlap and suitably aligned */
/* vector loop */
i = 0;
while (len >= 4) {
vabits8 = get_vabits8_for_aligned_word32( src+i );
set_vabits8_for_aligned_word32( dst+i, vabits8 );
if (LIKELY(VA_BITS8_DEFINED == vabits8
|| VA_BITS8_UNDEFINED == vabits8
|| VA_BITS8_NOACCESS == vabits8)) {
/* do nothing */
} else {
/* have to copy secondary map info */
if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
}
i += 4;
len -= 4;
}
/* fixup loop */
while (len >= 1) {
vabits2 = get_vabits2( src+i );
set_vabits2( dst+i, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
}
i++;
len--;
}
} else {
/* We have to do things the slow way */
if (src < dst) {
for (i = 0, j = len-1; i < len; i++, j--) {
PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
vabits2 = get_vabits2( src+j );
set_vabits2( dst+j, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
}
}
}
if (src > dst) {
for (i = 0; i < len; i++) {
PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
vabits2 = get_vabits2( src+i );
set_vabits2( dst+i, vabits2 );
if (VA_BITS2_PARTDEFINED == vabits2) {
set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
}
}
}
}
}
/*------------------------------------------------------------*/
/*--- Origin tracking stuff - cache basics ---*/
/*------------------------------------------------------------*/
/* AN OVERVIEW OF THE ORIGIN TRACKING IMPLEMENTATION
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note that this implementation draws inspiration from the "origin
tracking by value piggybacking" scheme described in "Tracking Bad
Apples: Reporting the Origin of Null and Undefined Value Errors"
(Michael Bond, Nicholas Nethercote, Stephen Kent, Samuel Guyer,
Kathryn McKinley, OOPSLA07, Montreal, Oct 2007) but in fact it is
implemented completely differently.
Origin tags and ECUs -- about the shadow values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This implementation tracks the defining point of all uninitialised
values using so called "origin tags", which are 32-bit integers,
rather than using the values themselves to encode the origins. The
latter, so-called "value piggybacking", is what the OOPSLA07 paper
describes.
Origin tags, as tracked by the machinery below, are 32-bit unsigned
ints (UInts), regardless of the machine's word size. Each tag
comprises an upper 30-bit ECU field and a lower 2-bit
'kind' field. The ECU field is a number given out by m_execontext
and has a 1-1 mapping with ExeContext*s. An ECU can be used
directly as an origin tag (otag), but in fact we want to put
additional information in the 'kind' field to indicate roughly where the
tag came from. This helps print more understandable error messages
for the user -- it has no other purpose. In summary:
* Both ECUs and origin tags are represented as 32-bit words
* m_execontext and the core-tool interface deal purely in ECUs.
They have no knowledge of origin tags - that is a purely
Memcheck-internal matter.
* all valid ECUs have the lowest 2 bits zero and at least
one of the upper 30 bits nonzero (see VG_(is_plausible_ECU))
* to convert from an ECU to an otag, OR in one of the MC_OKIND_
constants defined in mc_include.h.
* to convert an otag back to an ECU, AND it with ~3
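As a minimal illustrative sketch (MC_OKIND_STACK is one of the real
constants from mc_include.h; the variable names are made up):

   UInt otag = ecu | MC_OKIND_STACK;   // ECU -> otag
   UInt ecu2 = otag & ~3u;             // otag -> ECU; ecu2 == ecu again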
One important fact is that no valid otag is zero. A zero otag is
used by the implementation to indicate "no origin", which could
mean that either the value is defined, or it is undefined but the
implementation somehow managed to lose the origin.
The ECU used for memory created by malloc etc is derived from the
stack trace at the time the malloc etc happens. This means the
mechanism can show the exact allocation point for heap-created
uninitialised values.
In contrast, it is simply too expensive to create a complete
backtrace for each stack allocation. Therefore we merely use a
depth-1 backtrace for stack allocations, which can be done once at
translation time, rather than N times at run time. The result of
this is that, for stack created uninitialised values, Memcheck can
only show the allocating function, and not what called it.
Furthermore, compilers tend to move the stack pointer just once at
the start of the function, to allocate all locals, and so in fact
the stack origin almost always simply points to the opening brace
of the function. Net result is, for stack origins, the mechanism
can tell you in which function the undefined value was created, but
that's all. Users will need to carefully check all locals in the
specified function.
Shadowing registers and memory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Memory is shadowed using a two level cache structure (ocacheL1 and
ocacheL2). Memory references are first directed to ocacheL1. This
is a traditional 2-way set associative cache with 32-byte lines and
approximate LRU replacement within each set.
A naive implementation would require storing one 32 bit otag for
each byte of memory covered, a 4:1 space overhead. Instead, there
is one otag for every 4 bytes of memory covered, plus a 4-bit mask
that shows which of the 4 bytes have that shadow value and which
have a shadow value of zero (indicating no origin). Hence a lot of
space is saved, but the cost is that only one different origin per
4 bytes of address space can be represented. This is a source of
imprecision, but how much of a problem it really is remains to be
seen.
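As an illustrative sketch of that convention (variable names made up;
'line' is assumed to be the OCacheLine covering address 'a', using the
OCacheLine layout and oc_line_offset helper defined further down), the
otag for the single byte at 'a' is recovered roughly like this:

   UWord woff = oc_line_offset(a);   // which 32-bit group within the line
   UWord boff = a & 3;               // which byte within that group
   UInt  ot   = (line->descr[woff] & (1 << boff)) ? line->w32[woff] : 0;
   // ot == 0 means no origin is known for that byte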
A cache line that contains all zeroes ("no origins") contains no
useful information, and can be ejected from the L1 cache "for
free", in the sense that a read miss on the L1 causes a line of
zeroes to be installed. However, ejecting a line containing
nonzeroes risks losing origin information permanently. In order to
prevent such lossage, ejected nonzero lines are placed in a
secondary cache (ocacheL2), which is an OSet (AVL tree) of cache
lines. This can grow arbitrarily large, and so should ensure that
Memcheck runs out of memory in preference to losing useful origin
info due to cache size limitations.
Shadowing registers is a bit tricky, because the shadow values are
32 bits, regardless of the size of the register. That gives a
problem for registers smaller than 32 bits. The solution is to
find spaces in the guest state that are unused, and use those to
shadow guest state fragments smaller than 32 bits. For example, on
ppc32/64, each vector register is 16 bytes long. If 4 bytes of the
shadow are allocated for the register's otag, then there are still
12 bytes left over which could be used to shadow 3 other values.
This implies there is some non-obvious mapping from guest state
(start,length) pairs to the relevant shadow offset (for the origin
tags). And it is unfortunately guest-architecture specific. The
mapping is contained in mc_machine.c, which is quite lengthy but
straightforward.
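To give a feel for the interface involved, mc_machine.c answers queries
of roughly the following shape for mc_translate.c (sketch; see
mc_include.h for the authoritative declaration):

   // Guest-state offset of the 4-byte otag shadow covering the guest
   // slice [offset, offset+szB), or -1 if that slice is not tracked.
   Int MC_(get_otrack_shadow_offset) ( Int offset, Int szB );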
Instrumenting the IR
~~~~~~~~~~~~~~~~~~~~
Instrumentation is largely straightforward, and done by the
functions schemeE and schemeS in mc_translate.c. These generate
code for handling the origin tags of expressions (E) and statements
(S) respectively. The rather strange names are a reference to the
"compilation schemes" shown in Simon Peyton Jones' book "The
Implementation of Functional Programming Languages" (Prentice Hall,
1987, see
http://research.microsoft.com/~simonpj/papers/slpj-book-1987/index.htm).
schemeS merely arranges to move shadow values around the guest
state to track the incoming IR. schemeE is largely trivial too.
The only significant point is how to compute the otag corresponding
to binary (or ternary, quaternary, etc) operator applications. The
rule is simple: just take whichever value is larger (32-bit
unsigned max). Constants get the special value zero. Hence this
rule always propagates a nonzero (known) otag in preference to a
zero (unknown, or more likely, value-is-defined) tag, as we want.
If two different undefined values are inputs to a binary operator
application, then which is propagated is arbitrary, but that
doesn't matter, since the program is erroneous in using either of
the values, and so there's no point in attempting to propagate
both.
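Concretely, for a binary application op(x,y), the shadow computation the
instrumenter emits corresponds to the following (sketch in C standing in
for the generated IR; otag_x and otag_y are the operands' shadow otags,
with a constant operand contributing zero):

   UInt otag_result = otag_x > otag_y ? otag_x : otag_y;   // Max32U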
Since constants are abstracted to (otag) zero, much of the
instrumentation code can be folded out without difficulty by the
generic post-instrumentation IR cleanup pass, using these rules:
Max32U(0,x) -> x, Max32U(x,0) -> x, and Max32U(x,y) where x and y are
constants is evaluated at JIT time, followed by the resulting dead
code removal. In practice this causes surprisingly few Max32Us to
survive through to backend code generation.
Integration with the V-bits machinery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is again largely straightforward. Mostly the otag and V bits
stuff are independent. The only point of interaction is when the V
bits instrumenter creates a call to a helper function to report an
uninitialised value error -- in that case it must first use schemeE
to get hold of the origin tag expression for the value, and pass
that to the helper too.
There is the usual stuff to do with setting address range
permissions. When memory is painted undefined, we must also know
the origin tag to paint with, which involves some tedious plumbing,
particularly to do with the fast case stack handlers. When memory
is painted defined or noaccess then the origin tags must be forced
to zero.
One of the goals of the implementation was to ensure that the
non-origin tracking mode isn't slowed down at all. To do this,
various functions to do with memory permissions setting (again,
mostly pertaining to the stack) are duplicated for the with- and
without-otag case.
Dealing with stack redzones, and the NIA cache
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is one of the few non-obvious parts of the implementation.
Some ABIs (amd64-ELF, ppc64-ELF, ppc32/64-XCOFF) define a small
reserved area below the stack pointer, that can be used as scratch
space by compiler generated code for functions. In the Memcheck
sources this is referred to as the "stack redzone". The important
thing here is that such redzones are considered volatile across
function calls and returns. So Memcheck takes care to mark them as
undefined for each call and return, on the afflicted platforms.
Past experience shows this is essential in order to get reliable
messages about uninitialised values that come from the stack.
So the question is, when we paint a redzone undefined, what origin
tag should we use for it? Consider a function f() calling g(). If
we paint the redzone using an otag derived from the ExeContext of
the CALL/BL instruction in f, then any errors in g causing it to
use uninitialised values that happen to lie in the redzone, will be
reported as having their origin in f. Which is highly confusing.
The same applies for returns: if, on a return, we paint the redzone
using an origin tag derived from the ExeContext of the RET/BLR
instruction in g, then any later errors in f causing it to use
uninitialised values in the redzone, will be reported as having
their origin in g. Which is just as confusing.
To do it right, in both cases we need to use an origin tag which
pertains to the instruction which dynamically follows the CALL/BL
or RET/BLR. In short, one derived from the NIA - the "next
instruction address".
To make this work, Memcheck's redzone-painting helper,
MC_(helperc_MAKE_STACK_UNINIT), now takes a third argument, the
NIA. It converts the NIA to a 1-element ExeContext, and uses that
ExeContext's ECU as the basis for the otag used to paint the
redzone. The expensive part of this is converting an NIA into an
ECU, since this happens once for every call and every return. So
we use a simple 511-line, 2-way set associative cache
(nia_to_ecu_cache) to cache the mappings, and that knocks most of
the cost out.
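Roughly, each such painting does the equivalent of the following
(sketch; the function and variable names are made up, but the VG_()
calls and MC_OKIND_STACK are the real entry points used):

   UInt nia_to_otag ( Addr nia )
   {
      // Probe nia_to_ecu_cache for 'nia' first; only on a miss do the
      // expensive part below.
      ExeContext* ec  = VG_(make_depth_1_ExeContext_from_Addr)( nia );
      UInt        ecu = VG_(get_ECU_from_ExeContext)( ec );
      return ecu | MC_OKIND_STACK;   // otag used to paint the redzone
   }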
Further background comments
~~~~~~~~~~~~~~~~~~~~~~~~~~~
> Question: why is otag a UInt? Wouldn't a UWord be better? Isn't
> it really just the address of the relevant ExeContext?
Well, it's not the address, but a value which has a 1-1 mapping
with ExeContexts, and is guaranteed not to be zero, since zero
denotes (to memcheck) "unknown origin or defined value". So these
UInts are just numbers starting at 4 and incrementing by 4; each
ExeContext is given a number when it is created. (*** NOTE this
confuses otags and ECUs; see comments above ***).
Making these otags 32-bit regardless of the machine's word size
makes the 64-bit implementation easier (next para). And it doesn't
really limit us in any way, since for the tags to overflow would
require that the program somehow caused 2^30-1 different
ExeContexts to be created, in which case it is probably in deep
trouble. Not to mention V will have soaked up many tens of
gigabytes of memory merely to store them all.
So having 64-bit origins doesn't really buy you anything, and has
the following downsides:
Suppose that instead, an otag is a UWord. This would mean that, on
a 64-bit target,
1. It becomes hard to shadow any element of guest state which is
smaller than 8 bytes. To do so means you'd need to find some
8-byte-sized hole in the guest state which you don't want to
shadow, and use that instead to hold the otag. On ppc64, the
condition code register(s) are split into 20 UChar sized pieces,
all of which need to be tracked (guest_XER_SO .. guest_CR7_0)
and so that would entail finding 160 bytes somewhere else in the
guest state.
Even on x86, I want to track origins for %AH .. %DH (bits 15:8
of %EAX .. %EDX) that are separate from %AL .. %DL (bits 7:0 of
same) and so I had to look for 4 untracked otag-sized areas in
the guest state to make that possible.
The same problem exists of course when origin tags are only 32
bits, but it's less extreme.
2. (More compelling) it doubles the size of the origin shadow
memory. Given that the shadow memory is organised as a fixed
size cache, and that accuracy of tracking is limited by origins
falling out the cache due to space conflicts, this isn't good.
> Another question: is the origin tracking perfect, or are there
> cases where it fails to determine an origin?
It is imperfect for at least the following reasons, and
probably more:
* Insufficient capacity in the origin cache. When a line is
evicted from the cache it is gone forever, and so subsequent
queries for the line produce zero, indicating no origin
information. Interestingly, a line containing all zeroes can be
evicted "free" from the cache, since it contains no useful
information, so there is scope perhaps for some cleverer cache
management schemes. (*** NOTE, with the introduction of the
second level origin tag cache, ocacheL2, this is no longer a
problem. ***)
* The origin cache only stores one otag per 32-bits of address
space, plus 4 bits indicating which of the 4 bytes has that tag
and which are considered defined. The result is that if two
undefined bytes in the same word are stored in memory, the first
stored byte's origin will be lost and replaced by the origin for
the second byte.
* Nonzero origin tags for defined values. Consider a binary
operator application op(x,y). Suppose y is undefined (and so has
a valid nonzero origin tag), and x is defined, but erroneously
has a nonzero origin tag (defined values should have tag zero).
If the erroneous tag has a numeric value greater than y's tag,
then the rule for propagating origin tags though binary
operations, which is simply to take the unsigned max of the two
tags, will erroneously propagate x's tag rather than y's.
* Some obscure uses of x86/amd64 byte registers can cause lossage
or confusion of origins. %AH .. %DH are treated as different
from, and unrelated to, their parent registers, %EAX .. %EDX.
So some weird sequences like
movb undefined-value, %AH
movb defined-value, %AL
.. use %AX or %EAX ..
will cause the origin attributed to %AH to be ignored, since %AL,
%AX, %EAX are treated as the same register, and %AH as a
completely separate one.
But having said all that, it actually seems to work fairly well in
practice.
*/
static UWord stats_ocacheL1_find = 0;
static UWord stats_ocacheL1_found_at_1 = 0;
static UWord stats_ocacheL1_found_at_N = 0;
static UWord stats_ocacheL1_misses = 0;
static UWord stats_ocacheL1_lossage = 0;
static UWord stats_ocacheL1_movefwds = 0;
static UWord stats__ocacheL2_refs = 0;
static UWord stats__ocacheL2_misses = 0;
static UWord stats__ocacheL2_n_nodes_max = 0;
/* Cache of 32-bit values, one every 32 bits of address space */
#define OC_BITS_PER_LINE 5
#define OC_W32S_PER_LINE (1 << (OC_BITS_PER_LINE - 2))
static INLINE UWord oc_line_offset ( Addr a ) {
return (a >> 2) & (OC_W32S_PER_LINE - 1);
}
static INLINE Bool is_valid_oc_tag ( Addr tag ) {
return 0 == (tag & ((1 << OC_BITS_PER_LINE) - 1));
}
#define OC_LINES_PER_SET 2
#define OC_N_SET_BITS 20
#define OC_N_SETS (1 << OC_N_SET_BITS)
/* These settings give:
64 bit host: ocache: 100,663,296 sizeB 67,108,864 useful
32 bit host: ocache: 92,274,688 sizeB 67,108,864 useful
*/
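/* Where those figures come from, assuming no padding in OCacheLine
   (defined just below): there are OC_N_SETS * OC_LINES_PER_SET
   = 2^20 * 2 = 2,097,152 lines. Each line carries 8 UInt otags (32
   useful bytes: 2,097,152 * 32 = 67,108,864), plus 8 descr bytes and an
   Addr-sized tag, so 48 bytes/line on a 64-bit host (2,097,152 * 48 =
   100,663,296) and 44 bytes/line on a 32-bit host (2,097,152 * 44 =
   92,274,688). */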
#define OC_MOVE_FORWARDS_EVERY_BITS 7
typedef
struct {
Addr tag;
UInt w32[OC_W32S_PER_LINE];
UChar descr[OC_W32S_PER_LINE];
}
OCacheLine;
/* Classify and also sanity-check 'line'. Return 'e' (empty) if not
in use, 'n' (nonzero) if it contains at least one valid origin tag,
and 'z' if all the represented tags are zero. */
static UChar classify_OCacheLine ( OCacheLine* line )
{
UWord i;
if (line->tag == 1/*invalid*/)
return 'e'; /* EMPTY */
tl_assert(is_valid_oc_tag(line->tag));
for (i = 0; i < OC_W32S_PER_LINE; i++) {
tl_assert(0 == ((~0xF) & line->descr[i]));
if (line->w32[i] > 0 && line->descr[i] > 0)
return 'n'; /* NONZERO - contains useful info */
}
return 'z'; /* ZERO - no useful info */
}
typedef
struct {
OCacheLine line[OC_LINES_PER_SET];
}
OCacheSet;
typedef
struct {
OCacheSet set[OC_N_SETS];
}
OCache;
static OCache* ocacheL1 = NULL;
static UWord ocacheL1_event_ctr = 0;
static void init_ocacheL2 ( void ); /* fwds */
static void init_OCache ( void )
{
UWord line, set;
tl_assert(MC_(clo_mc_level) >= 3);
tl_assert(ocacheL1 == NULL);
ocacheL1 = VG_(am_shadow_alloc)(sizeof(OCache));
if (ocacheL1 == NULL) {
VG_(out_of_memory_NORETURN)( "memcheck:allocating ocacheL1",
sizeof(OCache) );
}
tl_assert(ocacheL1 != NULL);
for (set = 0; set < OC_N_SETS; set++) {
for (line = 0; line < OC_LINES_PER_SET; line++) {
ocacheL1->set[set].line[line].tag = 1/*invalid*/;
}
}
init_ocacheL2();
}
static void moveLineForwards ( OCacheSet* set, UWord lineno )
{
OCacheLine tmp;
stats_ocacheL1_movefwds++;
tl_assert(lineno > 0 && lineno < OC_LINES_PER_SET);
tmp = set->line[lineno-1];
set->line[lineno-1] = set->line[lineno];
set->line[lineno] = tmp;
}
static void zeroise_OCacheLine ( OCacheLine* line, Addr tag ) {
UWord i;
for (i = 0; i < OC_W32S_PER_LINE; i++) {
line->w32[i] = 0; /* NO ORIGIN */
line->descr[i] = 0; /* REALLY REALLY NO ORIGIN! */
}
line->tag = tag;
}
//////////////////////////////////////////////////////////////
//// OCache backing store
static OSet* ocacheL2 = NULL;
static void* ocacheL2_malloc ( const HChar* cc, SizeT szB ) {
return VG_(malloc)(cc, szB);
}
static void ocacheL2_free ( void* v ) {
VG_(free)( v );
}
/* Stats: # nodes currently in tree */
static UWord stats__ocacheL2_n_nodes = 0;
static void init_ocacheL2 ( void )
{
tl_assert(!ocacheL2);
tl_assert(sizeof(Word) == sizeof(Addr)); /* since OCacheLine.tag :: Addr */
tl_assert(0 == offsetof(OCacheLine,tag));
ocacheL2
= VG_(OSetGen_Create)( offsetof(OCacheLine,tag),
NULL, /* fast cmp */
ocacheL2_malloc, "mc.ioL2", ocacheL2_free);
tl_assert(ocacheL2);
stats__ocacheL2_n_nodes = 0;
}
/* Find line with the given tag in the tree, or NULL if not found. */
static OCacheLine* ocacheL2_find_tag ( Addr tag )
{
OCacheLine* line;
tl_assert(is_valid_oc_tag(tag));
stats__ocacheL2_refs++;
line = VG_(OSetGen_Lookup)( ocacheL2, &tag );
return line;
}
/* Delete the line with the given tag from the tree, if it is present, and
free up the associated memory. */
static void ocacheL2_del_tag ( Addr tag )
{
OCacheLine* line;
tl_assert(is_valid_oc_tag(tag));
stats__ocacheL2_refs++;
line = VG_(OSetGen_Remove)( ocacheL2, &tag );
if (line) {
VG_(OSetGen_FreeNode)(ocacheL2, line);
tl_assert(stats__ocacheL2_n_nodes > 0);
stats__ocacheL2_n_nodes--;
}
}
/* Add a copy of the given line to the tree. It must not already be
present. */
static void ocacheL2_add_line ( OCacheLine* line )
{
OCacheLine* copy;
tl_assert(is_valid_oc_tag(line->tag));
copy = VG_(OSetGen_AllocNode)( ocacheL2, sizeof(OCacheLine) );
tl_assert(copy);
*copy = *line;
stats__ocacheL2_refs++;
VG_(OSetGen_Insert)( ocacheL2, copy );
stats__ocacheL2_n_nodes++;
if (stats__ocacheL2_n_nodes > stats__ocacheL2_n_nodes_max)
stats__ocacheL2_n_nodes_max = stats__ocacheL2_n_nodes;
}
////
//////////////////////////////////////////////////////////////
__attribute__((noinline))
static OCacheLine* find_OCacheLine_SLOW ( Addr a )
{
OCacheLine *victim, *inL2;
UChar c;
UWord line;
UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
UWord tag = a & tagmask;
tl_assert(setno >= 0 && setno < OC_N_SETS);
/* we already tried line == 0; skip therefore. */
for (line = 1; line < OC_LINES_PER_SET; line++) {
if (ocacheL1->set[setno].line[line].tag == tag) {
if (line == 1) {
stats_ocacheL1_found_at_1++;
} else {
stats_ocacheL1_found_at_N++;
}
if (UNLIKELY(0 == (ocacheL1_event_ctr++
& ((1<<OC_MOVE_FORWARDS_EVERY_BITS)-1)))) {
moveLineForwards( &ocacheL1->set[setno], line );
line--;
}
return &ocacheL1->set[setno].line[line];
}
}
/* A miss. Use the last slot. Implicitly this means we're
ejecting the line in the last slot. */
stats_ocacheL1_misses++;
tl_assert(line == OC_LINES_PER_SET);
line--;
tl_assert(line > 0);
/* First, move the to-be-ejected line to the L2 cache. */
victim = &ocacheL1->set[setno].line[line];
c = classify_OCacheLine(victim);
switch (c) {
case 'e':
/* the line is empty (has invalid tag); ignore it. */
break;
case 'z':
/* line contains zeroes. We must ensure the backing store is
updated accordingly, either by copying the line there
verbatim, or by ensuring it isn't present there. We
choose the latter on the basis that it reduces the size of
the backing store. */
ocacheL2_del_tag( victim->tag );
break;
case 'n':
/* line contains at least one real, useful origin. Copy it
to the backing store. */
stats_ocacheL1_lossage++;
inL2 = ocacheL2_find_tag( victim->tag );
if (inL2) {
*inL2 = *victim;
} else {
ocacheL2_add_line( victim );
}
break;
default:
tl_assert(0);
}
/* Now we must reload the L1 cache from the backing tree, if
possible. */
tl_assert(tag != victim->tag); /* stay sane */
inL2 = ocacheL2_find_tag( tag );
if (inL2) {
/* We're in luck. It's in the L2. */
ocacheL1->set[setno].line[line] = *inL2;
} else {
/* Missed at both levels of the cache hierarchy. We have to
declare it as full of zeroes (unknown origins). */
stats__ocacheL2_misses++;
zeroise_OCacheLine( &ocacheL1->set[setno].line[line], tag );
}
/* Move it one forwards */
moveLineForwards( &ocacheL1->set[setno], line );
line--;
return &ocacheL1->set[setno].line[line];
}
static INLINE OCacheLine* find_OCacheLine ( Addr a )
{
UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
UWord tag = a & tagmask;
stats_ocacheL1_find++;
if (OC_ENABLE_ASSERTIONS) {
tl_assert(setno >= 0 && setno < OC_N_SETS);
tl_assert(0 == (tag & (4 * OC_W32S_PER_LINE - 1)));
}
if (LIKELY(ocacheL1->set[setno].line[0].tag == tag)) {
return &ocacheL1->set[setno].line[0];
}
return find_OCacheLine_SLOW( a );
}
static INLINE void set_aligned_word64_Origin_to_undef ( Addr a, UInt otag )
{
//// BEGIN inlined, specialised version of MC_(helperc_b_store8)
//// Set the origins for a+0 .. a+7
{ OCacheLine* line;
UWord lineoff = oc_line_offset(a);
if (OC_ENABLE_ASSERTIONS) {
tl_assert(lineoff >= 0
&& lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
}
line = find_OCacheLine( a );
line->descr[lineoff+0] = 0xF;
line->descr[lineoff+1] = 0xF;
line->w32[lineoff+0] = otag;
line->w32[lineoff+1] = otag;
}
//// END inlined, specialised version of MC_(helperc_b_store8)
}
/*------------------------------------------------------------*/
/*--- Aligned fast case permission setters, ---*/
/*--- for dealing with stacks ---*/
/*------------------------------------------------------------*/
/*--------------------- 32-bit ---------------------*/
/* Nb: by "aligned" here we mean 4-byte aligned */
static INLINE void make_aligned_word32_undefined ( Addr a )
{
PROF_EVENT(300, "make_aligned_word32_undefined");
#ifndef PERF_FAST_STACK2
make_mem_undefined(a, 4);
#else
{
UWord sm_off;
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
make_mem_undefined(a, 4);
return;
}
sm = get_secmap_for_writing_low(a);
sm_off = SM_OFF(a);
sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
}
#endif
}
static INLINE
void make_aligned_word32_undefined_w_otag ( Addr a, UInt otag )
{
make_aligned_word32_undefined(a);
//// BEGIN inlined, specialised version of MC_(helperc_b_store4)
//// Set the origins for a+0 .. a+3
{ OCacheLine* line;
UWord lineoff = oc_line_offset(a);
if (OC_ENABLE_ASSERTIONS) {
tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
}
line = find_OCacheLine( a );
line->descr[lineoff] = 0xF;
line->w32[lineoff] = otag;
}
//// END inlined, specialised version of MC_(helperc_b_store4)
}
static INLINE
void make_aligned_word32_noaccess ( Addr a )
{
PROF_EVENT(310, "make_aligned_word32_noaccess");
#ifndef PERF_FAST_STACK2
MC_(make_mem_noaccess)(a, 4);
#else
{
UWord sm_off;
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
MC_(make_mem_noaccess)(a, 4);
return;
}
sm = get_secmap_for_writing_low(a);
sm_off = SM_OFF(a);
sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
//// BEGIN inlined, specialised version of MC_(helperc_b_store4)
//// Set the origins for a+0 .. a+3.
if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
OCacheLine* line;
UWord lineoff = oc_line_offset(a);
if (OC_ENABLE_ASSERTIONS) {
tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
}
line = find_OCacheLine( a );
line->descr[lineoff] = 0;
}
//// END inlined, specialised version of MC_(helperc_b_store4)
}
#endif
}
/*--------------------- 64-bit ---------------------*/
/* Nb: by "aligned" here we mean 8-byte aligned */
static INLINE void make_aligned_word64_undefined ( Addr a )
{
PROF_EVENT(320, "make_aligned_word64_undefined");
#ifndef PERF_FAST_STACK2
make_mem_undefined(a, 8);
#else
{
UWord sm_off16;
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
make_mem_undefined(a, 8);
return;
}
sm = get_secmap_for_writing_low(a);
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
}
#endif
}
static INLINE
void make_aligned_word64_undefined_w_otag ( Addr a, UInt otag )
{
make_aligned_word64_undefined(a);
//// BEGIN inlined, specialised version of MC_(helperc_b_store8)
//// Set the origins for a+0 .. a+7
{ OCacheLine* line;
UWord lineoff = oc_line_offset(a);
tl_assert(lineoff >= 0
&& lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
line = find_OCacheLine( a );
line->descr[lineoff+0] = 0xF;
line->descr[lineoff+1] = 0xF;
line->w32[lineoff+0] = otag;
line->w32[lineoff+1] = otag;
}
//// END inlined, specialised version of MC_(helperc_b_store8)
}
static INLINE
void make_aligned_word64_noaccess ( Addr a )
{
PROF_EVENT(330, "make_aligned_word64_noaccess");
#ifndef PERF_FAST_STACK2
MC_(make_mem_noaccess)(a, 8);
#else
{
UWord sm_off16;
SecMap* sm;
if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
MC_(make_mem_noaccess)(a, 8);
return;
}
sm = get_secmap_for_writing_low(a);
sm_off16 = SM_OFF_16(a);
((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
//// BEGIN inlined, specialised version of MC_(helperc_b_store8)
//// Clear the origins for a+0 .. a+7.
if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
OCacheLine* line;
UWord lineoff = oc_line_offset(a);
tl_assert(lineoff >= 0
&& lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
line = find_OCacheLine( a );
line->descr[lineoff+0] = 0;
line->descr[lineoff+1] = 0;
}
//// END inlined, specialised version of MC_(helperc_b_store8)
}
#endif
}
/*------------------------------------------------------------*/
/*--- Stack pointer adjustment ---*/
/*------------------------------------------------------------*/
#ifdef PERF_FAST_STACK
# define MAYBE_USED
#else
# define MAYBE_USED __attribute__((unused))
#endif
/*--------------- adjustment by 4 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_4_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
PROF_EVENT(110, "new_mem_stack_4");
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
} else {
MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 4, otag );
}
}
MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
{
PROF_EVENT(110, "new_mem_stack_4");
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else {
make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
}
}
MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
{
PROF_EVENT(120, "die_mem_stack_4");
if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
}
}
/*--------------- adjustment by 8 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_8_w_ECU(Addr new_SP, UInt ecu)
{
UInt otag = ecu | MC_OKIND_STACK;
PROF_EVENT(111, "new_mem_stack_8");
if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
make_aligned_word64_undefined_w_otag (