[PATCH v2 2/2] ari, btrace: avoid unsigned long long
Pedro Alves
palves@redhat.com
Fri Jul 10 14:05:00 GMT 2015
On 07/10/2015 08:16 AM, Metzger, Markus T wrote:
>> -----Original Message-----
>> From: Pedro Alves [mailto:palves@redhat.com]
>> Sent: Thursday, July 9, 2015 1:15 PM
>> To: Metzger, Markus T
>> Cc: gdb-patches@sourceware.org
>> Subject: Re: [PATCH v2 2/2] ari, btrace: avoid unsigned long long
>>
>> On 07/09/2015 07:08 AM, Markus Metzger wrote:
>>> Fix the ARI warning about the use of unsigned long long. We can't use
>> ULONGEST
>>> as this is defined unsigned long on 64-bit systems.
>>
>> But, what exactly would break?
>
> I changed the commit message to this:
>
> Fix the ARI warning about the use of unsigned long long. We can't use
> ULONGEST as this is defined unsigned long on 64-bit systems. This will
> result in a compile error when storing a pointer to an unsigned long long
> structure field (declared in perf_evene.h as __u64) in a ULONGEST * variable.
>
> Use unsigned long to hold the buffer size inside GDB and __u64 when
> interfacing the kernel.
>
>
> Is that OK?
>
That's much clearer, thanks. Typo above: "perf_evene.h" should be "perf_event.h".
>>> Use unsigned long to hold
>>> the buffer size inside GDB
>>
>> Why not use size_t instead then?
>
> It's another typedef. And without a clearly defined size.
But it's the right type to use to represent a buffer size,
and it's the type that mmap expects too. So I'd find it
all self-documenting that way. See adjusted version of your patch
below. I added a few extra comments to clarify the "unsigned int"
and UINT_MAX uses, which were not clear at all to me before.
(build tested on 64-bit and 32-bit, but otherwise not
tested; I'm still on the machine that doesn't do btrace.)
WDYT?
From: Markus Metzger <markus.t.metzger@intel.com>
Date: 2015-07-10 14:55:53 +0100
---
gdb/btrace.c | 6 ++-
gdb/common/btrace-common.h | 12 +++++-
gdb/nat/linux-btrace.c | 84 ++++++++++++++++++++++++++++----------------
gdb/nat/linux-btrace.h | 6 ++-
4 files changed, 68 insertions(+), 40 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 731d237..cc442ce 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -1414,17 +1414,17 @@ parse_xml_btrace_block (struct gdb_xml_parser *parser,
static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
- gdb_byte **pdata, unsigned long *psize)
+ gdb_byte **pdata, size_t *psize)
{
struct cleanup *cleanup;
gdb_byte *data, *bin;
- unsigned long size;
+ size_t size;
size_t len;
len = strlen (body_text);
size = len / 2;
- if ((size_t) size * 2 != len)
+ if (size * 2 != len)
gdb_xml_error (parser, _("Bad raw data size."));
bin = data = xmalloc (size);
diff --git a/gdb/common/btrace-common.h b/gdb/common/btrace-common.h
index f22efc5..96df283 100644
--- a/gdb/common/btrace-common.h
+++ b/gdb/common/btrace-common.h
@@ -96,7 +96,10 @@ struct btrace_cpu
struct btrace_config_bts
{
- /* The size of the branch trace buffer in bytes. */
+ /* The size of the branch trace buffer in bytes. This is an
+ unsigned int instead of a size_t because it is registered as
+ control variable for the "set record btrace bts buffer-size"
+ command. */
unsigned int size;
};
@@ -104,7 +107,10 @@ struct btrace_config_bts
struct btrace_config_pt
{
- /* The size of the branch trace buffer in bytes. */
+ /* The size of the branch trace buffer in bytes. This is an
+ unsigned int instead of a size_t because it is registered as
+ control variable for the "set record btrace pt buffer-size"
+ command. */
unsigned int size;
};
@@ -152,7 +158,7 @@ struct btrace_data_pt
gdb_byte *data;
/* The size of DATA in bytes. */
- unsigned long size;
+ size_t size;
};
/* The branch trace data. */
diff --git a/gdb/nat/linux-btrace.c b/gdb/nat/linux-btrace.c
index b6e13d3..3ccf833 100644
--- a/gdb/nat/linux-btrace.c
+++ b/gdb/nat/linux-btrace.c
@@ -111,12 +111,13 @@ perf_event_new_data (const struct perf_event_buffer *pev)
The caller is responsible for freeing the memory. */
static gdb_byte *
-perf_event_read (const struct perf_event_buffer *pev, unsigned long data_head,
- unsigned long size)
+perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
+ size_t size)
{
const gdb_byte *begin, *end, *start, *stop;
gdb_byte *buffer;
- unsigned long data_tail, buffer_size;
+ size_t buffer_size;
+ __u64 data_tail;
if (size == 0)
return NULL;
@@ -149,9 +150,10 @@ perf_event_read (const struct perf_event_buffer *pev, unsigned long data_head,
static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
- unsigned long *psize)
+ size_t *psize)
{
- unsigned long data_head, size;
+ size_t size;
+ __u64 data_head;
data_head = *pev->data_head;
@@ -270,11 +272,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
const uint8_t *end, const uint8_t *start,
- unsigned long long size)
+ size_t size)
{
VEC (btrace_block_s) *btrace = NULL;
struct perf_event_sample sample;
- unsigned long long read = 0;
+ size_t read = 0;
struct btrace_block block = { 0, 0 };
struct regcache *regcache;
@@ -642,7 +644,8 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
struct perf_event_mmap_page *header;
struct btrace_target_info *tinfo;
struct btrace_tinfo_bts *bts;
- unsigned long long size, pages, data_offset, data_size;
+ size_t size, pages;
+ __u64 data_offset;
int pid, pg;
tinfo = xzalloc (sizeof (*tinfo));
@@ -674,16 +677,16 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
goto err_out;
/* Convert the requested size in bytes to pages (rounding up). */
- pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = ((size_t) conf->size + PAGE_SIZE - 1) / PAGE_SIZE;
/* We need at least one page. */
if (pages == 0)
pages = 1;
/* The buffer size can be requested in powers of two pages. Adjust PAGES
to the next power of two. */
- for (pg = 0; pages != (1u << pg); ++pg)
- if ((pages & (1u << pg)) != 0)
- pages += (1u << pg);
+ for (pg = 0; pages != (((size_t) 1) << pg); ++pg)
+ if ((pages & (((size_t) 1) << pg)) != 0)
+ pages += (((size_t) 1) << pg);
/* We try to allocate the requested size.
If that fails, try to get as much as we can. */
@@ -692,12 +695,14 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
size_t length;
size = pages * PAGE_SIZE;
- length = size + PAGE_SIZE;
- /* Check for overflows. */
- if ((unsigned long long) length < size)
+ /* Don't ask for more than we can represent in the configuration
+ (with "set record btrace bts buffer-size"). */
+ if (UINT_MAX < size)
continue;
+ length = size + PAGE_SIZE;
+
/* The number of pages we request needs to be a power of two. */
header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
if (header != MAP_FAILED)
@@ -708,23 +713,33 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
goto err_file;
data_offset = PAGE_SIZE;
- data_size = size;
#if defined (PERF_ATTR_SIZE_VER5)
if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
{
+ __u64 data_size;
+
data_offset = header->data_offset;
data_size = header->data_size;
+
+ size = (size_t) data_size;
+
+ /* Check for overflow. */
+ if ((__u64) size != data_size)
+ {
+ munmap ((void *) header, size + PAGE_SIZE);
+ goto err_file;
+ }
}
#endif /* defined (PERF_ATTR_SIZE_VER5) */
bts->header = header;
bts->bts.mem = ((const uint8_t *) header) + data_offset;
- bts->bts.size = data_size;
+ bts->bts.size = size;
bts->bts.data_head = &header->data_head;
bts->bts.last_head = 0;
- tinfo->conf.bts.size = data_size;
+ tinfo->conf.bts.size = (unsigned int) size;
return tinfo;
err_file:
@@ -746,7 +761,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
struct perf_event_mmap_page *header;
struct btrace_target_info *tinfo;
struct btrace_tinfo_pt *pt;
- unsigned long long pages, size;
+ size_t size, pages;
int pid, pg, errcode, type;
if (conf->size == 0)
@@ -788,16 +803,16 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
header->aux_offset = header->data_offset + header->data_size;
/* Convert the requested size in bytes to pages (rounding up). */
- pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = ((size_t) conf->size + PAGE_SIZE - 1) / PAGE_SIZE;
/* We need at least one page. */
if (pages == 0)
pages = 1;
/* The buffer size can be requested in powers of two pages. Adjust PAGES
to the next power of two. */
- for (pg = 0; pages != (1u << pg); ++pg)
- if ((pages & (1u << pg)) != 0)
- pages += (1u << pg);
+ for (pg = 0; pages != (((size_t) 1) << pg); ++pg)
+ if ((pages & (((size_t) 1) << pg)) != 0)
+ pages += (((size_t) 1) << pg);
/* We try to allocate the requested size.
If that fails, try to get as much as we can. */
@@ -806,12 +821,13 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
size_t length;
size = pages * PAGE_SIZE;
- length = size;
- /* Check for overflows. */
- if ((unsigned long long) length < size)
+ /* Don't ask for more than we can represent in the configuration
+ (with "set record btrace pt buffer-size"). */
+ if (UINT_MAX < size)
continue;
+ length = size;
header->aux_size = size;
pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
@@ -827,7 +843,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
pt->pt.size = size;
pt->pt.data_head = &header->aux_head;
- tinfo->conf.pt.size = size;
+ tinfo->conf.pt.size = (unsigned int) size;
return tinfo;
err_conf:
@@ -938,7 +954,8 @@ linux_read_bts (struct btrace_data_bts *btrace,
{
struct perf_event_buffer *pevent;
const uint8_t *begin, *end, *start;
- unsigned long long data_head, data_tail, buffer_size, size;
+ size_t buffer_size, size;
+ __u64 data_head, data_tail;
unsigned int retries = 5;
pevent = &tinfo->variant.bts.bts;
@@ -961,6 +978,8 @@ linux_read_bts (struct btrace_data_bts *btrace,
if (type == BTRACE_READ_DELTA)
{
+ __u64 data_size;
+
/* Determine the number of bytes to read and check for buffer
overflows. */
@@ -971,9 +990,12 @@ linux_read_bts (struct btrace_data_bts *btrace,
return BTRACE_ERR_OVERFLOW;
/* If the buffer is smaller than the trace delta, we overflowed. */
- size = data_head - data_tail;
- if (buffer_size < size)
+ data_size = data_head - data_tail;
+ if (buffer_size < data_size)
return BTRACE_ERR_OVERFLOW;
+
+ /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
+ size = data_size;
}
else
{
@@ -982,7 +1004,7 @@ linux_read_bts (struct btrace_data_bts *btrace,
/* Adjust the size if the buffer has not overflowed, yet. */
if (data_head < size)
- size = data_head;
+ size = (size_t) data_head;
}
/* Data_head keeps growing; the buffer itself is circular. */
diff --git a/gdb/nat/linux-btrace.h b/gdb/nat/linux-btrace.h
index b680bf5..5ea87a8 100644
--- a/gdb/nat/linux-btrace.h
+++ b/gdb/nat/linux-btrace.h
@@ -38,13 +38,13 @@ struct perf_event_buffer
const uint8_t *mem;
/* The size of the mapped memory in bytes. */
- unsigned long long size;
+ size_t size;
/* A pointer to the data_head field for this buffer. */
- volatile unsigned long long *data_head;
+ volatile __u64 *data_head;
/* The data_head value from the last read. */
- unsigned long long last_head;
+ __u64 last_head;
};
/* Branch trace target information for BTS tracing. */
More information about the Gdb-patches
mailing list