Adjust read_value_memory to use to_xfer_partial

Now that the ctf and tfile targets have their own to_xfer_partial
implementations, read_value_memory can be simplified considerably:
call target_xfer_partial in a loop, check the return value, and mark
bytes unavailable when necessary.

gdb:

2014-02-23  Yao Qi  <yao@codesourcery.com>

	* valops.c (read_value_memory): Rewrite it.  Call
	target_xfer_partial in a loop.
	* exec.h (section_table_available_memory): Remove declaration.
	Move comments to ...
	* exec.c (section_table_available_memory): ... here.  Make it static.
Yao Qi 2013-11-05 09:50:21 +08:00
parent 1ee79381dd
commit 5a2eb0ef7f
4 changed files with 36 additions and 83 deletions
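
Before the per-file diffs, here is a minimal standalone sketch of the loop shape the rewrite adopts. Every name in it (xfer_partial_stub, the xfer_status enum, the fake 64-byte memory) is invented for illustration only and merely stands in for GDB's target_xfer_partial machinery; the real code is the valops.c hunk at the bottom of this page.

    /* Illustration only: a caller-side loop over a partial-transfer
       primitive.  xfer_partial_stub fakes a 64-byte memory whose second
       half is unavailable, the way a trace-frame target might be.  */
    #include <stdio.h>
    #include <string.h>

    enum xfer_status { XFER_OK, XFER_UNAVAILABLE, XFER_EOF };

    /* Hypothetical stand-in for target_xfer_partial: copy up to LEN bytes
       of fake memory at ADDR into DEST, or report them unavailable.  */
    static enum xfer_status
    xfer_partial_stub (unsigned char *dest, unsigned long addr,
                       unsigned long len, unsigned long *xfered_len)
    {
      const unsigned long mem_size = 64, avail_end = 32;

      if (addr >= mem_size)
        return XFER_EOF;
      if (addr >= avail_end)
        {
          /* Tell the caller how many bytes are unavailable from ADDR on.  */
          *xfered_len = (addr + len > mem_size) ? mem_size - addr : len;
          return XFER_UNAVAILABLE;
        }
      /* Transfer only up to the available boundary; the caller loops.  */
      *xfered_len = (addr + len > avail_end) ? avail_end - addr : len;
      memset (dest, 0xab, *xfered_len);
      return XFER_OK;
    }

    int
    main (void)
    {
      unsigned char buffer[48];
      unsigned long memaddr = 16, length = sizeof (buffer);
      unsigned long xfered = 0;

      /* Same shape as the new read_value_memory: request the remainder,
         act on the status, then advance by whatever was handled.  */
      while (xfered < length)
        {
          unsigned long xfered_len = 0;
          enum xfer_status status
            = xfer_partial_stub (buffer + xfered, memaddr + xfered,
                                 length - xfered, &xfered_len);

          if (status == XFER_OK)
            ;                        /* Bytes landed in the buffer.  */
          else if (status == XFER_UNAVAILABLE)
            printf ("bytes [%lu, %lu) unavailable\n",
                    xfered, xfered + xfered_len);
          else
            {
              printf ("read error at offset %lu\n", xfered);
              return 1;
            }

          xfered += xfered_len;
        }

      printf ("handled %lu bytes\n", xfered);
      return 0;
    }

The sketch terminates because both the OK and the unavailable paths report a non-zero xfered_len, so the offset always advances; the real loop additionally calls QUIT on each iteration so a long read stays interruptible.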

gdb/ChangeLog

@@ -1,3 +1,12 @@
+2014-02-23  Yao Qi  <yao@codesourcery.com>
+
+	* valops.c (read_value_memory): Rewrite it.  Call
+	target_xfer_partial in a loop.
+	* exec.h (section_table_available_memory): Remove declaration.
+	Move comments to ...
+	* exec.c (section_table_available_memory): ... here.  Make it
+	static.
+
 2014-02-23  Yao Qi  <yao@codesourcery.com>
 
 	* exec.c (section_table_read_available_memory): New function.

gdb/exec.c

@@ -577,7 +577,12 @@ exec_read_partial_read_only (gdb_byte *readbuf, ULONGEST offset,
   return TARGET_XFER_E_IO;
 }
 
-VEC(mem_range_s) *
+/* Appends all read-only memory ranges found in the target section
+   table defined by SECTIONS and SECTIONS_END, starting at (and
+   intersected with) MEMADDR for LEN bytes.  Returns the augmented
+   VEC.  */
+
+static VEC(mem_range_s) *
 section_table_available_memory (VEC(mem_range_s) *memory,
 				CORE_ADDR memaddr, ULONGEST len,
 				struct target_section *sections,

gdb/exec.h

@@ -55,17 +55,6 @@ extern enum target_xfer_status
 exec_read_partial_read_only (gdb_byte *readbuf, ULONGEST offset,
 			     ULONGEST len, ULONGEST *xfered_len);
 
-/* Appends all read-only memory ranges found in the target section
-   table defined by SECTIONS and SECTIONS_END, starting at (and
-   intersected with) MEMADDR for LEN bytes.  Returns the augmented
-   VEC.  */
-
-extern VEC(mem_range_s) *
-section_table_available_memory (VEC(mem_range_s) *ranges,
-				CORE_ADDR memaddr, ULONGEST len,
-				struct target_section *sections,
-				struct target_section *sections_end);
-
 /* Read or write from mappable sections of BFD executable files.
 
    Request to transfer up to LEN 8-bit bytes of the target sections

gdb/valops.c

@@ -949,81 +949,31 @@ read_value_memory (struct value *val, int embedded_offset,
 		   int stack, CORE_ADDR memaddr,
 		   gdb_byte *buffer, size_t length)
 {
-  if (length)
-    {
-      VEC(mem_range_s) *available_memory;
-
-      if (!traceframe_available_memory (&available_memory, memaddr, length))
-	{
-	  if (stack)
-	    read_stack (memaddr, buffer, length);
-	  else
-	    read_memory (memaddr, buffer, length);
-	}
-      else
-	{
-	  struct target_section_table *table;
-	  struct cleanup *old_chain;
-	  CORE_ADDR unavail;
-	  mem_range_s *r;
-	  int i;
-
-	  /* Fallback to reading from read-only sections.  */
-	  table = target_get_section_table (&exec_ops);
-	  available_memory =
-	    section_table_available_memory (available_memory,
-					    memaddr, length,
-					    table->sections,
-					    table->sections_end);
-
-	  old_chain = make_cleanup (VEC_cleanup(mem_range_s),
-				    &available_memory);
-
-	  normalize_mem_ranges (available_memory);
-
-	  /* Mark which bytes are unavailable, and read those which
-	     are available.  */
-
-	  unavail = memaddr;
-
-	  for (i = 0;
-	       VEC_iterate (mem_range_s, available_memory, i, r);
-	       i++)
-	    {
-	      if (mem_ranges_overlap (r->start, r->length,
-				      memaddr, length))
-		{
-		  CORE_ADDR lo1, hi1, lo2, hi2;
-		  CORE_ADDR start, end;
-
-		  /* Get the intersection window.  */
-		  lo1 = memaddr;
-		  hi1 = memaddr + length;
-		  lo2 = r->start;
-		  hi2 = r->start + r->length;
-		  start = max (lo1, lo2);
-		  end = min (hi1, hi2);
-
-		  gdb_assert (end - memaddr <= length);
-
-		  if (start > unavail)
-		    mark_value_bytes_unavailable (val,
-						  (embedded_offset
-						   + unavail - memaddr),
-						  start - unavail);
-		  unavail = end;
-
-		  read_memory (start, buffer + start - memaddr, end - start);
-		}
-	    }
-
-	  if (unavail != memaddr + length)
-	    mark_value_bytes_unavailable (val,
-					  embedded_offset + unavail - memaddr,
-					  (memaddr + length) - unavail);
-
-	  do_cleanups (old_chain);
-	}
-    }
+  ULONGEST xfered = 0;
+
+  while (xfered < length)
+    {
+      enum target_xfer_status status;
+      ULONGEST xfered_len;
+
+      status = target_xfer_partial (current_target.beneath,
+				    TARGET_OBJECT_MEMORY, NULL,
+				    buffer + xfered, NULL,
+				    memaddr + xfered, length - xfered,
+				    &xfered_len);
+
+      if (status == TARGET_XFER_OK)
+	/* nothing */;
+      else if (status == TARGET_XFER_E_UNAVAILABLE)
+	mark_value_bytes_unavailable (val, embedded_offset + xfered,
+				      xfered_len);
+      else if (status == TARGET_XFER_EOF)
+	memory_error (TARGET_XFER_E_IO, memaddr + xfered);
+      else
+	memory_error (status, memaddr + xfered);
+
+      xfered += xfered_len;
+      QUIT;
+    }
 }