Remove save_inferior_ptid
This removes save_inferior_ptid, a cleanup function, in favor of
scoped_restore.  This also fixes a possible (though it seems unlikely
to happen in practice) memory leak -- save_inferior_ptid should have
used make_cleanup_dtor, because it allocated memory.

I tested this on the buildbot.  However, there are two caveats to
this.  First, sometimes it seems I misread the results.  Second, I
think this patch touches some platforms that can't be tested by the
buildbot.  So, extra care seems warranted.

ChangeLog
2017-08-18  Tom Tromey  <tom@tromey.com>
	    Pedro Alves  <palves@redhat.com>

	* spu-multiarch.c (parse_spufs_run): Use scoped_restore.
	* sol-thread.c (sol_thread_resume, sol_thread_wait)
	(sol_thread_xfer_partial, rw_common): Use scoped_restore.
	* procfs.c (procfs_do_thread_registers): Use scoped_restore.
	* proc-service.c (ps_xfer_memory): Use scoped_restore.
	* linux-tdep.c (linux_corefile_thread): Remove a cleanup.
	(linux_get_siginfo_data): Add "thread" argument.  Use
	scoped_restore.
	* linux-nat.c (linux_child_follow_fork)
	(check_stopped_by_watchpoint): Use scoped_restore.
	* infrun.c (displaced_step_prepare_throw, write_memory_ptid)
	(THREAD_STOPPED_BY, handle_signal_stop): Use scoped_restore.
	(restore_inferior_ptid, save_inferior_ptid): Remove.
	* btrace.c (btrace_fetch): Use scoped_restore.
	* bsd-uthread.c (bsd_uthread_fetch_registers)
	(bsd_uthread_store_registers): Use scoped_restore.
	* breakpoint.c (reattach_breakpoints, detach_breakpoints): Use
	scoped_restore.
	* aix-thread.c (aix_thread_resume, aix_thread_wait)
	(aix_thread_xfer_partial): Use scoped_restore.
	* inferior.h (save_inferior_ptid): Remove.
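The pattern being replaced throughout this patch is "save a global, do some
work, restore the global on every exit path".  The sketch below is a
simplified, hypothetical stand-in -- it is not GDB's actual scoped_restore
header, and scoped_restore_tmpl, query_thread, and current_thread_id are
invented names for illustration only -- showing why an RAII guard is
leak-free and safe on every return path, unlike a heap-allocated cleanup
that must be run explicitly with do_cleanups.

// Simplified sketch (assumed names, not GDB's real implementation) of the
// RAII idiom this patch switches to: the guard saves the current value of a
// variable in its constructor and puts it back in its destructor, so every
// exit path restores the global with no heap allocation to leak.
#include <cassert>

template<typename T>
class scoped_restore_tmpl
{
public:
  explicit scoped_restore_tmpl (T *var)
    : m_var (var), m_saved (*var)
  {}

  ~scoped_restore_tmpl ()
  { *m_var = m_saved; }

  /* Non-copyable, like the real guard.  */
  scoped_restore_tmpl (const scoped_restore_tmpl &) = delete;
  scoped_restore_tmpl &operator= (const scoped_restore_tmpl &) = delete;

private:
  T *m_var;
  T m_saved;
};

/* Stand-in for a global like inferior_ptid.  */
static int current_thread_id = 1;

static int
query_thread (int temporary_id)
{
  /* Before: allocate a cleanup, remember to call do_cleanups on every
     return.  After: one guard object restores the global automatically.  */
  scoped_restore_tmpl<int> restore (&current_thread_id);
  current_thread_id = temporary_id;

  /* ... work that implicitly reads current_thread_id ...  */
  return current_thread_id * 10;   /* early returns are now safe */
}

int
main ()
{
  int result = query_thread (42);
  assert (result == 420);
  assert (current_thread_id == 1);  /* restored on scope exit */
  return 0;
}

In the patch itself the same effect comes from
"scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);"
followed by an assignment to inferior_ptid.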
parent e60eb28803
commit 2989a3651d
gdb/ChangeLog
@@ -1,3 +1,28 @@
+2017-08-18  Tom Tromey  <tom@tromey.com>
+	    Pedro Alves  <palves@redhat.com>
+
+	* spu-multiarch.c (parse_spufs_run): Use scoped_restore.
+	* sol-thread.c (sol_thread_resume, sol_thread_wait)
+	(sol_thread_xfer_partial, rw_common): Use scoped_restore.
+	* procfs.c (procfs_do_thread_registers): Use scoped_restore.
+	* proc-service.c (ps_xfer_memory): Use scoped_restore.
+	* linux-tdep.c (linux_corefile_thread): Remove a cleanup.
+	(linux_get_siginfo_data): Add "thread" argument.  Use
+	scoped_restore.
+	* linux-nat.c (linux_child_follow_fork)
+	(check_stopped_by_watchpoint): Use scoped_restore.
+	* infrun.c (displaced_step_prepare_throw, write_memory_ptid)
+	(THREAD_STOPPED_BY, handle_signal_stop): Use scoped_restore.
+	(restore_inferior_ptid, save_inferior_ptid): Remove.
+	* btrace.c (btrace_fetch): Use scoped_restore.
+	* bsd-uthread.c (bsd_uthread_fetch_registers)
+	(bsd_uthread_store_registers): Use scoped_restore.
+	* breakpoint.c (reattach_breakpoints, detach_breakpoints): Use
+	scoped_restore.
+	* aix-thread.c (aix_thread_resume, aix_thread_wait)
+	(aix_thread_xfer_partial): Use scoped_restore.
+	* inferior.h (save_inferior_ptid): Remove.
+
 2017-08-18  Yao Qi  <yao.qi@linaro.org>
 
 	PR tdep/21818
gdb/aix-thread.c
@@ -985,12 +985,11 @@ aix_thread_resume (struct target_ops *ops,
 
   if (!PD_TID (ptid))
     {
-      struct cleanup *cleanup = save_inferior_ptid ();
+      scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
       struct target_ops *beneath = find_target_beneath (ops);
 
       inferior_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
       beneath->to_resume (beneath, ptid, step, sig);
-      do_cleanups (cleanup);
     }
   else
     {
@@ -1022,14 +1021,16 @@ static ptid_t
 aix_thread_wait (struct target_ops *ops,
 		 ptid_t ptid, struct target_waitstatus *status, int options)
 {
-  struct cleanup *cleanup = save_inferior_ptid ();
   struct target_ops *beneath = find_target_beneath (ops);
 
   pid_to_prc (&ptid);
 
-  inferior_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
-  ptid = beneath->to_wait (beneath, ptid, status, options);
-  do_cleanups (cleanup);
+  {
+    scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
+
+    inferior_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
+    ptid = beneath->to_wait (beneath, ptid, status, options);
+  }
 
   if (ptid_get_pid (ptid) == -1)
     return pid_to_ptid (-1);
@@ -1684,16 +1685,12 @@ aix_thread_xfer_partial (struct target_ops *ops, enum target_object object,
 			 const gdb_byte *writebuf,
 			 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
 {
-  struct cleanup *old_chain = save_inferior_ptid ();
-  enum target_xfer_status xfer;
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   struct target_ops *beneath = find_target_beneath (ops);
 
   inferior_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
-  xfer = beneath->to_xfer_partial (beneath, object, annex, readbuf,
-				   writebuf, offset, len, xfered_len);
-
-  do_cleanups (old_chain);
-  return xfer;
+  return beneath->to_xfer_partial (beneath, object, annex, readbuf,
+				   writebuf, offset, len, xfered_len);
 }
 
 /* Clean up after the inferior exits.  */
gdb/breakpoint.c
@@ -3302,7 +3302,6 @@ remove_breakpoints_pid (int pid)
 int
 reattach_breakpoints (int pid)
 {
-  struct cleanup *old_chain;
   struct bp_location *bl, **blp_tmp;
   int val;
   int dummy1 = 0, dummy2 = 0, dummy3 = 0;
@@ -3314,8 +3313,8 @@ reattach_breakpoints (int pid)
     return 1;
 
   inf = find_inferior_pid (pid);
-  old_chain = save_inferior_ptid ();
 
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   inferior_ptid = tp->ptid;
 
   string_file tmp_error_stream;
@@ -3330,13 +3329,9 @@ reattach_breakpoints (int pid)
 	bl->inserted = 0;
 	val = insert_bp_location (bl, &tmp_error_stream, &dummy1, &dummy2, &dummy3);
 	if (val != 0)
-	  {
-	    do_cleanups (old_chain);
-	    return val;
-	  }
+	  return val;
       }
   }
-  do_cleanups (old_chain);
   return 0;
 }
 
@@ -3913,7 +3908,7 @@ detach_breakpoints (ptid_t ptid)
 {
   struct bp_location *bl, **blp_tmp;
   int val = 0;
-  struct cleanup *old_chain = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   struct inferior *inf = current_inferior ();
 
   if (ptid_get_pid (ptid) == ptid_get_pid (inferior_ptid))
@@ -3939,7 +3934,6 @@ detach_breakpoints (ptid_t ptid)
       val |= remove_breakpoint_1 (bl, DETACH_BREAKPOINT);
   }
 
-  do_cleanups (old_chain);
   return val;
 }
 
gdb/bsd-uthread.c
@@ -294,7 +294,7 @@ bsd_uthread_fetch_registers (struct target_ops *ops,
   CORE_ADDR addr = ptid_get_tid (ptid);
   struct target_ops *beneath = find_target_beneath (ops);
   CORE_ADDR active_addr;
-  struct cleanup *cleanup = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
 
   /* We are doing operations (e.g. reading memory) that rely on
      inferior_ptid.  */
@@ -315,8 +315,6 @@ bsd_uthread_fetch_registers (struct target_ops *ops,
       uthread_ops->supply_uthread (regcache, regnum,
 				   addr + bsd_uthread_thread_ctx_offset);
     }
-
-  do_cleanups (cleanup);
 }
 
 static void
@@ -330,7 +328,7 @@ bsd_uthread_store_registers (struct target_ops *ops,
   ptid_t ptid = regcache_get_ptid (regcache);
   CORE_ADDR addr = ptid_get_tid (ptid);
   CORE_ADDR active_addr;
-  struct cleanup *cleanup = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
 
   /* We are doing operations (e.g. reading memory) that rely on
      inferior_ptid.  */
@@ -349,8 +347,6 @@ bsd_uthread_store_registers (struct target_ops *ops,
 	 request to the layer beneath.  */
       beneath->to_store_registers (beneath, regcache, regnum);
     }
-
-  do_cleanups (cleanup);
 }
 
 static ptid_t
gdb/btrace.c
@@ -1908,14 +1908,14 @@ btrace_fetch (struct thread_info *tp)
   /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
-  cleanup = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   inferior_ptid = tp->ptid;
 
   /* We should not be called on running or exited threads.  */
   gdb_assert (can_access_registers_ptid (tp->ptid));
 
   btrace_data_init (&btrace);
-  make_cleanup_btrace_data (&btrace);
+  cleanup = make_cleanup_btrace_data (&btrace);
 
   /* Let's first try to extend the trace we already have.  */
   if (!btinfo->functions.empty ())
gdb/inferior.h
@@ -73,11 +73,6 @@ extern void discard_infcall_control_state (struct infcall_control_state *);
 extern struct regcache *
   get_infcall_suspend_state_regcache (struct infcall_suspend_state *);
 
-/* Save value of inferior_ptid so that it may be restored by
-   a later call to do_cleanups().  Returns the struct cleanup
-   pointer needed for later doing the cleanup.  */
-extern struct cleanup * save_inferior_ptid (void);
-
 extern void set_sigint_trap (void);
 
 extern void clear_sigint_trap (void);
gdb/infrun.c
@@ -1756,7 +1756,7 @@ displaced_step_dump_bytes (struct ui_file *file,
 static int
 displaced_step_prepare_throw (ptid_t ptid)
 {
-  struct cleanup *old_cleanups, *ignore_cleanups;
+  struct cleanup *ignore_cleanups;
   struct thread_info *tp = find_thread_ptid (ptid);
   struct regcache *regcache = get_thread_regcache (ptid);
   struct gdbarch *gdbarch = get_regcache_arch (regcache);
@@ -1808,7 +1808,7 @@ displaced_step_prepare_throw (ptid_t ptid)
 
   displaced_step_clear (displaced);
 
-  old_cleanups = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   inferior_ptid = ptid;
 
   original = regcache_read_pc (regcache);
@@ -1834,7 +1834,6 @@ displaced_step_prepare_throw (ptid_t ptid)
 			      "Stepping over breakpoint in-line instead.\n");
 	}
 
-      do_cleanups (old_cleanups);
       return -1;
     }
 
@@ -1864,7 +1863,7 @@ displaced_step_prepare_throw (ptid_t ptid)
       /* The architecture doesn't know how or want to displaced step
 	 this instruction or instruction sequence.  Fallback to
 	 stepping over the breakpoint in-line.  */
-      do_cleanups (old_cleanups);
+      do_cleanups (ignore_cleanups);
       return -1;
     }
 
@@ -1883,8 +1882,6 @@ displaced_step_prepare_throw (ptid_t ptid)
 
   discard_cleanups (ignore_cleanups);
 
-  do_cleanups (old_cleanups);
-
   if (debug_displaced)
     fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
 			paddress (gdbarch, copy));
@@ -1941,11 +1938,10 @@ static void
 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
 		   const gdb_byte *myaddr, int len)
 {
-  struct cleanup *ptid_cleanup = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
 
   inferior_ptid = ptid;
   write_memory (memaddr, myaddr, len);
-  do_cleanups (ptid_cleanup);
 }
 
 /* Restore the contents of the copy area for thread PTID.  */
@@ -4366,17 +4362,10 @@ wait_one (struct target_waitstatus *ws)
   static int						\
   thread_stopped_by_ ## REASON (ptid_t ptid)		\
   {							\
-    struct cleanup *old_chain;				\
-    int res;						\
-							\
-    old_chain = save_inferior_ptid ();			\
+    scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
     inferior_ptid = ptid;				\
 							\
-    res = target_stopped_by_ ## REASON ();		\
-							\
-    do_cleanups (old_chain);				\
-							\
-    return res;						\
+    return target_stopped_by_ ## REASON ();		\
   }
 
 /* Generate thread_stopped_by_watchpoint.  */
@@ -5706,7 +5695,7 @@ handle_signal_stop (struct execution_control_state *ecs)
     {
       struct regcache *regcache = get_thread_regcache (ecs->ptid);
       struct gdbarch *gdbarch = get_regcache_arch (regcache);
-      struct cleanup *old_chain = save_inferior_ptid ();
+      scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
 
       inferior_ptid = ecs->ptid;
 
@@ -5726,8 +5715,6 @@ handle_signal_stop (struct execution_control_state *ecs)
 	    fprintf_unfiltered (gdb_stdlog,
 				"infrun: (no data address available)\n");
 	}
-
-      do_cleanups (old_chain);
     }
 
   /* This is originated from start_remote(), start_inferior() and
@@ -9116,32 +9103,6 @@ discard_infcall_control_state (struct infcall_control_state *inf_status)
   xfree (inf_status);
 }
 
-/* restore_inferior_ptid() will be used by the cleanup machinery
-   to restore the inferior_ptid value saved in a call to
-   save_inferior_ptid().  */
-
-static void
-restore_inferior_ptid (void *arg)
-{
-  ptid_t *saved_ptid_ptr = (ptid_t *) arg;
-
-  inferior_ptid = *saved_ptid_ptr;
-  xfree (arg);
-}
-
-/* Save the value of inferior_ptid so that it may be restored by a
-   later call to do_cleanups().  Returns the struct cleanup pointer
-   needed for later doing the cleanup.  */
-
-struct cleanup *
-save_inferior_ptid (void)
-{
-  ptid_t *saved_ptid_ptr = XNEW (ptid_t);
-
-  *saved_ptid_ptr = inferior_ptid;
-  return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
-}
-
 /* See infrun.h.  */
 
 void
gdb/linux-nat.c
@@ -477,7 +477,6 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child,
 {
   struct lwp_info *child_lp = NULL;
   int status = W_STOPCODE (0);
-  struct cleanup *old_chain;
   int has_vforked;
   ptid_t parent_ptid, child_ptid;
   int parent_pid, child_pid;
@@ -490,16 +489,15 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child,
       child_pid = ptid_get_lwp (child_ptid);
 
       /* We're already attached to the parent, by default.  */
-      old_chain = save_inferior_ptid ();
-      inferior_ptid = child_ptid;
-      child_lp = add_lwp (inferior_ptid);
+      child_lp = add_lwp (child_ptid);
       child_lp->stopped = 1;
       child_lp->last_resume_kind = resume_stop;
 
       /* Detach new forked process?  */
       if (detach_fork)
 	{
-	  make_cleanup (delete_lwp_cleanup, child_lp);
+	  struct cleanup *old_chain = make_cleanup (delete_lwp_cleanup,
+						    child_lp);
 
 	  if (linux_nat_prepare_to_resume != NULL)
 	    linux_nat_prepare_to_resume (child_lp);
@@ -513,7 +511,7 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child,
 	     once before detaching to clear the flags.  */
 
 	  if (!gdbarch_software_single_step_p (target_thread_architecture
-						   (child_lp->ptid)))
+					       (child_lp->ptid)))
 	    {
 	      linux_disable_event_reporting (child_pid);
 	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
@@ -533,17 +531,18 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child,
 	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
 	    }
 
-	  /* Resets value of inferior_ptid to parent ptid.  */
 	  do_cleanups (old_chain);
 	}
       else
 	{
+	  scoped_restore save_inferior_ptid
+	    = make_scoped_restore (&inferior_ptid);
+	  inferior_ptid = child_ptid;
+
 	  /* Let the thread_db layer learn about this new process.  */
 	  check_for_thread_db ();
 	}
 
-      do_cleanups (old_chain);
-
       if (has_vforked)
 	{
 	  struct lwp_info *parent_lp;
@@ -2458,12 +2457,10 @@ maybe_clear_ignore_sigint (struct lwp_info *lp)
 static int
 check_stopped_by_watchpoint (struct lwp_info *lp)
 {
-  struct cleanup *old_chain;
-
   if (linux_ops->to_stopped_by_watchpoint == NULL)
     return 0;
 
-  old_chain = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   inferior_ptid = lp->ptid;
 
   if (linux_ops->to_stopped_by_watchpoint (linux_ops))
@@ -2478,8 +2475,6 @@ check_stopped_by_watchpoint (struct lwp_info *lp)
 	lp->stopped_data_address_p = 0;
     }
 
-  do_cleanups (old_chain);
-
   return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
 }
 
gdb/linux-tdep.c
@@ -1649,14 +1649,15 @@ linux_collect_thread_registers (const struct regcache *regcache,
   return data.note_data;
 }
 
-/* Fetch the siginfo data for the current thread, if it exists.  If
+/* Fetch the siginfo data for the specified thread, if it exists.  If
    there is no data, or we could not read it, return NULL.  Otherwise,
    return a newly malloc'd buffer holding the data and fill in *SIZE
    with the size of the data.  The caller is responsible for freeing
    the data.  */
 
 static gdb_byte *
-linux_get_siginfo_data (struct gdbarch *gdbarch, LONGEST *size)
+linux_get_siginfo_data (thread_info *thread, struct gdbarch *gdbarch,
+			LONGEST *size)
 {
   struct type *siginfo_type;
   gdb_byte *buf;
@@ -1666,6 +1667,9 @@ linux_get_siginfo_data (struct gdbarch *gdbarch, LONGEST *size)
   if (!gdbarch_get_siginfo_type_p (gdbarch))
     return NULL;
 
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
+  inferior_ptid = thread->ptid;
+
   siginfo_type = gdbarch_get_siginfo_type (gdbarch);
 
   buf = (gdb_byte *) xmalloc (TYPE_LENGTH (siginfo_type));
@@ -1710,11 +1714,8 @@ linux_corefile_thread (struct thread_info *info,
 
   regcache = get_thread_arch_regcache (info->ptid, args->gdbarch);
 
-  old_chain = save_inferior_ptid ();
-  inferior_ptid = info->ptid;
   target_fetch_registers (regcache, -1);
-  siginfo_data = linux_get_siginfo_data (args->gdbarch, &siginfo_size);
-  do_cleanups (old_chain);
+  siginfo_data = linux_get_siginfo_data (info, args->gdbarch, &siginfo_size);
 
   old_chain = make_cleanup (xfree, siginfo_data);
 
gdb/proc-service.c
@@ -87,7 +87,7 @@ static ps_err_e
 ps_xfer_memory (const struct ps_prochandle *ph, psaddr_t addr,
 		gdb_byte *buf, size_t len, int write)
 {
-  struct cleanup *old_chain = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   int ret;
   CORE_ADDR core_addr = ps_addr_to_core_addr (addr);
 
@@ -98,8 +98,6 @@ ps_xfer_memory (const struct ps_prochandle *ph, psaddr_t addr,
   else
     ret = target_read_memory (core_addr, buf, len);
 
-  do_cleanups (old_chain);
-
   return (ret == 0 ? PS_OK : PS_ERR);
 }
 
gdb/procfs.c
@@ -5316,7 +5316,6 @@ procfs_do_thread_registers (bfd *obfd, ptid_t ptid,
   gdb_gregset_t gregs;
   gdb_fpregset_t fpregs;
   unsigned long merged_pid;
-  struct cleanup *old_chain;
 
   merged_pid = ptid_get_lwp (ptid) << 16 | ptid_get_pid (ptid);
 
@@ -5325,7 +5324,7 @@ procfs_do_thread_registers (bfd *obfd, ptid_t ptid,
      once it is implemented in this platform:
      gdbarch_iterate_over_regset_sections().  */
 
-  old_chain = save_inferior_ptid ();
+  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
   inferior_ptid = ptid;
   target_fetch_registers (regcache, -1);
 
@@ -5352,8 +5351,6 @@ procfs_do_thread_registers (bfd *obfd, ptid_t ptid,
 					       &fpregs,
 					       sizeof (fpregs));
 
-  do_cleanups (old_chain);
-
   return note_data;
 }
 
|
@ -367,10 +367,9 @@ static void
|
||||
sol_thread_resume (struct target_ops *ops,
|
||||
ptid_t ptid, int step, enum gdb_signal signo)
|
||||
{
|
||||
struct cleanup *old_chain;
|
||||
struct target_ops *beneath = find_target_beneath (ops);
|
||||
|
||||
old_chain = save_inferior_ptid ();
|
||||
scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
|
||||
|
||||
inferior_ptid = thread_to_lwp (inferior_ptid, ptid_get_pid (main_ph.ptid));
|
||||
if (ptid_get_pid (inferior_ptid) == -1)
|
||||
@ -389,8 +388,6 @@ sol_thread_resume (struct target_ops *ops,
|
||||
}
|
||||
|
||||
beneath->to_resume (beneath, ptid, step, signo);
|
||||
|
||||
do_cleanups (old_chain);
|
||||
}
|
||||
|
||||
/* Wait for any threads to stop. We may have to convert PTID from a
|
||||
@ -403,10 +400,9 @@ sol_thread_wait (struct target_ops *ops,
|
||||
ptid_t rtnval;
|
||||
ptid_t save_ptid;
|
||||
struct target_ops *beneath = find_target_beneath (ops);
|
||||
struct cleanup *old_chain;
|
||||
|
||||
save_ptid = inferior_ptid;
|
||||
old_chain = save_inferior_ptid ();
|
||||
scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
|
||||
|
||||
inferior_ptid = thread_to_lwp (inferior_ptid, ptid_get_pid (main_ph.ptid));
|
||||
if (ptid_get_pid (inferior_ptid) == -1)
|
||||
@ -445,8 +441,6 @@ sol_thread_wait (struct target_ops *ops,
|
||||
package being initialized, since that can only happen after we've
|
||||
found the shared libs. */
|
||||
|
||||
do_cleanups (old_chain);
|
||||
|
||||
return rtnval;
|
||||
}
|
||||
|
||||
@ -569,11 +563,9 @@ sol_thread_xfer_partial (struct target_ops *ops, enum target_object object,
|
||||
const gdb_byte *writebuf,
|
||||
ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
|
||||
{
|
||||
enum target_xfer_status retval;
|
||||
struct cleanup *old_chain;
|
||||
struct target_ops *beneath = find_target_beneath (ops);
|
||||
|
||||
old_chain = save_inferior_ptid ();
|
||||
scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
|
||||
|
||||
if (ptid_tid_p (inferior_ptid) || !target_thread_alive (inferior_ptid))
|
||||
{
|
||||
@ -585,12 +577,8 @@ sol_thread_xfer_partial (struct target_ops *ops, enum target_object object,
|
||||
inferior_ptid = procfs_first_available ();
|
||||
}
|
||||
|
||||
retval = beneath->to_xfer_partial (beneath, object, annex, readbuf,
|
||||
writebuf, offset, len, xfered_len);
|
||||
|
||||
do_cleanups (old_chain);
|
||||
|
||||
return retval;
|
||||
return beneath->to_xfer_partial (beneath, object, annex, readbuf,
|
||||
writebuf, offset, len, xfered_len);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -800,9 +788,8 @@ rw_common (int dowrite, const struct ps_prochandle *ph, gdb_ps_addr_t addr,
|
||||
gdb_byte *buf, int size)
|
||||
{
|
||||
int ret;
|
||||
struct cleanup *old_chain;
|
||||
|
||||
old_chain = save_inferior_ptid ();
|
||||
scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
|
||||
|
||||
if (ptid_tid_p (inferior_ptid) || !target_thread_alive (inferior_ptid))
|
||||
{
|
||||
@ -826,8 +813,6 @@ rw_common (int dowrite, const struct ps_prochandle *ph, gdb_ps_addr_t addr,
|
||||
else
|
||||
ret = target_read_memory (addr, (gdb_byte *) buf, size);
|
||||
|
||||
do_cleanups (old_chain);
|
||||
|
||||
return (ret == 0 ? PS_OK : PS_ERR);
|
||||
}
|
||||
|
||||
|
gdb/spu-multiarch.c
@@ -76,10 +76,11 @@ parse_spufs_run (ptid_t ptid, int *fd, CORE_ADDR *addr)
   tdep = gdbarch_tdep (target_gdbarch ());
 
   /* Fetch instruction preceding current NIP.  */
-  old_chain = save_inferior_ptid ();
-  inferior_ptid = ptid;
-  regval = target_read_memory (regcache_read_pc (regcache) - 4, buf, 4);
-  do_cleanups (old_chain);
+  {
+    scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
+    inferior_ptid = ptid;
+    regval = target_read_memory (regcache_read_pc (regcache) - 4, buf, 4);
+  }
   if (regval != 0)
     return 0;
   /* It should be a "sc" instruction.  */