[PATCH] ppc64: Don't pass the pointers to xItLpQueue around

Because there's only one ItLpQueue and we know where it is, i.e. xItLpQueue,
there's no point passing pointers to it around all over the place.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Author:    Michael Ellerman <michael@ellerman.id.au>
Date:      2005-06-30 15:07:57 +10:00
Committer: Paul Mackerras <paulus@samba.org>
Commit:    1b19bc7214
Parent:    ee48444b85

6 changed files with 34 additions and 34 deletions
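To illustrate the shape of the change, here is a minimal, self-contained sketch (not the kernel sources: the struct is simplified, u32 is stood in by a plain typedef, and the _old/_new suffixes exist only so both versions can appear side by side). A helper that used to take a struct ItLpQueue * now refers to the single global xItLpQueue directly, so calls such as ItLpQueue_process(&xItLpQueue, regs) become ItLpQueue_process(regs).

/* Illustrative sketch only -- types and fields simplified from the real ItLpQueue. */
typedef unsigned int u32;

struct ItLpQueue {
    u32 xInUseWord;
    /* ... remaining fields elided ... */
};

static struct ItLpQueue xItLpQueue;     /* the one and only LP event queue */

/* Before: every helper took a pointer, and every caller passed &xItLpQueue. */
static void clear_inUse_old(struct ItLpQueue *lpQueue)
{
    lpQueue->xInUseWord = 0;
}

/* After: the helper uses the global directly, so the parameter goes away. */
static void clear_inUse_new(void)
{
    xItLpQueue.xInUseWord = 0;
}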

View File

@@ -17,10 +17,10 @@
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>

-static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
+static __inline__ int set_inUse(void)
 {
     int t;
-    u32 * inUseP = &(lpQueue->xInUseWord);
+    u32 * inUseP = &xItLpQueue.xInUseWord;

     __asm__ __volatile__("\n\
 1:  lwarx   %0,0,%2     \n\
@@ -31,37 +31,37 @@ static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
     stwcx.  %0,0,%2     \n\
     bne-    1b          \n\
 2:  eieio"
-    : "=&r" (t), "=m" (lpQueue->xInUseWord)
-    : "r" (inUseP), "m" (lpQueue->xInUseWord)
+    : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
+    : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
     : "cc");

     return t;
 }

-static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
+static __inline__ void clear_inUse(void)
 {
-    lpQueue->xInUseWord = 0;
+    xItLpQueue.xInUseWord = 0;
 }

 /* Array of LpEvent handler functions */
 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
 unsigned long ItLpQueueInProcess = 0;

-struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
+struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
 {
     struct HvLpEvent * nextLpEvent =
-        (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+        (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
     if ( nextLpEvent->xFlags.xValid ) {
         /* rmb() needed only for weakly consistent machines (regatta) */
         rmb();
         /* Set pointer to next potential event */
-        lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+        xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                       LpEventAlign ) /
                      LpEventAlign ) *
                     LpEventAlign;
         /* Wrap to beginning if no room at end */
-        if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
-            lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
+        if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
+            xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
     }
     else
         nextLpEvent = NULL;
@@ -71,15 +71,15 @@ struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )

 static unsigned long spread_lpevents = NR_CPUS;

-int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
+int ItLpQueue_isLpIntPending(void)
 {
     struct HvLpEvent *next_event;

     if (smp_processor_id() >= spread_lpevents)
         return 0;

-    next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-    return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
+    next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
+    return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
 }

 void ItLpQueue_clearValid( struct HvLpEvent * event )
@@ -104,13 +104,13 @@ void ItLpQueue_clearValid( struct HvLpEvent * event )
     event->xFlags.xValid = 0;
 }

-unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
+unsigned ItLpQueue_process(struct pt_regs *regs)
 {
     unsigned numIntsProcessed = 0;
     struct HvLpEvent * nextLpEvent;

     /* If we have recursed, just return */
-    if ( !set_inUse( lpQueue ) )
+    if ( !set_inUse() )
         return 0;

     if (ItLpQueueInProcess == 0)
@@ -119,13 +119,13 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
         BUG();

     for (;;) {
-        nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
+        nextLpEvent = ItLpQueue_getNextLpEvent();
         if ( nextLpEvent ) {
             /* Count events to return to caller
-             * and count processed events in lpQueue
+             * and count processed events in xItLpQueue
              */
             ++numIntsProcessed;
-            lpQueue->xLpIntCount++;
+            xItLpQueue.xLpIntCount++;
             /* Call appropriate handler here, passing
              * a pointer to the LpEvent. The handler
              * must make a copy of the LpEvent if it
@@ -140,7 +140,7 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
              * here!
              */
             if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-                lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
+                xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
             if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                  lpEventHandler[nextLpEvent->xType] )
                 lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -148,19 +148,19 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                 printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );

             ItLpQueue_clearValid( nextLpEvent );
-        } else if ( lpQueue->xPlicOverflowIntPending )
+        } else if ( xItLpQueue.xPlicOverflowIntPending )
             /*
              * No more valid events. If overflow events are
              * pending process them
              */
-            HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
+            HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
         else
             break;
     }

     ItLpQueueInProcess = 0;
     mb();
-    clear_inUse( lpQueue );
+    clear_inUse();

     get_paca()->lpevent_count += numIntsProcessed;

View File

@@ -88,7 +88,7 @@ static int iSeries_idle(void)

     while (1) {
         if (lpaca->lppaca.shared_proc) {
-            if (ItLpQueue_isLpIntPending(&xItLpQueue))
+            if (ItLpQueue_isLpIntPending())
                 process_iSeries_events();
             if (!need_resched())
                 yield_shared_processor();
@@ -100,7 +100,7 @@ static int iSeries_idle(void)

             while (!need_resched()) {
                 HMT_medium();
-                if (ItLpQueue_isLpIntPending(&xItLpQueue))
+                if (ItLpQueue_isLpIntPending())
                     process_iSeries_events();
                 HMT_low();
             }

View File

@@ -294,8 +294,8 @@ void do_IRQ(struct pt_regs *regs)
             iSeries_smp_message_recv(regs);
         }
 #endif /* CONFIG_SMP */
-    if (ItLpQueue_isLpIntPending(&xItLpQueue))
-        lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
+    if (ItLpQueue_isLpIntPending())
+        lpevent_count += ItLpQueue_process(regs);

     irq_exit();

View File

@@ -802,8 +802,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
     /* We need to poll here as we are not yet taking interrupts */
     while (rtc_data.busy) {
         extern unsigned long lpevent_count;
-        if (ItLpQueue_isLpIntPending(&xItLpQueue))
-            lpevent_count += ItLpQueue_process(&xItLpQueue, NULL);
+        if (ItLpQueue_isLpIntPending())
+            lpevent_count += ItLpQueue_process(NULL);
     }
     return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
 }

View File

@@ -367,8 +367,8 @@ int timer_interrupt(struct pt_regs * regs)
     set_dec(next_dec);

 #ifdef CONFIG_PPC_ISERIES
-    if (ItLpQueue_isLpIntPending(&xItLpQueue))
-        lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
+    if (ItLpQueue_isLpIntPending())
+        lpevent_count += ItLpQueue_process(regs);
 #endif

 /* collect purr register values often, for accurate calculations */

View File

@@ -76,9 +76,9 @@ struct ItLpQueue {

 extern struct ItLpQueue xItLpQueue;

-extern struct HvLpEvent *ItLpQueue_getNextLpEvent(struct ItLpQueue *);
-extern int ItLpQueue_isLpIntPending(struct ItLpQueue *);
-extern unsigned ItLpQueue_process(struct ItLpQueue *, struct pt_regs *);
+extern struct HvLpEvent *ItLpQueue_getNextLpEvent(void);
+extern int ItLpQueue_isLpIntPending(void);
+extern unsigned ItLpQueue_process(struct pt_regs *);
 extern void ItLpQueue_clearValid(struct HvLpEvent *);

 #endif /* _ITLPQUEUE_H */