powerpc/xive: introduce a common routine xive_queue_page_alloc()

This routine will be used in the spapr backend. Also introduce a short
xive_alloc_order() helper.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

@@ -1428,6 +1428,22 @@ bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
 	return true;
 }
 
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
+{
+	unsigned int alloc_order;
+	struct page *pages;
+	__be32 *qpage;
+
+	alloc_order = xive_alloc_order(queue_shift);
+	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+	qpage = (__be32 *)page_address(pages);
+	memset(qpage, 0, 1 << queue_shift);
+
+	return qpage;
+}
+
 static int __init xive_off(char *arg)
 {
 	xive_cmdline_disabled = true;

@@ -202,17 +202,12 @@ EXPORT_SYMBOL_GPL(xive_native_disable_queue);
 static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
 {
 	struct xive_q *q = &xc->queue[prio];
-	unsigned int alloc_order;
-	struct page *pages;
 	__be32 *qpage;
 
-	alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
-		(xive_queue_shift - PAGE_SHIFT) : 0;
-	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
-	if (!pages)
-		return -ENOMEM;
-	qpage = (__be32 *)page_address(pages);
-	memset(qpage, 0, 1 << xive_queue_shift);
+	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
+	if (IS_ERR(qpage))
+		return PTR_ERR(qpage);
+
 	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
 					   q, prio, qpage, xive_queue_shift, false);
 }
@@ -227,8 +222,7 @@ static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
 	 * from an IPI and iounmap isn't safe
 	 */
 	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
-	alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
-		(xive_queue_shift - PAGE_SHIFT) : 0;
+	alloc_order = xive_alloc_order(xive_queue_shift);
 	free_pages((unsigned long)q->qpage, alloc_order);
 	q->qpage = NULL;
 }

@@ -56,6 +56,12 @@ struct xive_ops {
 bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
 		    u8 max_prio);
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
+
+static inline u32 xive_alloc_order(u32 queue_shift)
+{
+	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
+}
 
 extern bool xive_cmdline_disabled;
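
The commit message notes that xive_queue_page_alloc() will be reused by the spapr backend. The sketch below is only an illustration of that intended usage, not code from this patch: xive_spapr_setup_queue(), xive_spapr_cleanup_queue() and the hypervisor hand-off mentioned in the comment are hypothetical placeholders, and xive_queue_shift is assumed to be a per-backend static as in native.c. Only xive_queue_page_alloc(), xive_alloc_order() and the xive-internal.h declarations come from the diff above.

/* Illustrative only -- a hypothetical spapr-side caller of the new helpers. */
#include <linux/err.h>
#include <linux/gfp.h>

#include <asm/xive.h>

#include "xive-internal.h"

static u32 xive_queue_shift;	/* assumed per-backend queue size, as in native.c */

static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	/* Allocate and zero the event queue pages for this CPU/priority */
	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	/*
	 * A real backend would now hand the queue to the hypervisor
	 * (for instance through an hcall) before marking it live.
	 */
	q->qpage = qpage;
	return 0;
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];

	/* Free with the same order that xive_queue_page_alloc() used */
	free_pages((unsigned long)q->qpage, xive_alloc_order(xive_queue_shift));
	q->qpage = NULL;
}

Placing xive_alloc_order() in xive-internal.h keeps the free_pages() order in each backend's cleanup path symmetric with the allocation, which is exactly what the native backend's cleanup hunk above relies on.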