xilinx: Add AXIENET & DMA models

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@petalogix.com>
This commit is contained in:
Edgar E. Iglesias 2011-03-14 11:13:55 +01:00
parent d746ce6dba
commit 93f1e4016b
4 changed files with 1448 additions and 0 deletions


@@ -272,6 +272,8 @@ obj-microblaze-y += xilinx_intc.o
 obj-microblaze-y += xilinx_timer.o
 obj-microblaze-y += xilinx_uartlite.o
 obj-microblaze-y += xilinx_ethlite.o
+obj-microblaze-y += xilinx_axidma.o
+obj-microblaze-y += xilinx_axienet.o
 obj-microblaze-$(CONFIG_FDT) += device_tree.o

hw/xilinx_axidma.c (new file, 509 lines)

@@ -0,0 +1,509 @@
/*
* QEMU model of Xilinx AXI-DMA block.
*
* Copyright (c) 2011 Edgar E. Iglesias.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "sysbus.h"
#include "qemu-char.h"
#include "qemu-timer.h"
#include "qemu-log.h"
#include "qdev-addr.h"
#include "xilinx_axidma.h"
#define D(x)
#define R_DMACR (0x00 / 4)
#define R_DMASR (0x04 / 4)
#define R_CURDESC (0x08 / 4)
#define R_TAILDESC (0x10 / 4)
#define R_MAX (0x30 / 4)
enum {
DMACR_RUNSTOP = 1,
DMACR_TAILPTR_MODE = 2,
DMACR_RESET = 4
};
enum {
DMASR_HALTED = 1,
DMASR_IDLE = 2,
DMASR_IOC_IRQ = 1 << 12,
DMASR_DLY_IRQ = 1 << 13,
DMASR_IRQ_MASK = 7 << 12
};
struct SDesc {
uint64_t nxtdesc;
uint64_t buffer_address;
uint64_t reserved;
uint32_t control;
uint32_t status;
uint32_t app[6];
};
enum {
SDESC_CTRL_EOF = (1 << 26),
SDESC_CTRL_SOF = (1 << 27),
SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};
enum {
SDESC_STATUS_EOF = (1 << 26),
SDESC_STATUS_SOF_BIT = 27,
SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
SDESC_STATUS_COMPLETE = (1 << 31)
};
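/*
* A frame may span several descriptors: SDESC_CTRL_SOF marks the first
* buffer, SDESC_CTRL_EOF the last and the low 23 bits of control hold
* the buffer length. When the engine is done with a descriptor it writes
* back the status word with SDESC_STATUS_COMPLETE set (plus the
* transferred length on MM2S, or the SOF/EOF markers on S2MM).
*/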
struct AXIStream {
QEMUBH *bh;
ptimer_state *ptimer;
qemu_irq irq;
int nr;
struct SDesc desc;
int pos;
unsigned int complete_cnt;
uint32_t regs[R_MAX];
};
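/*
* One controller models both directions: streams[0] is the MM2S
* (memory to stream, tx) channel at register offset 0x00 and streams[1]
* the S2MM (stream to memory, rx) channel at offset 0x30. See
* streamid_from_addr() for the offset to channel mapping.
*/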
struct XilinxAXIDMA {
SysBusDevice busdev;
uint32_t freqhz;
void *dmach;
struct AXIStream streams[2];
};
/*
* Helper calls to extract info from descriptors and other trivial
* state from regs.
*/
static inline int stream_desc_sof(struct SDesc *d)
{
return d->control & SDESC_CTRL_SOF;
}
static inline int stream_desc_eof(struct SDesc *d)
{
return d->control & SDESC_CTRL_EOF;
}
static inline int stream_resetting(struct AXIStream *s)
{
return !!(s->regs[R_DMACR] & DMACR_RESET);
}
static inline int stream_running(struct AXIStream *s)
{
return s->regs[R_DMACR] & DMACR_RUNSTOP;
}
static inline int stream_halted(struct AXIStream *s)
{
return s->regs[R_DMASR] & DMASR_HALTED;
}
static inline int stream_idle(struct AXIStream *s)
{
return !!(s->regs[R_DMASR] & DMASR_IDLE);
}
static void stream_reset(struct AXIStream *s)
{
s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
}
/* Map an offset addr into a channel index. */
static inline int streamid_from_addr(target_phys_addr_t addr)
{
int sid;
sid = addr / (0x30);
sid &= 1;
return sid;
}
#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
qemu_log("buffer_addr = " PRIx64 "\n", d->buffer_address);
qemu_log("nxtdesc = " PRIx64 "\n", d->nxtdesc);
qemu_log("control = %x\n", d->control);
qemu_log("status = %x\n", d->status);
}
#endif
static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
{
struct SDesc *d = &s->desc;
int i;
cpu_physical_memory_read(addr, (void *) d, sizeof *d);
/* Convert from LE into host endianness. */
d->buffer_address = le64_to_cpu(d->buffer_address);
d->nxtdesc = le64_to_cpu(d->nxtdesc);
d->control = le32_to_cpu(d->control);
d->status = le32_to_cpu(d->status);
for (i = 0; i < ARRAY_SIZE(d->app); i++) {
d->app[i] = le32_to_cpu(d->app[i]);
}
}
static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
{
struct SDesc *d = &s->desc;
int i;
/* Convert from host endianness into LE. */
d->buffer_address = cpu_to_le64(d->buffer_address);
d->nxtdesc = cpu_to_le64(d->nxtdesc);
d->control = cpu_to_le32(d->control);
d->status = cpu_to_le32(d->status);
for (i = 0; i < ARRAY_SIZE(d->app); i++) {
d->app[i] = cpu_to_le32(d->app[i]);
}
cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}
static void stream_update_irq(struct AXIStream *s)
{
unsigned int pending, mask, irq;
pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
irq = pending & mask;
qemu_set_irq(s->irq, !!irq);
}
static void stream_reload_complete_cnt(struct AXIStream *s)
{
unsigned int comp_th;
comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
s->complete_cnt = comp_th;
}
static void timer_hit(void *opaque)
{
struct AXIStream *s = opaque;
stream_reload_complete_cnt(s);
s->regs[R_DMASR] |= DMASR_DLY_IRQ;
stream_update_irq(s);
}
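/*
* Interrupt coalescing: DMACR[23:16] holds the completion count that
* must be reached before the IOC irq is raised and DMACR[31:24] a delay
* (in timer ticks) after which the delayed irq fires even if the count
* has not been reached yet.
*/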
static void stream_complete(struct AXIStream *s)
{
unsigned int comp_delay;
/* Start the delayed timer. */
comp_delay = s->regs[R_DMACR] >> 24;
if (comp_delay) {
ptimer_stop(s->ptimer);
ptimer_set_count(s->ptimer, comp_delay);
ptimer_run(s->ptimer, 1);
}
s->complete_cnt--;
if (s->complete_cnt == 0) {
/* Raise the IOC irq. */
s->regs[R_DMASR] |= DMASR_IOC_IRQ;
stream_reload_complete_cnt(s);
}
}
static void stream_process_mem2s(struct AXIStream *s,
struct XilinxDMAConnection *dmach)
{
uint32_t prev_d;
unsigned char txbuf[16 * 1024];
unsigned int txlen;
uint32_t app[6];
if (!stream_running(s) || stream_idle(s)) {
return;
}
while (1) {
stream_desc_load(s, s->regs[R_CURDESC]);
if (s->desc.status & SDESC_STATUS_COMPLETE) {
s->regs[R_DMASR] |= DMASR_IDLE;
break;
}
if (stream_desc_sof(&s->desc)) {
s->pos = 0;
memcpy(app, s->desc.app, sizeof app);
}
txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
if ((txlen + s->pos) > sizeof txbuf) {
hw_error("%s: too small internal txbuf! %d\n", __func__,
txlen + s->pos);
}
cpu_physical_memory_read(s->desc.buffer_address,
txbuf + s->pos, txlen);
s->pos += txlen;
if (stream_desc_eof(&s->desc)) {
xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
s->pos = 0;
stream_complete(s);
}
/* Update the descriptor. */
s->desc.status = txlen | SDESC_STATUS_COMPLETE;
stream_desc_store(s, s->regs[R_CURDESC]);
/* Advance. */
prev_d = s->regs[R_CURDESC];
s->regs[R_CURDESC] = s->desc.nxtdesc;
if (prev_d == s->regs[R_TAILDESC]) {
s->regs[R_DMASR] |= DMASR_IDLE;
break;
}
}
}
static void stream_process_s2mem(struct AXIStream *s,
unsigned char *buf, size_t len, uint32_t *app)
{
uint32_t prev_d;
unsigned int rxlen;
int pos = 0;
int sof = 1;
if (!stream_running(s) || stream_idle(s)) {
return;
}
while (len) {
stream_desc_load(s, s->regs[R_CURDESC]);
if (s->desc.status & SDESC_STATUS_COMPLETE) {
s->regs[R_DMASR] |= DMASR_IDLE;
break;
}
rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
if (rxlen > len) {
/* The remaining data fits in this descriptor's buffer. */
rxlen = len;
}
cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
len -= rxlen;
pos += rxlen;
/* Update the descriptor. */
if (!len) {
int i;
stream_complete(s);
for (i = 0; i < 5; i++) {
s->desc.app[i] = app[i];
}
s->desc.status |= SDESC_STATUS_EOF;
}
s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
s->desc.status |= SDESC_STATUS_COMPLETE;
stream_desc_store(s, s->regs[R_CURDESC]);
sof = 0;
/* Advance. */
prev_d = s->regs[R_CURDESC];
s->regs[R_CURDESC] = s->desc.nxtdesc;
if (prev_d == s->regs[R_TAILDESC]) {
s->regs[R_DMASR] |= DMASR_IDLE;
break;
}
}
}
static
void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
{
struct XilinxAXIDMA *d = opaque;
struct AXIStream *s = &d->streams[1];
if (!app) {
hw_error("No stream app data!\n");
}
stream_process_s2mem(s, buf, len, app);
stream_update_irq(s);
}
static uint32_t axidma_readl(void *opaque, target_phys_addr_t addr)
{
struct XilinxAXIDMA *d = opaque;
struct AXIStream *s;
uint32_t r = 0;
int sid;
sid = streamid_from_addr(addr);
s = &d->streams[sid];
addr = addr % 0x30;
addr >>= 2;
switch (addr) {
case R_DMACR:
/* Simulate one cycle of reset delay. */
s->regs[addr] &= ~DMACR_RESET;
r = s->regs[addr];
break;
case R_DMASR:
s->regs[addr] &= 0xffff;
s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
r = s->regs[addr];
break;
default:
r = s->regs[addr];
D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
__func__, sid, addr * 4, r));
break;
}
return r;
}
static void
axidma_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
{
struct XilinxAXIDMA *d = opaque;
struct AXIStream *s;
int sid;
sid = streamid_from_addr(addr);
s = &d->streams[sid];
addr = addr % 0x30;
addr >>= 2;
switch (addr) {
case R_DMACR:
/* Tailptr mode is always on. */
value |= DMACR_TAILPTR_MODE;
/* Remember our previous reset state. */
value |= (s->regs[addr] & DMACR_RESET);
s->regs[addr] = value;
if (value & DMACR_RESET) {
stream_reset(s);
}
if ((value & 1) && !stream_resetting(s)) {
/* Start processing. */
s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
}
stream_reload_complete_cnt(s);
break;
case R_DMASR:
/* Mask away the write-one-to-clear irq bits. */
value &= ~(value & DMASR_IRQ_MASK);
s->regs[addr] = value;
break;
case R_TAILDESC:
s->regs[addr] = value;
s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
if (!sid) {
stream_process_mem2s(s, d->dmach);
}
break;
default:
D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
__func__, sid, addr * 4, value));
s->regs[addr] = value;
break;
}
stream_update_irq(s);
}
static CPUReadMemoryFunc * const axidma_read[] = {
&axidma_readl,
&axidma_readl,
&axidma_readl,
};
static CPUWriteMemoryFunc * const axidma_write[] = {
&axidma_writel,
&axidma_writel,
&axidma_writel,
};
static int xilinx_axidma_init(SysBusDevice *dev)
{
struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
int axidma_regs;
int i;
sysbus_init_irq(dev, &s->streams[1].irq);
sysbus_init_irq(dev, &s->streams[0].irq);
if (!s->dmach) {
hw_error("Unconnected DMA channel.\n");
}
xlx_dma_connect_dma(s->dmach, s, axidma_push);
axidma_regs = cpu_register_io_memory(axidma_read, axidma_write, s,
DEVICE_NATIVE_ENDIAN);
sysbus_init_mmio(dev, R_MAX * 4 * 2, axidma_regs);
for (i = 0; i < 2; i++) {
stream_reset(&s->streams[i]);
s->streams[i].nr = i;
s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
}
return 0;
}
static SysBusDeviceInfo axidma_info = {
.init = xilinx_axidma_init,
.qdev.name = "xilinx,axidma",
.qdev.size = sizeof(struct XilinxAXIDMA),
.qdev.props = (Property[]) {
DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
DEFINE_PROP_END_OF_LIST(),
}
};
static void xilinx_axidma_register(void)
{
sysbus_register_withprop(&axidma_info);
}
device_init(xilinx_axidma_register)

hw/xilinx_axidma.h (new file, 39 lines)

@@ -0,0 +1,39 @@
/* AXI DMA connection. Used until qdev provides a generic way. */
typedef void (*DMAPushFn)(void *opaque,
unsigned char *buf, size_t len, uint32_t *app);
struct XilinxDMAConnection {
void *dma;
void *client;
DMAPushFn to_dma;
DMAPushFn to_client;
};
static inline void xlx_dma_connect_client(struct XilinxDMAConnection *dmach,
void *c, DMAPushFn f)
{
dmach->client = c;
dmach->to_client = f;
}
static inline void xlx_dma_connect_dma(struct XilinxDMAConnection *dmach,
void *d, DMAPushFn f)
{
dmach->dma = d;
dmach->to_dma = f;
}
static inline
void xlx_dma_push_to_dma(struct XilinxDMAConnection *dmach,
uint8_t *buf, size_t len, uint32_t *app)
{
dmach->to_dma(dmach->dma, buf, len, app);
}
static inline
void xlx_dma_push_to_client(struct XilinxDMAConnection *dmach,
uint8_t *buf, size_t len, uint32_t *app)
{
dmach->to_client(dmach->client, buf, len, app);
}
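The struct above is shared between the DMA and the ethernet model: the machine model allocates one XilinxDMAConnection, hands the pointer to both devices through their "dmach" pointer properties, and each device then registers itself with xlx_dma_connect_dma() or xlx_dma_connect_client() from its init function. A minimal sketch of such board wiring follows; the base addresses, the irq[] array and the use of nd_table[0] are illustrative assumptions, not taken from any board file in this commit.

/* Sketch of board wiring; addresses and IRQ lines below are made up. */
struct XilinxDMAConnection *dmach = qemu_mallocz(sizeof *dmach);
DeviceState *eth, *dma;

eth = qdev_create(NULL, "xilinx,axienet");
qdev_set_nic_properties(eth, &nd_table[0]);
qdev_prop_set_ptr(eth, "dmach", dmach);
qdev_init_nofail(eth);
sysbus_mmio_map(sysbus_from_qdev(eth), 0, 0x82780000);
sysbus_connect_irq(sysbus_from_qdev(eth), 0, irq[3]);

dma = qdev_create(NULL, "xilinx,axidma");
qdev_prop_set_ptr(dma, "dmach", dmach);
qdev_init_nofail(dma);
sysbus_mmio_map(sysbus_from_qdev(dma), 0, 0x84600000);
sysbus_connect_irq(sysbus_from_qdev(dma), 0, irq[1]); /* S2MM (rx) stream */
sysbus_connect_irq(sysbus_from_qdev(dma), 1, irq[0]); /* MM2S (tx) stream */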

hw/xilinx_axienet.c (new file, 898 lines)

@@ -0,0 +1,898 @@
/*
* QEMU model of Xilinx AXI-Ethernet.
*
* Copyright (c) 2011 Edgar E. Iglesias.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "sysbus.h"
#include "qemu-char.h"
#include "qemu-log.h"
#include "net.h"
#include "net/checksum.h"
#include "xilinx_axidma.h"
#define DPHY(x)
/* Advertisement control register. */
#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
struct PHY {
uint32_t regs[32];
int link;
unsigned int (*read)(struct PHY *phy, unsigned int req);
void (*write)(struct PHY *phy, unsigned int req,
unsigned int data);
};
static unsigned int tdk_read(struct PHY *phy, unsigned int req)
{
int regnum;
unsigned r = 0;
regnum = req & 0x1f;
switch (regnum) {
case 1:
if (!phy->link) {
break;
}
/* MR1. */
/* Speeds and modes. */
r |= (1 << 13) | (1 << 14);
r |= (1 << 11) | (1 << 12);
r |= (1 << 5); /* Autoneg complete. */
r |= (1 << 3); /* Autoneg able. */
r |= (1 << 2); /* link. */
r |= (1 << 1); /* link. */
break;
case 5:
/* Link partner ability.
We are kind; always agree with whatever best mode
the guest advertises. */
r = 1 << 14; /* Success. */
/* Copy advertised modes. */
r |= phy->regs[4] & (15 << 5);
/* Autoneg support. */
r |= 1;
break;
case 17:
/* Marvell PHY on many xilinx boards. */
r = 0x8000; /* 1000Mb */
break;
case 18:
{
/* Diagnostics reg. */
int duplex = 0;
int speed_100 = 0;
if (!phy->link) {
break;
}
/* Are we advertising 100 half or 100 full duplex? */
speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);
/* Are we advertising 10 full or 100 full duplex? */
duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
r = (speed_100 << 10) | (duplex << 11);
}
break;
default:
r = phy->regs[regnum];
break;
}
DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
return r;
}
static void
tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
{
int regnum;
regnum = req & 0x1f;
DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
switch (regnum) {
default:
phy->regs[regnum] = data;
break;
}
}
static void
tdk_init(struct PHY *phy)
{
phy->regs[0] = 0x3100;
/* PHY Id. */
phy->regs[2] = 0x0300;
phy->regs[3] = 0xe400;
/* Autonegotiation advertisement reg. */
phy->regs[4] = 0x01E1;
phy->link = 1;
phy->read = tdk_read;
phy->write = tdk_write;
}
struct MDIOBus {
/* bus. */
int mdc;
int mdio;
/* decoder. */
enum {
PREAMBLE,
SOF,
OPC,
ADDR,
REQ,
TURNAROUND,
DATA
} state;
unsigned int drive;
unsigned int cnt;
unsigned int addr;
unsigned int opc;
unsigned int req;
unsigned int data;
struct PHY *devs[32];
};
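/*
* The model only drives the bus at the register level, via
* mdio_read_req() and mdio_write_req() issued from the MAC's MDIO
* control register; the bit-level decoder state above is unused here.
*/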
static void
mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
bus->devs[addr & 0x1f] = phy;
}
#ifdef USE_THIS_DEAD_CODE
static void
mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
bus->devs[addr & 0x1f] = NULL;
}
#endif
static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
unsigned int reg)
{
struct PHY *phy;
uint16_t data;
phy = bus->devs[addr];
if (phy && phy->read) {
data = phy->read(phy, reg);
} else {
data = 0xffff;
}
DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
return data;
}
static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
unsigned int reg, uint16_t data)
{
struct PHY *phy;
DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
phy = bus->devs[addr];
if (phy && phy->write) {
phy->write(phy, reg, data);
}
}
#define DENET(x)
#define R_RAF (0x000 / 4)
enum {
RAF_MCAST_REJ = (1 << 1),
RAF_BCAST_REJ = (1 << 2),
RAF_EMCF_EN = (1 << 12),
RAF_NEWFUNC_EN = (1 << 11)
};
#define R_IS (0x00C / 4)
enum {
IS_HARD_ACCESS_COMPLETE = 1,
IS_AUTONEG = (1 << 1),
IS_RX_COMPLETE = (1 << 2),
IS_RX_REJECT = (1 << 3),
IS_TX_COMPLETE = (1 << 5),
IS_RX_DCM_LOCK = (1 << 6),
IS_MGM_RDY = (1 << 7),
IS_PHY_RST_DONE = (1 << 8),
};
#define R_IP (0x010 / 4)
#define R_IE (0x014 / 4)
#define R_UAWL (0x020 / 4)
#define R_UAWU (0x024 / 4)
#define R_PPST (0x030 / 4)
enum {
PPST_LINKSTATUS = (1 << 0),
PPST_PHY_LINKSTATUS = (1 << 7),
};
#define R_STATS_RX_BYTESL (0x200 / 4)
#define R_STATS_RX_BYTESH (0x204 / 4)
#define R_STATS_TX_BYTESL (0x208 / 4)
#define R_STATS_TX_BYTESH (0x20C / 4)
#define R_STATS_RXL (0x290 / 4)
#define R_STATS_RXH (0x294 / 4)
#define R_STATS_RX_BCASTL (0x2a0 / 4)
#define R_STATS_RX_BCASTH (0x2a4 / 4)
#define R_STATS_RX_MCASTL (0x2a8 / 4)
#define R_STATS_RX_MCASTH (0x2ac / 4)
#define R_RCW0 (0x400 / 4)
#define R_RCW1 (0x404 / 4)
enum {
RCW1_VLAN = (1 << 27),
RCW1_RX = (1 << 28),
RCW1_FCS = (1 << 29),
RCW1_JUM = (1 << 30),
RCW1_RST = (1 << 31),
};
#define R_TC (0x408 / 4)
enum {
TC_VLAN = (1 << 27),
TC_TX = (1 << 28),
TC_FCS = (1 << 29),
TC_JUM = (1 << 30),
TC_RST = (1 << 31),
};
#define R_EMMC (0x410 / 4)
enum {
EMMC_LINKSPEED_10MB = (0 << 30),
EMMC_LINKSPEED_100MB = (1 << 30),
EMMC_LINKSPEED_1000MB = (2 << 30),
};
#define R_PHYC (0x414 / 4)
#define R_MC (0x500 / 4)
#define MC_EN (1 << 6)
#define R_MCR (0x504 / 4)
#define R_MWD (0x508 / 4)
#define R_MRD (0x50c / 4)
#define R_MIS (0x600 / 4)
#define R_MIP (0x620 / 4)
#define R_MIE (0x640 / 4)
#define R_MIC (0x640 / 4)
#define R_UAW0 (0x700 / 4)
#define R_UAW1 (0x704 / 4)
#define R_FMI (0x708 / 4)
#define R_AF0 (0x710 / 4)
#define R_AF1 (0x714 / 4)
#define R_MAX (0x34 / 4)
/* Indirect registers. */
struct TEMAC {
struct MDIOBus mdio_bus;
struct PHY phy;
void *parent;
};
struct XilinxAXIEnet {
SysBusDevice busdev;
qemu_irq irq;
void *dmach;
NICState *nic;
NICConf conf;
uint32_t c_rxmem;
uint32_t c_txmem;
uint32_t c_phyaddr;
struct TEMAC TEMAC;
/* MII regs. */
union {
uint32_t regs[4];
struct {
uint32_t mc;
uint32_t mcr;
uint32_t mwd;
uint32_t mrd;
};
} mii;
struct {
uint64_t rx_bytes;
uint64_t tx_bytes;
uint64_t rx;
uint64_t rx_bcast;
uint64_t rx_mcast;
} stats;
/* Receive configuration words. */
uint32_t rcw[2];
/* Transmit config. */
uint32_t tc;
uint32_t emmc;
uint32_t phyc;
/* Unicast Address Word. */
uint32_t uaw[2];
/* Unicast address filter used with extended mcast. */
uint32_t ext_uaw[2];
uint32_t fmi;
uint32_t regs[R_MAX];
/* Multicast filter addrs. */
uint32_t maddr[4][2];
/* 32K x 1 lookup filter. */
uint32_t ext_mtable[1024];
uint8_t *rxmem;
};
static void axienet_rx_reset(struct XilinxAXIEnet *s)
{
s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
}
static void axienet_tx_reset(struct XilinxAXIEnet *s)
{
s->tc = TC_JUM | TC_TX | TC_VLAN;
}
static inline int axienet_rx_resetting(struct XilinxAXIEnet *s)
{
return s->rcw[1] & RCW1_RST;
}
static inline int axienet_rx_enabled(struct XilinxAXIEnet *s)
{
return s->rcw[1] & RCW1_RX;
}
static inline int axienet_extmcf_enabled(struct XilinxAXIEnet *s)
{
return !!(s->regs[R_RAF] & RAF_EMCF_EN);
}
static inline int axienet_newfunc_enabled(struct XilinxAXIEnet *s)
{
return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
}
static void axienet_reset(struct XilinxAXIEnet *s)
{
axienet_rx_reset(s);
axienet_tx_reset(s);
s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;
s->emmc = EMMC_LINKSPEED_100MB;
}
static void enet_update_irq(struct XilinxAXIEnet *s)
{
s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
qemu_set_irq(s->irq, !!s->regs[R_IP]);
}
static uint32_t enet_readl(void *opaque, target_phys_addr_t addr)
{
struct XilinxAXIEnet *s = opaque;
uint32_t r = 0;
addr >>= 2;
switch (addr) {
case R_RCW0:
case R_RCW1:
r = s->rcw[addr & 1];
break;
case R_TC:
r = s->tc;
break;
case R_EMMC:
r = s->emmc;
break;
case R_PHYC:
r = s->phyc;
break;
case R_MCR:
r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
break;
case R_STATS_RX_BYTESL:
case R_STATS_RX_BYTESH:
r = s->stats.rx_bytes >> (32 * (addr & 1));
break;
case R_STATS_TX_BYTESL:
case R_STATS_TX_BYTESH:
r = s->stats.tx_bytes >> (32 * (addr & 1));
break;
case R_STATS_RXL:
case R_STATS_RXH:
r = s->stats.rx >> (32 * (addr & 1));
break;
case R_STATS_RX_BCASTL:
case R_STATS_RX_BCASTH:
r = s->stats.rx_bcast >> (32 * (addr & 1));
break;
case R_STATS_RX_MCASTL:
case R_STATS_RX_MCASTH:
r = s->stats.rx_mcast >> (32 * (addr & 1));
break;
case R_MC:
case R_MWD:
case R_MRD:
r = s->mii.regs[addr & 3];
break;
case R_UAW0:
case R_UAW1:
r = s->uaw[addr & 1];
break;
case R_UAWU:
case R_UAWL:
r = s->ext_uaw[addr & 1];
break;
case R_FMI:
r = s->fmi;
break;
case R_AF0:
case R_AF1:
r = s->maddr[s->fmi & 3][addr & 1];
break;
case 0x8000 ... 0x83ff:
r = s->ext_mtable[addr - 0x8000];
break;
default:
if (addr < ARRAY_SIZE(s->regs)) {
r = s->regs[addr];
}
DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
__func__, addr * 4, r));
break;
}
return r;
}
static void
enet_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
{
struct XilinxAXIEnet *s = opaque;
struct TEMAC *t = &s->TEMAC;
addr >>= 2;
switch (addr) {
case R_RCW0:
case R_RCW1:
s->rcw[addr & 1] = value;
if ((addr & 1) && value & RCW1_RST) {
axienet_rx_reset(s);
}
break;
case R_TC:
s->tc = value;
if (value & TC_RST) {
axienet_tx_reset(s);
}
break;
case R_EMMC:
s->emmc = value;
break;
case R_PHYC:
s->phyc = value;
break;
case R_MC:
value &= ((1 << 7) - 1);
/* Enable the MII. */
if (value & MC_EN) {
unsigned int miiclkdiv = value & ((1 << 6) - 1);
if (!miiclkdiv) {
qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
}
}
s->mii.mc = value;
break;
case R_MCR: {
unsigned int phyaddr = (value >> 24) & 0x1f;
unsigned int regaddr = (value >> 16) & 0x1f;
unsigned int op = (value >> 14) & 3;
unsigned int initiate = (value >> 11) & 1;
if (initiate) {
if (op == 1) {
mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
} else if (op == 2) {
s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
} else {
qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
}
}
s->mii.mcr = value;
break;
}
case R_MWD:
case R_MRD:
s->mii.regs[addr & 3] = value;
break;
case R_UAW0:
case R_UAW1:
s->uaw[addr & 1] = value;
break;
case R_UAWL:
case R_UAWU:
s->ext_uaw[addr & 1] = value;
break;
case R_FMI:
s->fmi = value;
break;
case R_AF0:
case R_AF1:
s->maddr[s->fmi & 3][addr & 1] = value;
break;
case 0x8000 ... 0x83ff:
s->ext_mtable[addr - 0x8000] = value;
break;
default:
DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
__func__, addr * 4, value));
if (addr < ARRAY_SIZE(s->regs)) {
s->regs[addr] = value;
}
break;
}
enet_update_irq(s);
}
static CPUReadMemoryFunc * const enet_read[] = {
&enet_readl,
&enet_readl,
&enet_readl,
};
static CPUWriteMemoryFunc * const enet_write[] = {
&enet_writel,
&enet_writel,
&enet_writel,
};
static int eth_can_rx(VLANClientState *nc)
{
struct XilinxAXIEnet *s = DO_UPCAST(NICState, nc, nc)->opaque;
/* RX enabled? */
return !axienet_rx_resetting(s) && axienet_rx_enabled(s);
}
static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
{
int match = 1;
if (memcmp(buf, &f0, 4)) {
match = 0;
}
if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
match = 0;
}
return match;
}
static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
{
struct XilinxAXIEnet *s = DO_UPCAST(NICState, nc, nc)->opaque;
static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
0xff, 0xff, 0xff};
static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
uint32_t app[6] = {0};
int promisc = s->fmi & (1 << 31);
int unicast, broadcast, multicast, ip_multicast = 0;
uint32_t csum32;
uint16_t csum16;
int i;
DENET(qemu_log("%s: %zd bytes\n", __func__, size));
unicast = ~buf[0] & 0x1;
broadcast = memcmp(buf, sa_bcast, 6) == 0;
multicast = !unicast && !broadcast;
if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
ip_multicast = 1;
}
/* Jumbo or vlan sizes ? */
if (!(s->rcw[1] & RCW1_JUM)) {
if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
return size;
}
}
/* Basic Address filters. If you want to use the extended filters
you'll generally have to place the ethernet mac into promiscuous mode
to keep the basic filtering from dropping most frames. */
if (!promisc) {
if (unicast) {
if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
return size;
}
} else {
if (broadcast) {
/* Broadcast. */
if (s->regs[R_RAF] & RAF_BCAST_REJ) {
return size;
}
} else {
int drop = 1;
/* Multicast. */
if (s->regs[R_RAF] & RAF_MCAST_REJ) {
return size;
}
for (i = 0; i < 4; i++) {
if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
drop = 0;
break;
}
}
if (drop) {
return size;
}
}
}
}
/* Extended mcast filtering enabled? */
if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
if (unicast) {
if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
return size;
}
} else {
if (broadcast) {
/* Broadcast. ??? */
if (s->regs[R_RAF] & RAF_BCAST_REJ) {
return size;
}
} else {
int idx, bit;
/* Multicast. */
if (!memcmp(buf, sa_ipmcast, 3)) {
return size;
}
idx = (buf[4] & 0x7f) << 8;
idx |= buf[5];
bit = 1 << (idx & 0x1f);
idx >>= 5;
if (!(s->ext_mtable[idx] & bit)) {
return size;
}
}
}
}
if (size < 12) {
s->regs[R_IS] |= IS_RX_REJECT;
enet_update_irq(s);
return -1;
}
if (size > (s->c_rxmem - 4)) {
size = s->c_rxmem - 4;
}
memcpy(s->rxmem, buf, size);
memset(s->rxmem + size, 0, 4); /* Clear the FCS. */
if (s->rcw[1] & RCW1_FCS) {
size += 4; /* fcs is inband. */
}
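/* Status words handed to the DMA along with the frame: app[2] gets the
multicast/broadcast/good-frame flags, app[3] the folded 16-bit checksum
over everything past the 14 byte ethernet header and app[4] the length. */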
app[0] = 5 << 28;
csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
/* Fold it once. */
csum32 = (csum32 & 0xffff) + (csum32 >> 16);
/* And twice to get rid of possible carries. */
csum16 = (csum32 & 0xffff) + (csum32 >> 16);
app[3] = csum16;
app[4] = size & 0xffff;
s->stats.rx_bytes += size;
s->stats.rx++;
if (multicast) {
s->stats.rx_mcast++;
app[2] |= 1 | (ip_multicast << 1);
} else if (broadcast) {
s->stats.rx_bcast++;
app[2] |= 1 << 3;
}
/* Good frame. */
app[2] |= 1 << 6;
xlx_dma_push_to_dma(s->dmach, (void *)s->rxmem, size, app);
s->regs[R_IS] |= IS_RX_COMPLETE;
enet_update_irq(s);
return size;
}
static void eth_cleanup(VLANClientState *nc)
{
/* FIXME. */
struct XilinxAXIEnet *s = DO_UPCAST(NICState, nc, nc)->opaque;
qemu_free(s->rxmem);
qemu_free(s);
}
static void
axienet_stream_push(void *opaque, uint8_t *buf, size_t size, uint32_t *hdr)
{
struct XilinxAXIEnet *s = opaque;
/* TX enable ? */
if (!(s->tc & TC_TX)) {
return;
}
/* Jumbo or vlan sizes ? */
if (!(s->tc & TC_JUM)) {
if (size > 1518 && size <= 1522 && !(s->tc & TC_VLAN)) {
return;
}
}
if (hdr[0] & 1) {
unsigned int start_off = hdr[1] >> 16;
unsigned int write_off = hdr[1] & 0xffff;
uint32_t tmp_csum;
uint16_t csum;
tmp_csum = net_checksum_add(size - start_off,
(uint8_t *)buf + start_off);
/* Accumulate the seed. */
tmp_csum += hdr[2] & 0xffff;
/* Fold the 32bit partial checksum. */
csum = net_checksum_finish(tmp_csum);
/* Writeback. */
buf[write_off] = csum >> 8;
buf[write_off + 1] = csum & 0xff;
}
qemu_send_packet(&s->nic->nc, buf, size);
s->stats.tx_bytes += size;
s->regs[R_IS] |= IS_TX_COMPLETE;
enet_update_irq(s);
}
static NetClientInfo net_xilinx_enet_info = {
.type = NET_CLIENT_TYPE_NIC,
.size = sizeof(NICState),
.can_receive = eth_can_rx,
.receive = eth_rx,
.cleanup = eth_cleanup,
};
static int xilinx_enet_init(SysBusDevice *dev)
{
struct XilinxAXIEnet *s = FROM_SYSBUS(typeof(*s), dev);
int enet_regs;
sysbus_init_irq(dev, &s->irq);
if (!s->dmach) {
hw_error("Unconnected Xilinx Ethernet MAC.\n");
}
xlx_dma_connect_client(s->dmach, s, axienet_stream_push);
enet_regs = cpu_register_io_memory(enet_read, enet_write, s,
DEVICE_LITTLE_ENDIAN);
sysbus_init_mmio(dev, 0x40000, enet_regs);
qemu_macaddr_default_if_unset(&s->conf.macaddr);
s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
dev->qdev.info->name, dev->qdev.id, s);
qemu_format_nic_info_str(&s->nic->nc, s->conf.macaddr.a);
tdk_init(&s->TEMAC.phy);
mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);
s->TEMAC.parent = s;
s->rxmem = qemu_malloc(s->c_rxmem);
axienet_reset(s);
return 0;
}
static SysBusDeviceInfo xilinx_enet_info = {
.init = xilinx_enet_init,
.qdev.name = "xilinx,axienet",
.qdev.size = sizeof(struct XilinxAXIEnet),
.qdev.props = (Property[]) {
DEFINE_PROP_UINT32("phyaddr", struct XilinxAXIEnet, c_phyaddr, 7),
DEFINE_PROP_UINT32("c_rxmem", struct XilinxAXIEnet, c_rxmem, 0x1000),
DEFINE_PROP_UINT32("c_txmem", struct XilinxAXIEnet, c_txmem, 0x1000),
DEFINE_PROP_PTR("dmach", struct XilinxAXIEnet, dmach),
DEFINE_NIC_PROPERTIES(struct XilinxAXIEnet, conf),
DEFINE_PROP_END_OF_LIST(),
}
};
static void xilinx_enet_register(void)
{
sysbus_register_withprop(&xilinx_enet_info);
}
device_init(xilinx_enet_register)