
Merge branch 'omap-for-upstream' of git://git.linaro.org/people/pmaydell/qemu-arm into pm

Edgar E. Iglesias 2011-08-29 23:59:06 +02:00
commit f0fb8b7180
8 changed files with 740 additions and 210 deletions

View File

@ -36,12 +36,7 @@ uint32_t nand_getbuswidth(DeviceState *dev);
#define NAND_MFR_MICRON 0x2c
/* onenand.c */
void onenand_base_update(void *opaque, target_phys_addr_t new);
void onenand_base_unmap(void *opaque);
void *onenand_init(BlockDriverState *bdrv,
uint16_t man_id, uint16_t dev_id, uint16_t ver_id,
int regshift, qemu_irq irq);
void *onenand_raw_otp(void *opaque);
void *onenand_raw_otp(DeviceState *onenand_device);
/* ecc.c */
typedef struct {

View File

@ -33,6 +33,7 @@
#include "loader.h"
#include "blockdev.h"
#include "tusb6010.h"
#include "sysbus.h"
/* Nokia N8x0 support */
struct n800_s {
@ -52,7 +53,7 @@ struct n800_s {
TUSBState *usb;
void *retu;
void *tahvo;
void *nand;
DeviceState *nand;
};
/* GPIO pins */
@ -167,13 +168,21 @@ static void n8x0_nand_setup(struct n800_s *s)
char *otp_region;
DriveInfo *dinfo;
dinfo = drive_get(IF_MTD, 0, 0);
s->nand = qdev_create(NULL, "onenand");
qdev_prop_set_uint16(s->nand, "manufacturer_id", NAND_MFR_SAMSUNG);
/* Either 0x40 or 0x48 are OK for the device ID */
s->nand = onenand_init(dinfo ? dinfo->bdrv : 0,
NAND_MFR_SAMSUNG, 0x48, 0, 1,
qdev_get_gpio_in(s->cpu->gpio, N8X0_ONENAND_GPIO));
omap_gpmc_attach(s->cpu->gpmc, N8X0_ONENAND_CS, 0, onenand_base_update,
onenand_base_unmap, s->nand);
qdev_prop_set_uint16(s->nand, "device_id", 0x48);
qdev_prop_set_uint16(s->nand, "version_id", 0);
qdev_prop_set_int32(s->nand, "shift", 1);
dinfo = drive_get(IF_MTD, 0, 0);
if (dinfo && dinfo->bdrv) {
qdev_prop_set_drive_nofail(s->nand, "drive", dinfo->bdrv);
}
qdev_init_nofail(s->nand);
sysbus_connect_irq(sysbus_from_qdev(s->nand), 0,
qdev_get_gpio_in(s->cpu->gpio, N8X0_ONENAND_GPIO));
omap_gpmc_attach(s->cpu->gpmc, N8X0_ONENAND_CS,
sysbus_mmio_get_region(sysbus_from_qdev(s->nand), 0));
otp_region = onenand_raw_otp(s->nand);
memcpy(otp_region + 0x000, n8x0_cal_wlan_mac, sizeof(n8x0_cal_wlan_mac));
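The rewritten n8x0_nand_setup() follows the standard qdev bring-up sequence: create the device, configure it through properties, realize it with qdev_init_nofail(), then wire the IRQ and place the sysbus MMIO region. A minimal board-side sketch of that sequence, using a hypothetical sysbus device "mydev", property "chip_id" and base address that are not part of this commit:

/* Sketch: generic qdev/sysbus bring-up as done for "onenand" above,
 * with illustrative names ("mydev", "chip_id", the 0x10000000 base).
 */
static void example_board_wire_mydev(qemu_irq irq_line)
{
    DeviceState *dev = qdev_create(NULL, "mydev");   /* create, not yet realized */
    qdev_prop_set_uint16(dev, "chip_id", 0x1234);    /* configure via properties */
    qdev_init_nofail(dev);                           /* realize; exits on failure */
    sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq_line);
    memory_region_add_subregion(get_system_memory(), 0x10000000,
                                sysbus_mmio_get_region(sysbus_from_qdev(dev), 0));
}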
@ -770,10 +779,8 @@ static void n8x0_usb_setup(struct n800_s *s)
TUSBState *tusb = tusb6010_init(tusb_irq);
/* Using the NOR interface */
omap_gpmc_attach(s->cpu->gpmc, N8X0_USB_ASYNC_CS,
tusb6010_async_io(tusb), NULL, NULL, tusb);
omap_gpmc_attach(s->cpu->gpmc, N8X0_USB_SYNC_CS,
tusb6010_sync_io(tusb), NULL, NULL, tusb);
omap_gpmc_attach(s->cpu->gpmc, N8X0_USB_ASYNC_CS, tusb6010_async_io(tusb));
omap_gpmc_attach(s->cpu->gpmc, N8X0_USB_SYNC_CS, tusb6010_sync_io(tusb));
s->usb = tusb;
qdev_connect_gpio_out(s->cpu->gpio, N8X0_TUSB_ENABLE_GPIO, tusb_pwr);

View File

@ -118,11 +118,12 @@ void omap_sdrc_reset(struct omap_sdrc_s *s);
/* OMAP2 general purpose memory controller */
struct omap_gpmc_s;
struct omap_gpmc_s *omap_gpmc_init(target_phys_addr_t base, qemu_irq irq);
struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
target_phys_addr_t base,
qemu_irq irq, qemu_irq drq);
void omap_gpmc_reset(struct omap_gpmc_s *s);
void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem,
void (*base_upd)(void *opaque, target_phys_addr_t new),
void (*unmap)(void *opaque), void *opaque);
void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem);
void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand);
/*
* Common IRQ numbers for level 1 interrupt handler
@ -788,6 +789,7 @@ i2c_bus *omap_i2c_bus(struct omap_i2c_s *s);
# define cpu_is_omap2420(cpu) (cpu->mpu_model == omap2420)
# define cpu_is_omap2430(cpu) (cpu->mpu_model == omap2430)
# define cpu_is_omap3430(cpu) (cpu->mpu_model == omap3430)
# define cpu_is_omap3630(cpu) (cpu->mpu_model == omap3630)
# define cpu_is_omap15xx(cpu) \
(cpu_is_omap310(cpu) || cpu_is_omap1510(cpu))
@ -799,7 +801,8 @@ i2c_bus *omap_i2c_bus(struct omap_i2c_s *s);
# define cpu_class_omap1(cpu) \
(cpu_is_omap15xx(cpu) || cpu_is_omap16xx(cpu))
# define cpu_class_omap2(cpu) cpu_is_omap24xx(cpu)
# define cpu_class_omap3(cpu) cpu_is_omap3430(cpu)
# define cpu_class_omap3(cpu) \
(cpu_is_omap3430(cpu) || cpu_is_omap3630(cpu))
struct omap_mpu_state_s {
enum omap_mpu_model {
@ -813,6 +816,7 @@ struct omap_mpu_state_s {
omap2423,
omap2430,
omap3430,
omap3630,
} mpu_model;
CPUState *env;

View File

@ -2402,7 +2402,8 @@ struct omap_mpu_state_s *omap2420_mpu_init(unsigned long sdram_size,
sysbus_mmio_map(busdev, 4, omap_l4_region_base(ta, 5));
s->sdrc = omap_sdrc_init(0x68009000);
s->gpmc = omap_gpmc_init(0x6800a000, s->irq[0][OMAP_INT_24XX_GPMC_IRQ]);
s->gpmc = omap_gpmc_init(s, 0x6800a000, s->irq[0][OMAP_INT_24XX_GPMC_IRQ],
s->drq[OMAP24XX_DMA_GPMC]);
dinfo = drive_get(IF_SD, 0, 0);
if (!dinfo) {

View File

@ -27,82 +27,410 @@
/* General-Purpose Memory Controller */
struct omap_gpmc_s {
qemu_irq irq;
qemu_irq drq;
MemoryRegion iomem;
int accept_256;
uint8_t revision;
uint8_t sysconfig;
uint16_t irqst;
uint16_t irqen;
uint16_t lastirq;
uint16_t timeout;
uint16_t config;
uint32_t prefconfig[2];
int prefcontrol;
int preffifo;
int prefcount;
struct omap_gpmc_cs_file_s {
uint32_t config[7];
target_phys_addr_t base;
size_t size;
MemoryRegion *iomem;
MemoryRegion container;
void (*base_update)(void *opaque, target_phys_addr_t new);
void (*unmap)(void *opaque);
void *opaque;
MemoryRegion nandiomem;
DeviceState *dev;
} cs_file[8];
int ecc_cs;
int ecc_ptr;
uint32_t ecc_cfg;
ECCState ecc[9];
struct prefetch {
uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
MemoryRegion iomem;
uint8_t fifo[64];
} prefetch;
};
#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2
static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
{
return (f->config[0] >> 10) & 3;
}
static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
{
/* devsize field is really 2 bits but we ignore the high
* bit to ensure consistent behaviour if the guest sets
* it (values 2 and 3 are reserved in the TRM)
*/
return (f->config[0] >> 12) & 1;
}
/* Extract the chip-select value from the prefetch config1 register */
static int prefetch_cs(uint32_t config1)
{
return (config1 >> 24) & 7;
}
static int prefetch_threshold(uint32_t config1)
{
return (config1 >> 8) & 0x7f;
}
static void omap_gpmc_int_update(struct omap_gpmc_s *s)
{
qemu_set_irq(s->irq, s->irqen & s->irqst);
/* The TRM is a bit unclear, but it seems to say that
* the TERMINALCOUNTSTATUS bit is set only on the
* transition when the prefetch engine goes from
* active to inactive, whereas the FIFOEVENTSTATUS
* bit is held high as long as the fifo has at
* least THRESHOLD bytes available.
* So we do the latter here, but TERMINALCOUNTSTATUS
* is set elsewhere.
*/
if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
s->irqst |= 1;
}
if ((s->irqen & s->irqst) != s->lastirq) {
s->lastirq = s->irqen & s->irqst;
qemu_set_irq(s->irq, s->lastirq);
}
}
static void omap_gpmc_cs_map(struct omap_gpmc_cs_file_s *f, int base, int mask)
static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
{
/* TODO: check for overlapping regions and report access errors */
if ((mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf) ||
(base < 0 || base >= 0x40) ||
(base & 0x0f & ~mask)) {
fprintf(stderr, "%s: wrong cs address mapping/decoding!\n",
__FUNCTION__);
if (s->prefetch.config1 & 4) {
qemu_set_irq(s->drq, value);
}
}
/* Access functions for when a NAND-like device is mapped into memory:
* all addresses in the region behave like accesses to the relevant
* GPMC_NAND_DATA_i register (which is actually implemented to call these)
*/
static uint64_t omap_nand_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
uint64_t v;
nand_setpins(f->dev, 0, 0, 0, 1, 0);
switch (omap_gpmc_devsize(f)) {
case OMAP_GPMC_8BIT:
v = nand_getio(f->dev);
if (size == 1) {
return v;
}
v |= (nand_getio(f->dev) << 8);
if (size == 2) {
return v;
}
v |= (nand_getio(f->dev) << 16);
v |= (nand_getio(f->dev) << 24);
return v;
case OMAP_GPMC_16BIT:
v = nand_getio(f->dev);
if (size == 1) {
/* 8 bit read from 16 bit device : probably a guest bug */
return v & 0xff;
}
if (size == 2) {
return v;
}
v |= (nand_getio(f->dev) << 16);
return v;
default:
abort();
}
}
static void omap_nand_setio(DeviceState *dev, uint64_t value,
int nandsize, int size)
{
/* Write the specified value to the NAND device, respecting
* both size of the NAND device and size of the write access.
*/
switch (nandsize) {
case OMAP_GPMC_8BIT:
switch (size) {
case 1:
nand_setio(dev, value & 0xff);
break;
case 2:
nand_setio(dev, value & 0xff);
nand_setio(dev, (value >> 8) & 0xff);
break;
case 4:
default:
nand_setio(dev, value & 0xff);
nand_setio(dev, (value >> 8) & 0xff);
nand_setio(dev, (value >> 16) & 0xff);
nand_setio(dev, (value >> 24) & 0xff);
break;
}
break;
case OMAP_GPMC_16BIT:
switch (size) {
case 1:
/* writing to a 16bit device with 8bit access is probably a guest
* bug; pass the value through anyway.
*/
case 2:
nand_setio(dev, value & 0xffff);
break;
case 4:
default:
nand_setio(dev, value & 0xffff);
nand_setio(dev, (value >> 16) & 0xffff);
break;
}
}
}
static void omap_nand_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
nand_setpins(f->dev, 0, 0, 0, 1, 0);
omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}
static const MemoryRegionOps omap_nand_ops = {
.read = omap_nand_read,
.write = omap_nand_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};
static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
/* Fill the prefetch FIFO by reading data from NAND.
* We do this synchronously, unlike the hardware which
* will do this asynchronously. We refill when the
* FIFO has THRESHOLD bytes free, and we always refill
* as much data as possible starting at the top end
* of the FIFO.
* (We have to refill at THRESHOLD rather than waiting
* for the FIFO to empty to allow for the case where
* the FIFO size isn't an exact multiple of THRESHOLD
* and we're doing DMA transfers.)
* This means we never need to handle wrap-around in
* the fifo-reading code, and the next byte of data
* to read is always fifo[63 - fifopointer].
*/
int fptr;
int cs = prefetch_cs(s->prefetch.config1);
int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
int bytes;
/* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
* and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
* Instead believe the bit that says it is always a byte count.
*/
bytes = 64 - s->prefetch.fifopointer;
if (bytes > s->prefetch.count) {
bytes = s->prefetch.count;
}
s->prefetch.count -= bytes;
s->prefetch.fifopointer += bytes;
fptr = 64 - s->prefetch.fifopointer;
/* Move the existing data in the FIFO so it sits just
* before what we're about to read in
*/
while (fptr < (64 - bytes)) {
s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
fptr++;
}
while (fptr < 64) {
if (is16bit) {
uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
s->prefetch.fifo[fptr++] = v & 0xff;
s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
} else {
s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
}
}
if (s->prefetch.startengine && (s->prefetch.count == 0)) {
/* This was the final transfer: raise TERMINALCOUNTSTATUS */
s->irqst |= 2;
s->prefetch.startengine = 0;
}
/* If there are any bytes in the FIFO at this point then
* we must raise a DMA request (either this is a final part
* transfer, or we filled the FIFO in which case we certainly
* have THRESHOLD bytes available)
*/
if (s->prefetch.fifopointer != 0) {
omap_gpmc_dma_update(s, 1);
}
omap_gpmc_int_update(s);
}
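A worked trace (not from the commit) makes the fifo[63 - fifopointer] indexing concrete, assuming the reset THRESHOLD of 64 and a 100-byte read transfer:

/* Worked example, assuming THRESHOLD = 64 and transfercount = 100:
 *   first fill:  bytes = min(64 - 0, 100) = 64
 *                -> count = 36, fifopointer = 64, data lands in fifo[0..63]
 *   guest read:  fifopointer 64 -> 63, returns fifo[63 - 63] = fifo[0],
 *                i.e. the oldest byte; later reads walk fifo[1], fifo[2], ...
 *   refill:      when fifopointer hits 64 - THRESHOLD = 0, the next fill
 *                reads min(64, 36) = 36 bytes into fifo[28..63], and the
 *                following guest read returns fifo[63 - 35] = fifo[28].
 */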
/* Access functions for a NAND-like device when the prefetch/postwrite
* engine is enabled -- all addresses in the region behave alike:
* data is read or written to the FIFO.
*/
static uint64_t omap_gpmc_prefetch_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
uint32_t data;
if (s->prefetch.config1 & 1) {
/* The TRM doesn't define the behaviour if you read from the
* FIFO when the prefetch engine is in write mode. We choose
* to always return zero.
*/
return 0;
}
/* Note that trying to read an empty fifo repeats the last byte */
if (s->prefetch.fifopointer) {
s->prefetch.fifopointer--;
}
data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
if (s->prefetch.fifopointer ==
(64 - prefetch_threshold(s->prefetch.config1))) {
/* We've drained THRESHOLD bytes now. So deassert the
* DMA request, then refill the FIFO (which will probably
* assert it again.)
*/
omap_gpmc_dma_update(s, 0);
fill_prefetch_fifo(s);
}
omap_gpmc_int_update(s);
return data;
}
static void omap_gpmc_prefetch_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
int cs = prefetch_cs(s->prefetch.config1);
if ((s->prefetch.config1 & 1) == 0) {
/* The TRM doesn't define the behaviour of writing to the
* FIFO when the prefetch engine is in read mode. We
* choose to ignore the write.
*/
return;
}
if (s->prefetch.count == 0) {
/* The TRM doesn't define the behaviour of writing to the
* FIFO if the transfer is complete. We choose to ignore.
*/
return;
}
/* The only reason we do any data buffering in postwrite
* mode is if we are talking to a 16 bit NAND device, in
* which case we need to buffer the first byte of the
* 16 bit word until the other byte arrives.
*/
int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
if (is16bit) {
/* fifopointer alternates between 64 (waiting for first
* byte of word) and 63 (waiting for second byte)
*/
if (s->prefetch.fifopointer == 64) {
s->prefetch.fifo[0] = value;
s->prefetch.fifopointer--;
} else {
value = (value << 8) | s->prefetch.fifo[0];
omap_nand_write(&s->cs_file[cs], 0, value, 2);
s->prefetch.count--;
s->prefetch.fifopointer = 64;
}
} else {
/* Just write the byte : fifopointer remains 64 at all times */
omap_nand_write(&s->cs_file[cs], 0, value, 1);
s->prefetch.count--;
}
if (s->prefetch.count == 0) {
/* Final transfer: raise TERMINALCOUNTSTATUS */
s->irqst |= 2;
s->prefetch.startengine = 0;
}
omap_gpmc_int_update(s);
}
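A short worked trace (illustrative, not from the commit) of the 16-bit postwrite buffering, for a guest that pushes the 16-bit value 0x1234 as two byte writes:

/* Worked example: postwrite to a 16-bit NAND, guest writes 0x34 then 0x12:
 *   write 0x34: fifopointer == 64 -> stash fifo[0] = 0x34, fifopointer = 63
 *   write 0x12: value = (0x12 << 8) | 0x34 = 0x1234
 *               -> one 16-bit omap_nand_write(), count--, fifopointer = 64
 * For an 8-bit NAND every byte goes straight out and fifopointer stays 64.
 */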
static const MemoryRegionOps omap_prefetch_ops = {
.read = omap_gpmc_prefetch_read,
.write = omap_gpmc_prefetch_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.impl.min_access_size = 1,
.impl.max_access_size = 1,
};
static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
{
/* Return the MemoryRegion* to map/unmap for this chipselect */
struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
return f->iomem;
}
if ((s->prefetch.config1 & 0x80) &&
(prefetch_cs(s->prefetch.config1) == cs)) {
/* The prefetch engine is enabled for this CS: map the FIFO */
return &s->prefetch.iomem;
}
return &f->nandiomem;
}
static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
uint32_t mask = (f->config[6] >> 8) & 0xf;
uint32_t base = f->config[6] & 0x3f;
uint32_t size;
if (!f->iomem && !f->dev) {
return;
}
if (!f->opaque)
if (!(f->config[6] & (1 << 6))) {
/* Do nothing unless CSVALID */
return;
}
f->base = base << 24;
f->size = (0x0fffffff & ~(mask << 24)) + 1;
/* TODO: check for overlapping regions and report access errors */
if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
&& !(s->accept_256 && !mask)) {
fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
__func__, mask);
}
base <<= 24;
size = (0x0fffffff & ~(mask << 24)) + 1;
/* TODO: rather than setting the size of the mapping (which should be
* constant), the mask should cause wrapping of the address space, so
* that the same memory becomes accessible at every <i>size</i> bytes
* starting from <i>base</i>. */
if (f->iomem) {
memory_region_init(&f->container, "omap-gpmc-file", f->size);
memory_region_add_subregion(&f->container, 0, f->iomem);
memory_region_add_subregion(get_system_memory(), f->base,
&f->container);
}
if (f->base_update)
f->base_update(f->opaque, f->base);
memory_region_init(&f->container, "omap-gpmc-file", size);
memory_region_add_subregion(&f->container, 0,
omap_gpmc_cs_memregion(s, cs));
memory_region_add_subregion(get_system_memory(), base,
&f->container);
}
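The BASEADDR/MASKADDR arithmetic is easiest to see with numbers. An illustrative decoding of CONFIG7 = 0xf44 (not a value taken from the commit):

/* Worked example: CONFIG7 = 0xf44
 *   CSVALID  = bit 6              -> set, so the chip-select is mapped
 *   BASEADDR = 0xf44 & 0x3f       = 0x04 -> base = 0x04 << 24 = 0x04000000
 *   MASKADDR = (0xf44 >> 8) & 0xf = 0xf
 *   size     = (0x0fffffff & ~(0xf << 24)) + 1 = 0x01000000   (16 MB)
 * The accepted masks 0x8/0xc/0xe/0xf select 128/64/32/16 MB regions;
 * mask 0x0 (256 MB) is additionally allowed when accept_256 is set.
 */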
static void omap_gpmc_cs_unmap(struct omap_gpmc_cs_file_s *f)
static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
{
if (f->size) {
if (f->unmap)
f->unmap(f->opaque);
if (f->iomem) {
memory_region_del_subregion(get_system_memory(), &f->container);
memory_region_del_subregion(&f->container, f->iomem);
memory_region_destroy(&f->container);
}
f->base = 0;
f->size = 0;
struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
if (!(f->config[6] & (1 << 6))) {
/* Do nothing unless CSVALID */
return;
}
if (!f->iomem && !f->dev) {
return;
}
memory_region_del_subregion(get_system_memory(), &f->container);
memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
memory_region_destroy(&f->container);
}
void omap_gpmc_reset(struct omap_gpmc_s *s)
@ -115,25 +443,32 @@ void omap_gpmc_reset(struct omap_gpmc_s *s)
omap_gpmc_int_update(s);
s->timeout = 0;
s->config = 0xa00;
s->prefconfig[0] = 0x00004000;
s->prefconfig[1] = 0x00000000;
s->prefcontrol = 0;
s->preffifo = 0;
s->prefcount = 0;
s->prefetch.config1 = 0x00004000;
s->prefetch.transfercount = 0x00000000;
s->prefetch.startengine = 0;
s->prefetch.fifopointer = 0;
s->prefetch.count = 0;
for (i = 0; i < 8; i ++) {
if (s->cs_file[i].config[6] & (1 << 6)) /* CSVALID */
omap_gpmc_cs_unmap(s->cs_file + i);
s->cs_file[i].config[0] = i ? 1 << 12 : 0;
omap_gpmc_cs_unmap(s, i);
s->cs_file[i].config[1] = 0x101001;
s->cs_file[i].config[2] = 0x020201;
s->cs_file[i].config[3] = 0x10031003;
s->cs_file[i].config[4] = 0x10f1111;
s->cs_file[i].config[5] = 0;
s->cs_file[i].config[6] = 0xf00 | (i ? 0 : 1 << 6);
if (s->cs_file[i].config[6] & (1 << 6)) /* CSVALID */
omap_gpmc_cs_map(&s->cs_file[i],
s->cs_file[i].config[6] & 0x1f, /* MASKADDR */
(s->cs_file[i].config[6] >> 8 & 0xf)); /* BASEADDR */
s->cs_file[i].config[6] = 0xf00;
/* In theory we could probe attached devices for some CFG1
* bits here, but we just retain them across resets as they
* were set initially by omap_gpmc_attach().
*/
if (i == 0) {
s->cs_file[i].config[0] &= 0x00433e00;
s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
omap_gpmc_cs_map(s, i);
} else {
s->cs_file[i].config[0] &= 0x00403c00;
}
}
s->ecc_cs = 0;
s->ecc_ptr = 0;
@ -142,6 +477,24 @@ void omap_gpmc_reset(struct omap_gpmc_s *s)
ecc_reset(&s->ecc[i]);
}
static int gpmc_wordaccess_only(target_phys_addr_t addr)
{
/* Return true if the register offset is to a register that
* only permits word width accesses.
* Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
* for any chipselect.
*/
if (addr >= 0x60 && addr <= 0x1d4) {
int cs = (addr - 0x60) / 0x30;
addr -= cs * 0x30;
if (addr >= 0x7c && addr < 0x88) {
/* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
return 0;
}
}
return 1;
}
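A quick illustrative trace of the offset arithmetic for chip-select 2:

/* Worked example: a byte-wide access at register offset 0xe4.
 *   0xe4 lies in the per-chipselect block 0x60..0x1d4
 *   cs   = (0xe4 - 0x60) / 0x30 = 2
 *   addr = 0xe4 - 2 * 0x30      = 0x84 -> GPMC_NAND_DATA for CS2
 *   0x84 is within 0x7c..0x87, so gpmc_wordaccess_only() returns 0 and the
 *   narrow access is permitted.  A byte access at 0x78 (GPMC_CONFIG7) maps
 *   back to 0x78, returns 1, and is rejected via omap_badwidth_*32().
 */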
static uint64_t omap_gpmc_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
@ -149,13 +502,13 @@ static uint64_t omap_gpmc_read(void *opaque, target_phys_addr_t addr,
int cs;
struct omap_gpmc_cs_file_s *f;
if (size != 4) {
if (size != 4 && gpmc_wordaccess_only(addr)) {
return omap_badwidth_read32(opaque, addr);
}
switch (addr) {
case 0x000: /* GPMC_REVISION */
return 0x20;
return s->revision;
case 0x010: /* GPMC_SYSCONFIG */
return s->sysconfig;
@ -187,36 +540,39 @@ static uint64_t omap_gpmc_read(void *opaque, target_phys_addr_t addr,
addr -= cs * 0x30;
f = s->cs_file + cs;
switch (addr) {
case 0x60: /* GPMC_CONFIG1 */
return f->config[0];
case 0x64: /* GPMC_CONFIG2 */
return f->config[1];
case 0x68: /* GPMC_CONFIG3 */
return f->config[2];
case 0x6c: /* GPMC_CONFIG4 */
return f->config[3];
case 0x70: /* GPMC_CONFIG5 */
return f->config[4];
case 0x74: /* GPMC_CONFIG6 */
return f->config[5];
case 0x78: /* GPMC_CONFIG7 */
return f->config[6];
case 0x84: /* GPMC_NAND_DATA */
return 0;
case 0x60: /* GPMC_CONFIG1 */
return f->config[0];
case 0x64: /* GPMC_CONFIG2 */
return f->config[1];
case 0x68: /* GPMC_CONFIG3 */
return f->config[2];
case 0x6c: /* GPMC_CONFIG4 */
return f->config[3];
case 0x70: /* GPMC_CONFIG5 */
return f->config[4];
case 0x74: /* GPMC_CONFIG6 */
return f->config[5];
case 0x78: /* GPMC_CONFIG7 */
return f->config[6];
case 0x84 ... 0x87: /* GPMC_NAND_DATA */
if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
return omap_nand_read(f, 0, size);
}
return 0;
}
break;
case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
return s->prefconfig[0];
return s->prefetch.config1;
case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
return s->prefconfig[1];
return s->prefetch.transfercount;
case 0x1ec: /* GPMC_PREFETCH_CONTROL */
return s->prefcontrol;
return s->prefetch.startengine;
case 0x1f0: /* GPMC_PREFETCH_STATUS */
return (s->preffifo << 24) |
((s->preffifo >
((s->prefconfig[0] >> 8) & 0x7f) ? 1 : 0) << 16) |
s->prefcount;
return (s->prefetch.fifopointer << 24) |
((s->prefetch.fifopointer >=
((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
s->prefetch.count;
case 0x1f4: /* GPMC_ECC_CONFIG */
return s->ecc_cs;
@ -251,7 +607,7 @@ static void omap_gpmc_write(void *opaque, target_phys_addr_t addr,
int cs;
struct omap_gpmc_cs_file_s *f;
if (size != 4) {
if (size != 4 && gpmc_wordaccess_only(addr)) {
return omap_badwidth_write32(opaque, addr, value);
}
@ -276,7 +632,7 @@ static void omap_gpmc_write(void *opaque, target_phys_addr_t addr,
break;
case 0x018: /* GPMC_IRQSTATUS */
s->irqen = ~value;
s->irqen &= ~value;
omap_gpmc_int_update(s);
break;
@ -302,62 +658,109 @@ static void omap_gpmc_write(void *opaque, target_phys_addr_t addr,
addr -= cs * 0x30;
f = s->cs_file + cs;
switch (addr) {
case 0x60: /* GPMC_CONFIG1 */
f->config[0] = value & 0xffef3e13;
break;
case 0x64: /* GPMC_CONFIG2 */
f->config[1] = value & 0x001f1f8f;
break;
case 0x68: /* GPMC_CONFIG3 */
f->config[2] = value & 0x001f1f8f;
break;
case 0x6c: /* GPMC_CONFIG4 */
f->config[3] = value & 0x1f8f1f8f;
break;
case 0x70: /* GPMC_CONFIG5 */
f->config[4] = value & 0x0f1f1f1f;
break;
case 0x74: /* GPMC_CONFIG6 */
f->config[5] = value & 0x00000fcf;
break;
case 0x78: /* GPMC_CONFIG7 */
if ((f->config[6] ^ value) & 0xf7f) {
if (f->config[6] & (1 << 6)) /* CSVALID */
omap_gpmc_cs_unmap(f);
if (value & (1 << 6)) /* CSVALID */
omap_gpmc_cs_map(f, value & 0x1f, /* MASKADDR */
(value >> 8 & 0xf)); /* BASEADDR */
}
case 0x60: /* GPMC_CONFIG1 */
f->config[0] = value & 0xffef3e13;
break;
case 0x64: /* GPMC_CONFIG2 */
f->config[1] = value & 0x001f1f8f;
break;
case 0x68: /* GPMC_CONFIG3 */
f->config[2] = value & 0x001f1f8f;
break;
case 0x6c: /* GPMC_CONFIG4 */
f->config[3] = value & 0x1f8f1f8f;
break;
case 0x70: /* GPMC_CONFIG5 */
f->config[4] = value & 0x0f1f1f1f;
break;
case 0x74: /* GPMC_CONFIG6 */
f->config[5] = value & 0x00000fcf;
break;
case 0x78: /* GPMC_CONFIG7 */
if ((f->config[6] ^ value) & 0xf7f) {
omap_gpmc_cs_unmap(s, cs);
f->config[6] = value & 0x00000f7f;
break;
case 0x7c: /* GPMC_NAND_COMMAND */
case 0x80: /* GPMC_NAND_ADDRESS */
case 0x84: /* GPMC_NAND_DATA */
break;
default:
goto bad_reg;
omap_gpmc_cs_map(s, cs);
}
break;
case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}
break;
case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}
break;
case 0x84 ... 0x87: /* GPMC_NAND_DATA */
if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
omap_nand_write(f, 0, value, size);
}
break;
default:
goto bad_reg;
}
break;
case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
s->prefconfig[0] = value & 0x7f8f7fbf;
/* TODO: update interrupts, fifos, dmas */
if (!s->prefetch.startengine) {
uint32_t oldconfig1 = s->prefetch.config1;
uint32_t changed;
s->prefetch.config1 = value & 0x7f8f7fbf;
changed = oldconfig1 ^ s->prefetch.config1;
if (changed & (0x80 | 0x7000000)) {
/* Turning the engine on or off, or mapping it somewhere else.
* cs_map() and cs_unmap() check the prefetch config and
* overall CSVALID bits, so it is sufficient to unmap-and-map
* both the old cs and the new one.
*/
int oldcs = prefetch_cs(oldconfig1);
int newcs = prefetch_cs(s->prefetch.config1);
omap_gpmc_cs_unmap(s, oldcs);
omap_gpmc_cs_map(s, oldcs);
if (newcs != oldcs) {
omap_gpmc_cs_unmap(s, newcs);
omap_gpmc_cs_map(s, newcs);
}
}
}
break;
case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
s->prefconfig[1] = value & 0x3fff;
if (!s->prefetch.startengine) {
s->prefetch.transfercount = value & 0x3fff;
}
break;
case 0x1ec: /* GPMC_PREFETCH_CONTROL */
s->prefcontrol = value & 1;
if (s->prefcontrol) {
if (s->prefconfig[0] & 1)
s->preffifo = 0x40;
else
s->preffifo = 0x00;
if (s->prefetch.startengine != (value & 1)) {
s->prefetch.startengine = value & 1;
if (s->prefetch.startengine) {
/* Prefetch engine start */
s->prefetch.count = s->prefetch.transfercount;
if (s->prefetch.config1 & 1) {
/* Write */
s->prefetch.fifopointer = 64;
} else {
/* Read */
s->prefetch.fifopointer = 0;
fill_prefetch_fifo(s);
}
} else {
/* Prefetch engine forcibly stopped. The TRM
* doesn't define the behaviour if you do this.
* We clear the prefetch count, which means that
* we permit no more writes, and don't read any
* more data from NAND. The CPU can still drain
* the FIFO of unread data.
*/
s->prefetch.count = 0;
}
omap_gpmc_int_update(s);
}
/* TODO: start */
break;
case 0x1f4: /* GPMC_ECC_CONFIG */
@ -394,24 +797,47 @@ static const MemoryRegionOps omap_gpmc_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
struct omap_gpmc_s *omap_gpmc_init(target_phys_addr_t base, qemu_irq irq)
struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
target_phys_addr_t base,
qemu_irq irq, qemu_irq drq)
{
int cs;
struct omap_gpmc_s *s = (struct omap_gpmc_s *)
g_malloc0(sizeof(struct omap_gpmc_s));
omap_gpmc_reset(s);
memory_region_init_io(&s->iomem, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
memory_region_add_subregion(get_system_memory(), base, &s->iomem);
s->irq = irq;
s->drq = drq;
s->accept_256 = cpu_is_omap3630(mpu);
s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
s->lastirq = 0;
omap_gpmc_reset(s);
/* We have to register a different IO memory handler for each
* chip select region in case a NAND device is mapped there. We
* make the region the worst-case size of 256MB and rely on the
* container memory region in cs_map to chop it down to the actual
* guest-requested size.
*/
for (cs = 0; cs < 8; cs++) {
memory_region_init_io(&s->cs_file[cs].nandiomem,
&omap_nand_ops,
&s->cs_file[cs],
"omap-nand",
256 * 1024 * 1024);
}
memory_region_init_io(&s->prefetch.iomem, &omap_prefetch_ops, s,
"omap-gpmc-prefetch", 256 * 1024 * 1024);
return s;
}
void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem,
void (*base_upd)(void *opaque, target_phys_addr_t new),
void (*unmap)(void *opaque), void *opaque)
void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
{
struct omap_gpmc_cs_file_s *f;
assert(iomem);
if (cs < 0 || cs >= 8) {
fprintf(stderr, "%s: bad chip-select %i\n", __FUNCTION__, cs);
@ -419,12 +845,29 @@ void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem,
}
f = &s->cs_file[cs];
omap_gpmc_cs_unmap(s, cs);
f->config[0] &= ~(0xf << 10);
f->iomem = iomem;
f->base_update = base_upd;
f->unmap = unmap;
f->opaque = opaque;
if (f->config[6] & (1 << 6)) /* CSVALID */
omap_gpmc_cs_map(f, f->config[6] & 0x1f, /* MASKADDR */
(f->config[6] >> 8 & 0xf)); /* BASEADDR */
omap_gpmc_cs_map(s, cs);
}
void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
{
struct omap_gpmc_cs_file_s *f;
assert(nand);
if (cs < 0 || cs >= 8) {
fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
exit(-1);
}
f = &s->cs_file[cs];
omap_gpmc_cs_unmap(s, cs);
f->config[0] &= ~(0xf << 10);
f->config[0] |= (OMAP_GPMC_NAND << 10);
f->dev = nand;
if (nand_getbuswidth(f->dev) == 16) {
f->config[0] |= OMAP_GPMC_16BIT << 12;
}
omap_gpmc_cs_map(s, cs);
}
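The two attach variants split along device type: NOR-like devices hand the GPMC a ready-made MemoryRegion, while NAND-like devices hand over their DeviceState so the GPMC can drive nand_setpins()/nand_setio() itself. A hypothetical board fragment (names are illustrative):

/* Sketch: attaching one NOR-style and one NAND-style device to the GPMC. */
static void example_gpmc_attach(struct omap_gpmc_s *gpmc,
                                MemoryRegion *nor_region,
                                DeviceState *nand_dev)
{
    omap_gpmc_attach(gpmc, 1, nor_region);     /* CS1: plain MMIO region */
    omap_gpmc_attach_nand(gpmc, 2, nand_dev);  /* CS2: GPMC generates NAND cycles */
}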

View File

@ -25,6 +25,7 @@
#include "blockdev.h"
#include "memory.h"
#include "exec-memory.h"
#include "sysbus.h"
/* 11 for 2kB-page OneNAND ("2nd generation") and 10 for 1kB-page chips */
#define PAGE_SHIFT 11
@ -33,6 +34,7 @@
#define BLOCK_SHIFT (PAGE_SHIFT + 6)
typedef struct {
SysBusDevice busdev;
struct {
uint16_t man;
uint16_t dev;
@ -49,6 +51,7 @@ typedef struct {
uint8_t *current;
MemoryRegion ram;
MemoryRegion mapped_ram;
uint8_t current_direction;
uint8_t *boot[2];
uint8_t *data[2][2];
MemoryRegion iomem;
@ -120,27 +123,72 @@ static void onenand_mem_setup(OneNANDState *s)
1);
}
void onenand_base_update(void *opaque, target_phys_addr_t new)
{
OneNANDState *s = (OneNANDState *) opaque;
s->base = new;
memory_region_add_subregion(get_system_memory(), s->base, &s->container);
}
void onenand_base_unmap(void *opaque)
{
OneNANDState *s = (OneNANDState *) opaque;
memory_region_del_subregion(get_system_memory(), &s->container);
}
static void onenand_intr_update(OneNANDState *s)
{
qemu_set_irq(s->intr, ((s->intstatus >> 15) ^ (~s->config[0] >> 6)) & 1);
}
static void onenand_pre_save(void *opaque)
{
OneNANDState *s = opaque;
if (s->current == s->otp) {
s->current_direction = 1;
} else if (s->current == s->image) {
s->current_direction = 2;
} else {
s->current_direction = 0;
}
}
static int onenand_post_load(void *opaque, int version_id)
{
OneNANDState *s = opaque;
switch (s->current_direction) {
case 0:
break;
case 1:
s->current = s->otp;
break;
case 2:
s->current = s->image;
break;
default:
return -1;
}
onenand_intr_update(s);
return 0;
}
static const VMStateDescription vmstate_onenand = {
.name = "onenand",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.pre_save = onenand_pre_save,
.post_load = onenand_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT8(current_direction, OneNANDState),
VMSTATE_INT32(cycle, OneNANDState),
VMSTATE_INT32(otpmode, OneNANDState),
VMSTATE_UINT16_ARRAY(addr, OneNANDState, 8),
VMSTATE_UINT16_ARRAY(unladdr, OneNANDState, 8),
VMSTATE_INT32(bufaddr, OneNANDState),
VMSTATE_INT32(count, OneNANDState),
VMSTATE_UINT16(command, OneNANDState),
VMSTATE_UINT16_ARRAY(config, OneNANDState, 2),
VMSTATE_UINT16(status, OneNANDState),
VMSTATE_UINT16(intstatus, OneNANDState),
VMSTATE_UINT16(wpstatus, OneNANDState),
VMSTATE_INT32(secs_cur, OneNANDState),
VMSTATE_PARTIAL_VBUFFER(blockwp, OneNANDState, blocks),
VMSTATE_UINT8(ecc.cp, OneNANDState),
VMSTATE_UINT16_ARRAY(ecc.lp, OneNANDState, 2),
VMSTATE_UINT16(ecc.count, OneNANDState),
VMSTATE_BUFFER_UNSAFE(otp, OneNANDState, 0, ((64 + 2) << PAGE_SHIFT)),
VMSTATE_END_OF_LIST()
}
};
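pre_save/post_load exist because s->current is a host pointer into the device's own buffers and cannot be migrated directly; it is encoded as the small current_direction value instead. The same pattern works for any pointer-into-own-state field. A self-contained sketch under that assumption (ExampleState and its fields are hypothetical, not part of this commit):

#include <stdint.h>

typedef struct {
    uint8_t bufa[256], bufb[256];
    uint8_t *cursor;        /* runtime pointer, never sent over the wire */
    uint8_t cursor_sel;     /* 0 = bufa, 1 = bufb; this is what migrates */
} ExampleState;

static void example_pre_save(void *opaque)
{
    ExampleState *s = opaque;
    s->cursor_sel = (s->cursor == s->bufb);          /* encode pointer as index */
}

static int example_post_load(void *opaque, int version_id)
{
    ExampleState *s = opaque;
    s->cursor = s->cursor_sel ? s->bufb : s->bufa;   /* rebuild the pointer */
    return 0;
}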
/* Hot reset (Reset OneNAND command) or warm reset (RP pin low) */
static void onenand_reset(OneNANDState *s, int cold)
{
@ -167,11 +215,17 @@ static void onenand_reset(OneNANDState *s, int cold)
/* Lock the whole flash */
memset(s->blockwp, ONEN_LOCK_LOCKED, s->blocks);
if (s->bdrv && bdrv_read(s->bdrv, 0, s->boot[0], 8) < 0)
hw_error("%s: Loading the BootRAM failed.\n", __FUNCTION__);
if (s->bdrv_cur && bdrv_read(s->bdrv_cur, 0, s->boot[0], 8) < 0) {
hw_error("%s: Loading the BootRAM failed.\n", __func__);
}
}
}
static void onenand_system_reset(DeviceState *dev)
{
onenand_reset(FROM_SYSBUS(OneNANDState, sysbus_from_qdev(dev)), 1);
}
static inline int onenand_load_main(OneNANDState *s, int sec, int secn,
void *dest)
{
@ -191,8 +245,8 @@ static inline int onenand_prog_main(OneNANDState *s, int sec, int secn,
int result = 0;
if (secn > 0) {
uint32_t size = (uint32_t) secn * 512;
const uint8_t *sp = (const uint8_t *) src;
uint32_t size = (uint32_t)secn * 512;
const uint8_t *sp = (const uint8_t *)src;
uint8_t *dp = 0;
if (s->bdrv_cur) {
dp = g_malloc(size);
@ -203,7 +257,7 @@ static inline int onenand_prog_main(OneNANDState *s, int sec, int secn,
if (sec + secn > s->secs_cur) {
result = 1;
} else {
dp = (uint8_t *) s->current + (sec << 9);
dp = (uint8_t *)s->current + (sec << 9);
}
}
if (!result) {
@ -245,13 +299,13 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
{
int result = 0;
if (secn > 0) {
const uint8_t *sp = (const uint8_t *) src;
const uint8_t *sp = (const uint8_t *)src;
uint8_t *dp = 0, *dpp = 0;
if (s->bdrv_cur) {
dp = g_malloc(512);
if (!dp || bdrv_read(s->bdrv_cur,
s->secs_cur + (sec >> 5),
dp, 1) < 0) {
s->secs_cur + (sec >> 5),
dp, 1) < 0) {
result = 1;
} else {
dpp = dp + ((sec & 31) << 4);
@ -270,7 +324,7 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
}
if (s->bdrv_cur) {
result = bdrv_write(s->bdrv_cur, s->secs_cur + (sec >> 5),
dp, 1) < 0;
dp, 1) < 0;
}
}
if (dp) {
@ -326,7 +380,7 @@ fail:
return 1;
}
static void onenand_command(OneNANDState *s, int cmd)
static void onenand_command(OneNANDState *s)
{
int b;
int sec;
@ -346,7 +400,7 @@ static void onenand_command(OneNANDState *s, int cmd)
s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \
buf += (s->bufaddr & 3) << 4;
switch (cmd) {
switch (s->command) {
case 0x00: /* Load single/multiple sector data unit into buffer */
SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
@ -527,7 +581,7 @@ static void onenand_command(OneNANDState *s, int cmd)
s->status |= ONEN_ERR_CMD;
s->intstatus |= ONEN_INT;
fprintf(stderr, "%s: unknown OneNAND command %x\n",
__FUNCTION__, cmd);
__func__, s->command);
}
onenand_intr_update(s);
@ -659,7 +713,7 @@ static void onenand_write(void *opaque, target_phys_addr_t addr,
if (s->intstatus & (1 << 15))
break;
s->command = value;
onenand_command(s, s->command);
onenand_command(s);
break;
case 0xf221: /* System Configuration 1 */
s->config[0] = value;
@ -700,30 +754,25 @@ static const MemoryRegionOps onenand_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
void *onenand_init(BlockDriverState *bdrv,
uint16_t man_id, uint16_t dev_id, uint16_t ver_id,
int regshift, qemu_irq irq)
static int onenand_initfn(SysBusDevice *dev)
{
OneNANDState *s = (OneNANDState *) g_malloc0(sizeof(*s));
uint32_t size = 1 << (24 + ((dev_id >> 4) & 7));
OneNANDState *s = (OneNANDState *)dev;
uint32_t size = 1 << (24 + ((s->id.dev >> 4) & 7));
void *ram;
s->shift = regshift;
s->intr = irq;
s->base = (target_phys_addr_t)-1;
s->rdy = NULL;
s->id.man = man_id;
s->id.dev = dev_id;
s->id.ver = ver_id;
s->blocks = size >> BLOCK_SHIFT;
s->secs = size >> 9;
s->blockwp = g_malloc(s->blocks);
s->density_mask = (dev_id & 0x08) ? (1 << (6 + ((dev_id >> 4) & 7))) : 0;
s->density_mask = (s->id.dev & 0x08)
? (1 << (6 + ((s->id.dev >> 4) & 7))) : 0;
memory_region_init_io(&s->iomem, &onenand_ops, s, "onenand",
0x10000 << s->shift);
s->bdrv = bdrv;
if (!s->bdrv) {
s->image = memset(g_malloc(size + (size >> 5)),
0xff, size + (size >> 5));
0xff, size + (size >> 5));
} else {
s->bdrv_cur = s->bdrv;
}
s->otp = memset(g_malloc((64 + 2) << PAGE_SHIFT),
0xff, (64 + 2) << PAGE_SHIFT);
@ -736,15 +785,40 @@ void *onenand_init(BlockDriverState *bdrv,
s->data[1][0] = ram + ((0x0200 + (1 << (PAGE_SHIFT - 1))) << s->shift);
s->data[1][1] = ram + ((0x8010 + (1 << (PAGE_SHIFT - 6))) << s->shift);
onenand_mem_setup(s);
onenand_reset(s, 1);
return s;
sysbus_init_irq(dev, &s->intr);
sysbus_init_mmio_region(dev, &s->container);
vmstate_register(&dev->qdev,
((s->shift & 0x7f) << 24)
| ((s->id.man & 0xff) << 16)
| ((s->id.dev & 0xff) << 8)
| (s->id.ver & 0xff),
&vmstate_onenand, s);
return 0;
}
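With the property values the N8x0 board sets above (device_id = 0x48, shift = 1), the derived geometry works out as follows (illustrative arithmetic; PAGE_SHIFT = 11, so BLOCK_SHIFT = 17):

/* Worked example for device_id = 0x48, shift = 1:
 *   size         = 1 << (24 + ((0x48 >> 4) & 7)) = 1 << 28  = 256 MiB
 *   blocks       = size >> 17                    = 2048
 *   secs         = size >> 9                     = 524288
 *   density_mask = (0x48 & 0x08) ? 1 << (6 + 4) : 0 = 0x400
 *   iomem size   = 0x10000 << 1                  = 0x20000
 */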
void *onenand_raw_otp(void *opaque)
static SysBusDeviceInfo onenand_info = {
.init = onenand_initfn,
.qdev.name = "onenand",
.qdev.size = sizeof(OneNANDState),
.qdev.reset = onenand_system_reset,
.qdev.props = (Property[]) {
DEFINE_PROP_UINT16("manufacturer_id", OneNANDState, id.man, 0),
DEFINE_PROP_UINT16("device_id", OneNANDState, id.dev, 0),
DEFINE_PROP_UINT16("version_id", OneNANDState, id.ver, 0),
DEFINE_PROP_INT32("shift", OneNANDState, shift, 0),
DEFINE_PROP_DRIVE("drive", OneNANDState, bdrv),
DEFINE_PROP_END_OF_LIST()
}
};
static void onenand_register_device(void)
{
OneNANDState *s = (OneNANDState *) opaque;
return s->otp;
sysbus_register_withprop(&onenand_info);
}
void *onenand_raw_otp(DeviceState *onenand_device)
{
return FROM_SYSBUS(OneNANDState, sysbus_from_qdev(onenand_device))->otp;
}
device_init(onenand_register_device)

View File

@ -131,6 +131,11 @@ void sysbus_init_mmio_region(SysBusDevice *dev, MemoryRegion *memory)
dev->mmio[n].memory = memory;
}
MemoryRegion *sysbus_mmio_get_region(SysBusDevice *dev, int n)
{
return dev->mmio[n].memory;
}
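The new accessor is for boards where some other component owns the mapping: instead of calling sysbus_mmio_map(), the board fetches the region and hands it on, as the N8x0 code above does with omap_gpmc_attach(). A small sketch contrasting the two (function names here are illustrative):

/* Sketch: two ways for board code to place sysbus MMIO region 0. */
static void example_map_directly(SysBusDevice *busdev, target_phys_addr_t addr)
{
    sysbus_mmio_map(busdev, 0, addr);            /* sysbus core does the mapping */
}

static MemoryRegion *example_hand_off(SysBusDevice *busdev)
{
    return sysbus_mmio_get_region(busdev, 0);    /* caller maps it, e.g. the GPMC */
}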
void sysbus_init_ioports(SysBusDevice *dev, pio_addr_t ioport, pio_addr_t size)
{
pio_addr_t i;

View File

@ -50,6 +50,7 @@ void sysbus_init_mmio(SysBusDevice *dev, target_phys_addr_t size,
void sysbus_init_mmio_cb2(SysBusDevice *dev,
mmio_mapfunc cb, mmio_mapfunc unmap);
void sysbus_init_mmio_region(SysBusDevice *dev, MemoryRegion *memory);
MemoryRegion *sysbus_mmio_get_region(SysBusDevice *dev, int n);
void sysbus_init_irq(SysBusDevice *dev, qemu_irq *p);
void sysbus_pass_irq(SysBusDevice *dev, SysBusDevice *target);
void sysbus_init_ioports(SysBusDevice *dev, pio_addr_t ioport, pio_addr_t size);