Merge remote-tracking branch 'upstream/master' into main

commit ef51b76ce6
@@ -146,6 +146,8 @@ static void qemu_chr_open_stdio(Chardev *chr,
                                 bool *be_opened,
                                 Error **errp)
 {
+    ChardevStdio *opts = backend->u.stdio.data;
+    bool stdio_allow_signal = !opts->has_signal || opts->signal;
     WinStdioChardev *stdio = WIN_STDIO_CHARDEV(chr);
     DWORD dwMode;
     int is_console = 0;
@@ -193,7 +195,11 @@ static void qemu_chr_open_stdio(Chardev *chr,
     if (is_console) {
         /* set the terminal in raw mode */
         /* ENABLE_QUICK_EDIT_MODE | ENABLE_EXTENDED_FLAGS */
+        if (stdio_allow_signal) {
             dwMode |= ENABLE_PROCESSED_INPUT;
+        } else {
+            dwMode &= ~ENABLE_PROCESSED_INPUT;
+        }
     }

     SetConsoleMode(stdio->hStdIn, dwMode);
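Note: this hunk appears to be chardev/char-win-stdio.c, wiring the stdio chardev's "signal" option into the Win32 console mode: with signal=on (the default) Ctrl-C stays a signal via ENABLE_PROCESSED_INPUT, with signal=off it is delivered to the guest as ordinary input. A minimal standalone sketch of the same toggle, assuming a Windows host (set_stdin_signal_mode is an invented name):

#include <windows.h>
#include <stdbool.h>

/* Sketch: mirror the diff's handling of the stdio "signal" option. */
static void set_stdin_signal_mode(bool allow_signal)
{
    HANDLE hStdIn = GetStdHandle(STD_INPUT_HANDLE);
    DWORD dwMode;

    if (!GetConsoleMode(hStdIn, &dwMode)) {
        return; /* not a console (e.g. redirected input) */
    }
    if (allow_signal) {
        dwMode |= ENABLE_PROCESSED_INPUT;   /* Ctrl-C raises a signal */
    } else {
        dwMode &= ~ENABLE_PROCESSED_INPUT;  /* Ctrl-C arrives as raw input */
    }
    SetConsoleMode(hStdIn, dwMode);
}

int main(void)
{
    set_stdin_signal_mode(false);
    return 0;
}

On the command line this corresponds to e.g. -chardev stdio,id=term0,signal=off.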
@@ -941,7 +941,7 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
     gic_update(s);
 }

-static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
+static uint8_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
 {
     GICState *s = (GICState *)opaque;
     uint32_t res;
@@ -955,6 +955,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
     cm = 1 << cpu;
     if (offset < 0x100) {
         if (offset == 0) {      /* GICD_CTLR */
+            /* We rely here on the only non-zero bits being in byte 0 */
             if (s->security_extn && !attrs.secure) {
                 /* The NS bank of this register is just an alias of the
                  * EnableGrp1 bit in the S bank version.
@@ -964,13 +965,26 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
             return s->ctlr;
         }
     }
-    if (offset == 4)
-        /* Interrupt Controller Type Register */
-        return ((s->num_irq / 32) - 1)
-               | ((s->num_cpu - 1) << 5)
-               | (s->security_extn << 10);
-    if (offset < 0x08)
-        return 0;
+    if (offset == 4) {
+        /* GICD_TYPER byte 0 */
+        return ((s->num_irq / 32) - 1) | ((s->num_cpu - 1) << 5);
+    }
+    if (offset == 5) {
+        /* GICD_TYPER byte 1 */
+        return (s->security_extn << 2);
+    }
+    if (offset == 8) {
+        /* GICD_IIDR byte 0 */
+        return 0x3b; /* Arm JEP106 identity */
+    }
+    if (offset == 9) {
+        /* GICD_IIDR byte 1 */
+        return 0x04; /* Arm JEP106 identity */
+    }
+    if (offset < 0x0c) {
+        /* All other bytes in this range are RAZ */
+        return 0;
+    }
     if (offset >= 0x80) {
         /* Interrupt Group Registers: these RAZ/WI if this is an NS
          * access to a GIC with the security extensions, or if the GIC
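Note: this appears to be hw/intc/arm_gic.c. The change makes gic_dist_readb() return just the addressed byte, so multi-byte registers have to be split per byte: GICD_TYPER's SecurityExtn bit 10 becomes bit 2 of byte 1, hence the `s->security_extn << 2` at offset 5, and GICD_IIDR bytes 0/1 carry the Arm JEP106 identity. An illustrative, self-contained decomposition (field layout per the GICv2 spec; the sample values are made up):

#include <stdint.h>
#include <stdio.h>

/* Compose the 32-bit GICD_TYPER, then read it back one byte at a time,
 * the way a byte-wide distributor read function must. */
static uint8_t typer_byte(uint32_t typer, unsigned byteno)
{
    return (typer >> (8 * byteno)) & 0xff;
}

int main(void)
{
    uint32_t num_irq = 160, num_cpu = 2, security_extn = 1;
    uint32_t typer = ((num_irq / 32) - 1)   /* ITLinesNumber, bits [4:0] */
                   | ((num_cpu - 1) << 5)   /* CPUNumber, bits [7:5] */
                   | (security_extn << 10); /* SecurityExtn, bit 10 */

    printf("byte0=0x%02x byte1=0x%02x\n",
           typer_byte(typer, 0),  /* == (num_irq/32 - 1) | (num_cpu-1) << 5 */
           typer_byte(typer, 1)); /* == security_extn << 2 */
    return 0;
}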
@@ -77,7 +77,6 @@
 ( ( input ) & ( size - 1 ) )

 #define ETHER_TYPE_LEN 2
-#define ETH_MTU 1500

 #define VLAN_TCI_LEN 2
 #define VLAN_HLEN (ETHER_TYPE_LEN + VLAN_TCI_LEN)
@@ -1934,8 +1933,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
 #define CP_TX_LS (1<<28)
 /* large send packet flag */
 #define CP_TX_LGSEN (1<<27)
-/* large send MSS mask, bits 16...25 */
-#define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1)
+/* large send MSS mask, bits 16...26 */
+#define CP_TC_LGSEN_MSS_SHIFT 16
+#define CP_TC_LGSEN_MSS_MASK ((1 << 11) - 1)

 /* IP checksum offload flag */
 #define CP_TX_IPCS (1<<18)
@@ -2027,18 +2027,21 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
         s->currCPlusTxDesc = 0;
     }

+    /* Build the Tx Status Descriptor */
+    uint32_t tx_status = txdw0;
+
     /* transfer ownership to target */
-    txdw0 &= ~CP_TX_OWN;
+    tx_status &= ~CP_TX_OWN;

     /* reset error indicator bits */
-    txdw0 &= ~CP_TX_STATUS_UNF;
-    txdw0 &= ~CP_TX_STATUS_TES;
-    txdw0 &= ~CP_TX_STATUS_OWC;
-    txdw0 &= ~CP_TX_STATUS_LNKF;
-    txdw0 &= ~CP_TX_STATUS_EXC;
+    tx_status &= ~CP_TX_STATUS_UNF;
+    tx_status &= ~CP_TX_STATUS_TES;
+    tx_status &= ~CP_TX_STATUS_OWC;
+    tx_status &= ~CP_TX_STATUS_LNKF;
+    tx_status &= ~CP_TX_STATUS_EXC;

     /* update ring data */
-    val = cpu_to_le32(txdw0);
+    val = cpu_to_le32(tx_status);
     pci_dma_write(d, cplus_tx_ring_desc, (uint8_t *)&val, 4);

     /* Now decide if descriptor being processed is holding the last segment of packet */
@@ -2132,7 +2135,7 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
         }
         ip_data_len -= hlen;

-        if (txdw0 & CP_TX_IPCS)
+        if (!(txdw0 & CP_TX_LGSEN) && (txdw0 & CP_TX_IPCS))
         {
             DPRINTF("+++ C+ mode need IP checksum\n");

@@ -2149,10 +2152,11 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
                 goto skip_offload;
             }

-            int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK;
+            int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) &
+                                 CP_TC_LGSEN_MSS_MASK;

-            DPRINTF("+++ C+ mode offloaded task TSO MTU=%d IP data %d "
-                "frame data %d specified MSS=%d\n", ETH_MTU,
+            DPRINTF("+++ C+ mode offloaded task TSO IP data %d "
+                "frame data %d specified MSS=%d\n",
                 ip_data_len, saved_size - ETH_HLEN, large_send_mss);

             int tcp_send_offset = 0;
@@ -2177,25 +2181,22 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
                 goto skip_offload;
             }

-            /* ETH_MTU = ip header len + tcp header len + payload */
             int tcp_data_len = ip_data_len - tcp_hlen;
-            int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen;

             DPRINTF("+++ C+ mode TSO IP data len %d TCP hlen %d TCP "
-                "data len %d TCP chunk size %d\n", ip_data_len,
-                tcp_hlen, tcp_data_len, tcp_chunk_size);
+                "data len %d\n", ip_data_len, tcp_hlen, tcp_data_len);

             /* note the cycle below overwrites IP header data,
                but restores it from saved_ip_header before sending packet */

             int is_last_frame = 0;

-            for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size)
+            for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += large_send_mss)
             {
-                uint16_t chunk_size = tcp_chunk_size;
+                uint16_t chunk_size = large_send_mss;

                 /* check if this is the last frame */
-                if (tcp_send_offset + tcp_chunk_size >= tcp_data_len)
+                if (tcp_send_offset + large_send_mss >= tcp_data_len)
                 {
                     is_last_frame = 1;
                     chunk_size = tcp_data_len - tcp_send_offset;
@@ -2244,7 +2245,7 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
                 ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size);

                 /* increment IP id for subsequent frames */
-                ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id));
+                ip->ip_id = cpu_to_be16(tcp_send_offset/large_send_mss + be16_to_cpu(ip->ip_id));

                 ip->ip_sum = 0;
                 ip->ip_sum = ip_checksum(eth_payload_data, hlen);
@@ -2265,7 +2266,7 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
             /* Stop sending this frame */
             saved_size = 0;
         }
-        else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS))
+        else if (!(txdw0 & CP_TX_LGSEN) && (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS)))
         {
             DPRINTF("+++ C+ mode need TCP or UDP checksum\n");
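Note: this looks like hw/net/rtl8139.c. Two fixes travel together here: the TSO loop now segments by the guest-specified MSS instead of a fixed ETH_MTU-derived chunk size, and the MSS field is extracted with an explicit shift and an 11-bit mask (bits 16..26); the old 12-bit mask reached into bit 27, which is CP_TX_LGSEN itself. A hedged sketch of the extraction plus the segmentation arithmetic (the sample payload size is invented):

#include <stdint.h>
#include <stdio.h>

#define CP_TC_LGSEN_MSS_SHIFT 16
#define CP_TC_LGSEN_MSS_MASK  ((1 << 11) - 1)   /* bits 16..26 of txdw0 */

int main(void)
{
    /* LGSEN flag plus MSS=1460, for illustration */
    uint32_t txdw0 = (1u << 27) | (1460u << CP_TC_LGSEN_MSS_SHIFT);
    int mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) & CP_TC_LGSEN_MSS_MASK;

    /* Segment a 4000-byte TCP payload the way the new loop does:
     * every chunk is at most MSS bytes, the last one takes the rest. */
    int tcp_data_len = 4000;
    for (int off = 0; off < tcp_data_len; off += mss) {
        int chunk = (off + mss >= tcp_data_len) ? tcp_data_len - off : mss;
        printf("frame at offset %4d, %4d bytes\n", off, chunk);
    }
    return 0;
}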
@@ -65,7 +65,7 @@ enum {
     REG_SD_DLBA = 0x84,      /* Descriptor List Base Address */
     REG_SD_IDST = 0x88,      /* Internal DMA Controller Status */
     REG_SD_IDIE = 0x8C,      /* Internal DMA Controller IRQ Enable */
-    REG_SD_THLDC = 0x100,    /* Card Threshold Control */
+    REG_SD_THLDC = 0x100,    /* Card Threshold Control / FIFO (sun4i only)*/
     REG_SD_DSBD = 0x10C,     /* eMMC DDR Start Bit Detection Control */
     REG_SD_RES_CRC = 0x110,  /* Response CRC from card/eMMC */
     REG_SD_DATA7_CRC = 0x114, /* CRC Data 7 from card/eMMC */
@@ -415,10 +415,29 @@ static void allwinner_sdhost_dma(AwSdHostState *s)
     }
 }

+static uint32_t allwinner_sdhost_fifo_read(AwSdHostState *s)
+{
+    uint32_t res = 0;
+
+    if (sdbus_data_ready(&s->sdbus)) {
+        sdbus_read_data(&s->sdbus, &res, sizeof(uint32_t));
+        le32_to_cpus(&res);
+        allwinner_sdhost_update_transfer_cnt(s, sizeof(uint32_t));
+        allwinner_sdhost_auto_stop(s);
+        allwinner_sdhost_update_irq(s);
+    } else {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: no data ready on SD bus\n",
+                      __func__);
+    }
+
+    return res;
+}
+
 static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset,
                                       unsigned size)
 {
     AwSdHostState *s = AW_SDHOST(opaque);
+    AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s);
     uint32_t res = 0;

     switch (offset) {
@@ -508,8 +527,12 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset,
     case REG_SD_IDIE:      /* Internal DMA Controller Interrupt Enable */
         res = s->dmac_irq;
         break;
-    case REG_SD_THLDC:     /* Card Threshold Control */
-        res = s->card_threshold;
+    case REG_SD_THLDC:     /* Card Threshold Control or FIFO register (sun4i) */
+        if (sc->is_sun4i) {
+            res = allwinner_sdhost_fifo_read(s);
+        } else {
+            res = s->card_threshold;
+        }
         break;
     case REG_SD_DSBD:      /* eMMC DDR Start Bit Detection Control */
         res = s->startbit_detect;
@@ -531,16 +554,7 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset,
         res = s->status_crc;
         break;
     case REG_SD_FIFO:      /* Read/Write FIFO */
-        if (sdbus_data_ready(&s->sdbus)) {
-            sdbus_read_data(&s->sdbus, &res, sizeof(uint32_t));
-            le32_to_cpus(&res);
-            allwinner_sdhost_update_transfer_cnt(s, sizeof(uint32_t));
-            allwinner_sdhost_auto_stop(s);
-            allwinner_sdhost_update_irq(s);
-        } else {
-            qemu_log_mask(LOG_GUEST_ERROR, "%s: no data ready on SD bus\n",
-                          __func__);
-        }
+        res = allwinner_sdhost_fifo_read(s);
         break;
     default:
         qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %"
@@ -553,11 +567,20 @@ static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset,
     return res;
 }

+static void allwinner_sdhost_fifo_write(AwSdHostState *s, uint64_t value)
+{
+    uint32_t u32 = cpu_to_le32(value);
+    sdbus_write_data(&s->sdbus, &u32, sizeof(u32));
+    allwinner_sdhost_update_transfer_cnt(s, sizeof(u32));
+    allwinner_sdhost_auto_stop(s);
+    allwinner_sdhost_update_irq(s);
+}
+
 static void allwinner_sdhost_write(void *opaque, hwaddr offset,
                                    uint64_t value, unsigned size)
 {
     AwSdHostState *s = AW_SDHOST(opaque);
-    uint32_t u32;
+    AwSdHostClass *sc = AW_SDHOST_GET_CLASS(s);

     trace_allwinner_sdhost_write(offset, value, size);

@@ -657,18 +680,18 @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset,
         s->dmac_irq = value;
         allwinner_sdhost_update_irq(s);
         break;
-    case REG_SD_THLDC:     /* Card Threshold Control */
-        s->card_threshold = value;
+    case REG_SD_THLDC:     /* Card Threshold Control or FIFO (sun4i) */
+        if (sc->is_sun4i) {
+            allwinner_sdhost_fifo_write(s, value);
+        } else {
+            s->card_threshold = value;
+        }
         break;
     case REG_SD_DSBD:      /* eMMC DDR Start Bit Detection Control */
         s->startbit_detect = value;
         break;
     case REG_SD_FIFO:      /* Read/Write FIFO */
-        u32 = cpu_to_le32(value);
-        sdbus_write_data(&s->sdbus, &u32, sizeof(u32));
-        allwinner_sdhost_update_transfer_cnt(s, sizeof(u32));
-        allwinner_sdhost_auto_stop(s);
-        allwinner_sdhost_update_irq(s);
+        allwinner_sdhost_fifo_write(s, value);
         break;
     case REG_SD_RES_CRC:   /* Response CRC from card/eMMC */
     case REG_SD_DATA7_CRC: /* CRC Data 7 from card/eMMC */
@@ -834,12 +857,14 @@ static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data)
 {
     AwSdHostClass *sc = AW_SDHOST_CLASS(klass);
     sc->max_desc_size = 8 * KiB;
+    sc->is_sun4i = true;
 }

 static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data)
 {
     AwSdHostClass *sc = AW_SDHOST_CLASS(klass);
     sc->max_desc_size = 64 * KiB;
+    sc->is_sun4i = false;
 }

 static const TypeInfo allwinner_sdhost_info = {
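Note: presumably hw/sd/allwinner-sdhost.c. The FIFO paths move into allwinner_sdhost_fifo_read()/_write() helpers so that REG_SD_THLDC can act as a second FIFO window on sun4i while remaining the threshold register on later SoCs, selected by the new is_sun4i class flag. A minimal sketch of that per-class dispatch pattern (every type and function name below is an invented stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: one MMIO offset, two behaviours, chosen by a per-class flag
 * (hypothetical types standing in for AwSdHostClass/AwSdHostState). */
typedef struct { bool is_sun4i; } DevClass;
typedef struct { const DevClass *klass; uint32_t card_threshold; } DevState;

static uint32_t fifo_read(DevState *s) { (void)s; return 0xcafe; /* pop FIFO */ }

static uint32_t reg_thldc_read(DevState *s)
{
    /* sun4i: the offset is a data FIFO; later SoCs: a threshold register */
    return s->klass->is_sun4i ? fifo_read(s) : s->card_threshold;
}

int main(void)
{
    DevClass sun4i = { .is_sun4i = true }, sun5i = { .is_sun4i = false };
    DevState a = { &sun4i, 0x123 }, b = { &sun5i, 0x123 };
    printf("sun4i: 0x%x, sun5i: 0x%x\n",
           (unsigned)reg_thldc_read(&a), (unsigned)reg_thldc_read(&b));
    return 0;
}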
@@ -42,7 +42,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
 }

 /**
- * clear_bmap_set: set clear bitmap for the page range
+ * clear_bmap_set: set clear bitmap for the page range. Must be with
+ * bitmap_mutex held.
  *
  * @rb: the ramblock to operate on
  * @start: the start page number
@@ -55,12 +56,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
 {
     uint8_t shift = rb->clear_bmap_shift;

-    bitmap_set_atomic(rb->clear_bmap, start >> shift,
-                      clear_bmap_size(npages, shift));
+    bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
 }

 /**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
+ * Must be with bitmap_mutex held.
  *
  * @rb: the ramblock to operate on
  * @page: the page number to check
@@ -71,7 +72,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
 {
     uint8_t shift = rb->clear_bmap_shift;

-    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+    return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
 }

 static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
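Note: likely include/exec/ram_addr.h. Since every caller now holds bitmap_mutex (see the RAMBlock comment in the next hunk), the clear_bmap helpers can use the plain bitmap primitives instead of the atomic ones. A tiny sketch of why that is sound, with a pthread mutex standing in for QemuMutex and an invented helper name:

#include <pthread.h>
#include <stdint.h>

/* Sketch: under an external lock, a plain read-modify-write on the
 * bitmap word is safe, so no atomic ops are needed. */
static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long clear_bmap[16];

static void clear_bmap_set_bit(uint64_t page)
{
    pthread_mutex_lock(&bitmap_mutex);
    clear_bmap[page / (8 * sizeof(unsigned long))] |=
        1ul << (page % (8 * sizeof(unsigned long)));  /* plain, not atomic */
    pthread_mutex_unlock(&bitmap_mutex);
}

int main(void)
{
    clear_bmap_set_bit(42);
    return 0;
}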
@@ -53,6 +53,9 @@ struct RAMBlock {
      * and split clearing of dirty bitmap on the remote node (e.g.,
      * KVM). The bitmap will be set only when doing global sync.
      *
+     * It is only used during src side of ram migration, and it is
+     * protected by the global ram_state.bitmap_mutex.
+     *
      * NOTE: this bitmap is different comparing to the other bitmaps
      * in that one bit can represent multiple guest pages (which is
      * decided by the `clear_bmap_shift' variable below). On
@@ -130,6 +130,7 @@ struct AwSdHostClass {

     /** Maximum buffer size in bytes per DMA descriptor */
     size_t max_desc_size;
+    bool is_sun4i;

 };

@@ -253,6 +253,7 @@ void bitmap_set(unsigned long *map, long i, long len);
 void bitmap_set_atomic(unsigned long *map, long i, long len);
 void bitmap_clear(unsigned long *map, long start, long nr);
 bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
 void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
                                   long nr);
 unsigned long bitmap_find_next_zero_area(unsigned long *map,
@@ -880,8 +880,8 @@ static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
     blk_mig_unlock();

     /* Report at least one block pending during bulk phase */
-    if (pending <= max_size && !block_mig_state.bulk_completed) {
-        pending = max_size + BLK_MIG_BLOCK_SIZE;
+    if (!pending && !block_mig_state.bulk_completed) {
+        pending = BLK_MIG_BLOCK_SIZE;
     }

     trace_migration_block_save_pending(pending);
@@ -62,7 +62,8 @@ qio_channel_block_readv(QIOChannel *ioc,
     qemu_iovec_init_external(&qiov, (struct iovec *)iov, niov);
     ret = bdrv_readv_vmstate(bioc->bs, &qiov, bioc->offset);
     if (ret < 0) {
-        return ret;
+        error_setg_errno(errp, -ret, "bdrv_readv_vmstate failed");
+        return -1;
     }

     bioc->offset += qiov.size;
@@ -86,7 +87,8 @@ qio_channel_block_writev(QIOChannel *ioc,
     qemu_iovec_init_external(&qiov, (struct iovec *)iov, niov);
     ret = bdrv_writev_vmstate(bioc->bs, &qiov, bioc->offset);
     if (ret < 0) {
-        return ret;
+        error_setg_errno(errp, -ret, "bdrv_writev_vmstate failed");
+        return -1;
     }

     bioc->offset += qiov.size;
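Note: presumably migration/channel-block.c. A QIOChannel readv/writev implementation is expected to fill Error **errp and return -1 on failure, not leak the raw negative errno from bdrv_readv_vmstate()/bdrv_writev_vmstate(); error_setg_errno(errp, -ret, ...) performs that translation. A rough standalone sketch of the convention, with a plain error buffer in place of Error ** and an invented wrapper name:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Sketch: translate a negative-errno return into a message plus -1,
 * roughly what error_setg_errno() does with errp in QEMU. */
static int do_io(void) { return -EIO; } /* stand-in for bdrv_readv_vmstate() */

static int channel_read(char *errbuf, size_t len)
{
    int ret = do_io();
    if (ret < 0) {
        snprintf(errbuf, len, "bdrv_readv_vmstate failed: %s", strerror(-ret));
        return -1;  /* caller inspects errbuf, never sees the raw -errno */
    }
    return ret;
}

int main(void)
{
    char err[128];
    if (channel_read(err, sizeof(err)) < 0) {
        fprintf(stderr, "%s\n", err);
    }
    return 0;
}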
@@ -1337,6 +1337,24 @@ static bool migrate_caps_check(bool *cap_list,
             error_setg(errp, "Postcopy preempt requires postcopy-ram");
             return false;
         }
+
+        /*
+         * Preempt mode requires urgent pages to be sent in separate
+         * channel, OTOH compression logic will disorder all pages into
+         * different compression channels, which is not compatible with the
+         * preempt assumptions on channel assignments.
+         */
+        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
+            error_setg(errp, "Postcopy preempt not compatible with compress");
+            return false;
+        }
     }

+    if (cap_list[MIGRATION_CAPABILITY_MULTIFD]) {
+        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
+            error_setg(errp, "Multifd is not compatible with compress");
+            return false;
+        }
+    }
+
     return true;
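Note: likely migration/migration.c. migrate_caps_check() now refuses compress combined with either postcopy-preempt or multifd, because both rely on a strict page-to-channel assignment that the compression threads would reorder. The shape of such up-front validation, sketched (capability names below are invented):

#include <stdbool.h>
#include <stdio.h>

enum { CAP_POSTCOPY_PREEMPT, CAP_COMPRESS, CAP_MULTIFD, CAP_MAX };

/* Sketch: reject mutually incompatible capability combinations early. */
static bool caps_check(const bool *cap, const char **why)
{
    if (cap[CAP_POSTCOPY_PREEMPT] && cap[CAP_COMPRESS]) {
        *why = "Postcopy preempt not compatible with compress";
        return false;
    }
    if (cap[CAP_MULTIFD] && cap[CAP_COMPRESS]) {
        *why = "Multifd is not compatible with compress";
        return false;
    }
    return true;
}

int main(void)
{
    bool cap[CAP_MAX] = { [CAP_MULTIFD] = true, [CAP_COMPRESS] = true };
    const char *why;
    if (!caps_check(cap, &why)) {
        printf("%s\n", why);
    }
    return 0;
}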
@@ -566,6 +566,23 @@ void multifd_save_cleanup(void)
     multifd_send_state = NULL;
 }

+static int multifd_zero_copy_flush(QIOChannel *c)
+{
+    int ret;
+    Error *err = NULL;
+
+    ret = qio_channel_flush(c, &err);
+    if (ret < 0) {
+        error_report_err(err);
+        return -1;
+    }
+    if (ret == 1) {
+        dirty_sync_missed_zero_copy();
+    }
+
+    return ret;
+}
+
 int multifd_send_sync_main(QEMUFile *f)
 {
     int i;
@@ -616,17 +633,8 @@ int multifd_send_sync_main(QEMUFile *f)
         qemu_mutex_unlock(&p->mutex);
         qemu_sem_post(&p->sem);

-        if (flush_zero_copy && p->c) {
-            int ret;
-            Error *err = NULL;
-
-            ret = qio_channel_flush(p->c, &err);
-            if (ret < 0) {
-                error_report_err(err);
+        if (flush_zero_copy && p->c && (multifd_zero_copy_flush(p->c) < 0)) {
             return -1;
-            } else if (ret == 1) {
-                dirty_sync_missed_zero_copy();
-            }
         }
     }
     for (i = 0; i < migrate_multifd_channels(); i++) {
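Note: presumably migration/multifd.c, a pure refactor: the open-coded flush logic becomes multifd_zero_copy_flush(), so the per-channel loop needs a single error check. Reading the helper, a negative qio_channel_flush() return aborts the sync, while a return of 1 is merely counted via dirty_sync_missed_zero_copy() (writes that fell back from zero-copy). A toy model of that contract, with invented stand-ins:

#include <stdio.h>

/* Sketch of the refactor: fold "flush, report, count misses" into one
 * helper returning <0 on error, so callers need a single check. */
static int channel_flush(int ch)     /* stand-in for qio_channel_flush() */
{
    return (ch == 2) ? 1 : 0;        /* pretend channel 2 missed zero-copy */
}

static int misses;

static int zero_copy_flush(int ch)
{
    int ret = channel_flush(ch);
    if (ret < 0) {
        return -1;                   /* error would already be reported */
    }
    if (ret == 1) {
        misses++;                    /* dirty_sync_missed_zero_copy() */
    }
    return ret;
}

int main(void)
{
    for (int ch = 0; ch < 4; ch++) {
        if (zero_copy_flush(ch) < 0) {
            return 1;                /* mirrors: if (... < 0) return -1; */
        }
    }
    printf("missed zero-copy flushes: %d\n", misses);
    return 0;
}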
@@ -79,6 +79,30 @@ int qemu_file_shutdown(QEMUFile *f)
     int ret = 0;

     f->shutdown = true;
+
+    /*
+     * We must set qemufile error before the real shutdown(), otherwise
+     * there can be a race window where we thought IO all went though
+     * (because last_error==NULL) but actually IO has already stopped.
+     *
+     * If without correct ordering, the race can happen like this:
+     *
+     *      page receiver                     other thread
+     *      -------------                     ------------
+     *      qemu_get_buffer()
+     *                                        do shutdown()
+     *        returns 0 (buffer all zero)
+     *        (we didn't check this retcode)
+     *      try to detect IO error
+     *        last_error==NULL, IO okay
+     *      install ALL-ZERO page
+     *                                        set last_error
+     *      --> guest crash!
+     */
+    if (!f->last_error) {
+        qemu_file_set_error(f, -EIO);
+    }
+
     if (!qio_channel_has_feature(f->ioc,
                                  QIO_CHANNEL_FEATURE_SHUTDOWN)) {
         return -ENOSYS;
@@ -88,9 +112,6 @@ int qemu_file_shutdown(QEMUFile *f)
         ret = -EIO;
     }

-    if (!f->last_error) {
-        qemu_file_set_error(f, -EIO);
-    }
     return ret;
 }

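Note: this is presumably migration/qemu-file.c. The fix is ordering only: publish the qemufile error before the real shutdown(), so a reader that observes stopped IO (a zero-filled buffer) can no longer find last_error still clear. A loose sketch of the invariant using C11 seq-cst atomics in place of the qemufile fields; this deliberately simplifies the real race described in the comment above:

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Sketch: the writer publishes the error *before* stopping IO; a reader
 * that detects stopped IO is then guaranteed to see the error too. */
static atomic_int last_error;
static atomic_bool io_stopped;

static void shutdown_file(void)
{
    atomic_store(&last_error, -EIO);   /* 1: set the error first */
    atomic_store(&io_stopped, true);   /* 2: only then stop IO */
}

static bool read_ok(void)
{
    if (atomic_load(&io_stopped)) {
        /* Stopped IO implies last_error is already visible: an all-zero
         * buffer cannot be mistaken for good data. */
        return false;
    }
    return atomic_load(&last_error) == 0;
}

int main(void)
{
    shutdown_file();
    return read_ok() ? 1 : 0;  /* after shutdown, read_ok() is false */
}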
@@ -2305,13 +2305,12 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
     }

     /*
-     * Do not use multifd for:
-     * 1. Compression as the first page in the new block should be posted out
-     *    before sending the compressed page
-     * 2. In postcopy as one whole host page should be placed
+     * Do not use multifd in postcopy as one whole host page should be
+     * placed. Meanwhile postcopy requires atomic update of pages, so even
+     * if host page size == guest page size the dest guest during run may
+     * still see partially copied pages which is data corruption.
      */
-    if (!save_page_use_compression(rs) && migrate_use_multifd()
-        && !migration_in_postcopy()) {
+    if (migrate_use_multifd() && !migration_in_postcopy()) {
         return ram_save_multifd_page(rs, block, offset);
     }

@@ -2546,14 +2545,22 @@ static int ram_find_and_save_block(RAMState *rs)
         return pages;
     }

+    /*
+     * Always keep last_seen_block/last_page valid during this procedure,
+     * because find_dirty_block() relies on these values (e.g., we compare
+     * last_seen_block with pss.block to see whether we searched all the
+     * ramblocks) to detect the completion of migration. Having NULL value
+     * of last_seen_block can conditionally cause below loop to run forever.
+     */
+    if (!rs->last_seen_block) {
+        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
+        rs->last_page = 0;
+    }
+
     pss.block = rs->last_seen_block;
     pss.page = rs->last_page;
     pss.complete_round = false;

-    if (!pss.block) {
-        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
-    }
-
     do {
         again = true;
         found = get_queued_page(rs, &pss);
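Note: apparently migration/ram.c. Initializing rs->last_seen_block itself (instead of only the local pss.block) keeps the wrap-around comparison in find_dirty_block() meaningful, which is what terminates the search loop. A small sketch of that circular-scan termination test, with a plain array in place of the RCU block list and invented names:

#include <stdio.h>

#define NBLOCKS 4

/* Sketch: scan a ring of blocks starting at a saved cursor; the cursor
 * must be valid (never a -1/NULL sentinel) or the wrap-around test that
 * ends the loop can never fire. */
static int find_dirty(const int *dirty, int *last_seen)
{
    if (*last_seen < 0) {
        *last_seen = 0;        /* like: rs->last_seen_block = first block */
    }
    int i = *last_seen;
    do {
        if (dirty[i]) {
            *last_seen = i;
            return i;
        }
        i = (i + 1) % NBLOCKS;
    } while (i != *last_seen); /* wrapped around: searched every block */
    return -1;
}

int main(void)
{
    int dirty[NBLOCKS] = {0, 0, 1, 0};
    int cursor = -1;
    printf("first dirty block: %d\n", find_dirty(dirty, &cursor));
    return 0;
}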
@@ -1222,6 +1222,14 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         ps = MIN(ps, param.ps);
         assert(ps < ARRAY_SIZE(pamax_map));
         outputsize = pamax_map[ps];
+
+        /*
+         * With LPA2, the effective output address (OA) size is at most 48 bits
+         * unless TCR.DS == 1
+         */
+        if (!param.ds && param.gran != Gran64K) {
+            outputsize = MIN(outputsize, 48);
+        }
     } else {
         param = aa32_va_parameters(env, address, mmu_idx);
         level = 1;
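Note: this looks like target/arm/ptw.c. The rule being encoded: without TCR.DS (FEAT_LPA2) and on a non-64K granule, the effective output address size is capped at 48 bits even when the PA-range field would allow more. A one-function sketch, with stand-in parameter names:

#include <stdio.h>

enum Gran { Gran4K, Gran16K, Gran64K };

/* Sketch: clamp the effective output address size per the LPA2 rule in
 * the hunk above: 52-bit OAs need either the 64K granule or TCR.DS=1. */
static int effective_outputsize(int pamax, int ds, enum Gran gran)
{
    int outputsize = pamax;                /* from pamax_map[ps] */
    if (!ds && gran != Gran64K) {
        outputsize = outputsize < 48 ? outputsize : 48;
    }
    return outputsize;
}

int main(void)
{
    printf("%d %d\n", effective_outputsize(52, 0, Gran4K),   /* 48 */
                      effective_outputsize(52, 1, Gran4K));  /* 52 */
    return 0;
}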
@@ -64,7 +64,7 @@ class BootLinuxAarch64(LinuxTest):
     :avocado: tags=machine:virt
     :avocado: tags=machine:gic-version=2
     """
-    timeout = 240
+    timeout = 720

     def add_common_args(self):
         self.vm.add_args('-bios',
@@ -240,6 +240,51 @@ void bitmap_clear(unsigned long *map, long start, long nr)
     }
 }

+bool bitmap_test_and_clear(unsigned long *map, long start, long nr)
+{
+    unsigned long *p = map + BIT_WORD(start);
+    const long size = start + nr;
+    int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+    bool dirty = false;
+
+    assert(start >= 0 && nr >= 0);
+
+    /* First word */
+    if (nr - bits_to_clear > 0) {
+        if ((*p) & mask_to_clear) {
+            dirty = true;
+        }
+        *p &= ~mask_to_clear;
+        nr -= bits_to_clear;
+        bits_to_clear = BITS_PER_LONG;
+        p++;
+    }
+
+    /* Full words */
+    if (bits_to_clear == BITS_PER_LONG) {
+        while (nr >= BITS_PER_LONG) {
+            if (*p) {
+                dirty = true;
+                *p = 0;
+            }
+            nr -= BITS_PER_LONG;
+            p++;
+        }
+    }
+
+    /* Last word */
+    if (nr) {
+        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+        if ((*p) & mask_to_clear) {
+            dirty = true;
+        }
+        *p &= ~mask_to_clear;
+    }
+
+    return dirty;
+}
+
 bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
 {
     unsigned long *p = map + BIT_WORD(start);
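Note: finally util/bitmap.c (the hunk above ends mid-way through the existing bitmap_test_and_clear_atomic()). The new bitmap_test_and_clear() is its non-atomic sibling: the same first/full/last-word walk with plain stores, returning whether any bit in [start, start+nr) was set. A self-contained single-word check of that contract (the macros are re-derived from the usual Linux-style definitions, so treat them as assumptions):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BITMAP_FIRST_WORD_MASK(start) (~0ul << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits)  (~0ul >> (-(nbits) & (BITS_PER_LONG - 1)))

/* Simplified single-word version of the new helper, just to exercise the
 * contract: clear [start, start+nr) and report whether anything was set. */
static bool test_and_clear(unsigned long *map, long start, long nr)
{
    unsigned long mask = BITMAP_FIRST_WORD_MASK(start) &
                         BITMAP_LAST_WORD_MASK(start + nr);
    bool dirty = (map[BIT_WORD(start)] & mask) != 0;
    map[BIT_WORD(start)] &= ~mask;
    return dirty;
}

int main(void)
{
    unsigned long map[1] = { 0 };
    map[0] |= 1ul << 5;
    bool first = test_and_clear(map, 4, 4);   /* bit 5 is inside [4,8) */
    bool second = test_and_clear(map, 4, 4);  /* range already cleared */
    printf("first=%d second=%d\n", first, second);  /* expect 1 then 0 */
    return 0;
}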