/** @file

  Utility functions used by virtio device drivers.

  Copyright (C) 2012, Red Hat, Inc.
  Portion of Copyright (C) 2013, ARM Ltd.

  This program and the accompanying materials are licensed and made available
  under the terms and conditions of the BSD License which accompanies this
  distribution. The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT
  WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/UefiBootServicesTableLib.h>

#include <Library/VirtioLib.h>
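
//
// Typical call sequence, shown only as an illustrative sketch: the request
// layout below (a three-descriptor, virtio-blk style read) and the
// identifiers QueueSize, RequestHeader, Buffer, BufferSize, HostStatus,
// VirtIo and the queue number 0 are hypothetical, supplied by the calling
// driver. The VRING_DESC_F_* flag names come from the virtio-0.9.5
// definitions.
//
//   VRING        Ring;
//   DESC_INDICES Indices;
//   EFI_STATUS   Status;
//
//   Status = VirtioRingInit (QueueSize, &Ring); // QueueSize as reported by
//   if (EFI_ERROR (Status)) {                   // the host for this queue
//     return Status;
//   }
//
//   VirtioPrepare (&Ring, &Indices);
//   VirtioAppendDesc (&Ring, (UINTN) &RequestHeader,
//     (UINT32) sizeof RequestHeader, VRING_DESC_F_NEXT, &Indices);
//   VirtioAppendDesc (&Ring, (UINTN) Buffer, BufferSize,
//     VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, &Indices);
//   VirtioAppendDesc (&Ring, (UINTN) &HostStatus, (UINT32) sizeof HostStatus,
//     VRING_DESC_F_WRITE, &Indices);
//   Status = VirtioFlush (VirtIo, 0, &Ring, &Indices);
//
//   VirtioRingUninit (&Ring);
//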


/**

  Configure a virtio ring.

  This function sets up internal storage (the guest-host communication area)
  and lays out several "navigation" (i.e. no-ownership) pointers to parts of
  that storage.

  Relevant sections from the virtio-0.9.5 spec:
  - 1.1 Virtqueues,
  - 2.3 Virtqueue Configuration.

  @param[in]  QueueSize         The number of descriptors to allocate for the
                                virtio ring, as requested by the host.

  @param[out] Ring              The virtio ring to set up.

  @retval EFI_OUT_OF_RESOURCES  AllocatePages() failed to allocate contiguous
                                pages for the requested QueueSize. Fields of
                                Ring have indeterminate value.

  @retval EFI_SUCCESS           Allocation and setup successful. Ring->Base
                                (and nothing else) is responsible for
                                deallocation.

**/
EFI_STATUS
EFIAPI
VirtioRingInit (
  IN  UINT16 QueueSize,
  OUT VRING  *Ring
  )
{
  UINTN          RingSize;
  volatile UINT8 *RingPagesPtr;
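
  //
  // A virtio ring consists of two page-aligned regions (virtio-0.9.5, 2.3):
  // the descriptor table plus the available ring make up the first region,
  // and the used ring occupies the second. Size each region below, rounded
  // up to a whole multiple of EFI_PAGE_SIZE.
  //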
  RingSize = ALIGN_VALUE (
               sizeof *Ring->Desc            * QueueSize +
               sizeof *Ring->Avail.Flags                 +
               sizeof *Ring->Avail.Idx                   +
               sizeof *Ring->Avail.Ring      * QueueSize +
               sizeof *Ring->Avail.UsedEvent,
               EFI_PAGE_SIZE);

  RingSize += ALIGN_VALUE (
                sizeof *Ring->Used.Flags                  +
                sizeof *Ring->Used.Idx                    +
                sizeof *Ring->Used.UsedElem   * QueueSize +
                sizeof *Ring->Used.AvailEvent,
                EFI_PAGE_SIZE);

  Ring->NumPages = EFI_SIZE_TO_PAGES (RingSize);
  Ring->Base = AllocatePages (Ring->NumPages);
  if (Ring->Base == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  SetMem (Ring->Base, RingSize, 0x00);
  RingPagesPtr = Ring->Base;

  Ring->Desc = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Desc * QueueSize;

  Ring->Avail.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Flags;

  Ring->Avail.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Idx;

  Ring->Avail.Ring = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Ring * QueueSize;

  Ring->Avail.UsedEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.UsedEvent;
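
  //
  // The used ring must start on the next page boundary (virtio-0.9.5, 2.3),
  // so advance RingPagesPtr to the next EFI_PAGE_SIZE multiple relative to
  // Ring->Base.
  //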
  RingPagesPtr = (volatile UINT8 *) Ring->Base +
                 ALIGN_VALUE (RingPagesPtr - (volatile UINT8 *) Ring->Base,
                   EFI_PAGE_SIZE);

  Ring->Used.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Flags;

  Ring->Used.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Idx;

  Ring->Used.UsedElem = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.UsedElem * QueueSize;

  Ring->Used.AvailEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.AvailEvent;

  Ring->QueueSize = QueueSize;
  return EFI_SUCCESS;
}


/**

  Tear down the internal resources of a configured virtio ring.

  The caller is responsible for stopping the host from using this ring before
  invoking this function: the VSTAT_DRIVER_OK bit must be clear in
  VhdrDeviceStatus.

  @param[out] Ring  The virtio ring to clean up.

**/
VOID
EFIAPI
VirtioRingUninit (
  IN OUT VRING *Ring
  )
{
  FreePages (Ring->Base, Ring->NumPages);
  SetMem (Ring, sizeof *Ring, 0x00);
}


/**

  Turn off interrupt notifications from the host, and prepare for appending
  multiple descriptors to the virtio ring.

  The calling driver must be in VSTAT_DRIVER_OK state.

  @param[in,out] Ring  The virtio ring we intend to append descriptors to.

  @param[out] Indices  The DESC_INDICES structure to initialize.

**/
VOID
EFIAPI
VirtioPrepare (
  IN OUT VRING        *Ring,
  OUT    DESC_INDICES *Indices
  )
{
  //
  // Prepare for virtio-0.9.5, 2.4.2 Receiving Used Buffers From the Device.
  // We're going to poll for the answer; the host should not send an
  // interrupt.
  //
  *Ring->Avail.Flags = (UINT16) VRING_AVAIL_F_NO_INTERRUPT;

  //
  // Prepare for virtio-0.9.5, 2.4.1 Supplying Buffers to the Device.
  //
  // Since we support only one in-flight descriptor chain, we can always build
  // that chain starting at entry #0 of the descriptor table.
  //
  Indices->HeadDescIdx = 0;
  Indices->NextDescIdx = Indices->HeadDescIdx;
}


/**

  Append a contiguous buffer for transmission / reception via the virtio ring.

  This function implements the following section from virtio-0.9.5:
  - 2.4.1.1 Placing Buffers into the Descriptor Table

  Free space is taken for granted, since the individual drivers support only
  synchronous requests and host side status is processed in lock-step with
  request submission. It is the calling driver's responsibility to verify the
  ring size in advance.

  The caller is responsible for initializing *Indices with VirtioPrepare()
  first.

  @param[in,out] Ring        The virtio ring to append the buffer to, as a
                             descriptor.

  @param[in] BufferPhysAddr  (Guest pseudo-physical) start address of the
                             transmit / receive buffer.

  @param[in] BufferSize      Number of bytes to transmit or receive.

  @param[in] Flags           A bitmask of VRING_DESC_F_* flags. The caller
                             computes this mask dependent on further buffers
                             to append and transfer direction.
                             VRING_DESC_F_INDIRECT is unsupported. The
                             VRING_DESC.Next field is always set, but the host
                             only interprets it dependent on VRING_DESC_F_NEXT.

  @param[in,out] Indices     Indices->HeadDescIdx is not accessed.
                             On input, Indices->NextDescIdx identifies the
                             next descriptor to carry the buffer. On output,
                             Indices->NextDescIdx is incremented by one,
                             modulo 2^16.

**/
VOID
EFIAPI
VirtioAppendDesc (
  IN OUT VRING        *Ring,
  IN     UINTN        BufferPhysAddr,
  IN     UINT32       BufferSize,
  IN     UINT16       Flags,
  IN OUT DESC_INDICES *Indices
  )
{
  volatile VRING_DESC *Desc;
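
  //
  // Indices->NextDescIdx is free-running; reduce it modulo QueueSize only
  // when indexing the descriptor table, letting it wrap modulo 2^16 (see the
  // note on Indices in the comment above).
  //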
  Desc        = &Ring->Desc[Indices->NextDescIdx++ % Ring->QueueSize];
  Desc->Addr  = BufferPhysAddr;
  Desc->Len   = BufferSize;
  Desc->Flags = Flags;
  Desc->Next  = Indices->NextDescIdx % Ring->QueueSize;
}


/**

  Notify the host about the descriptor chain just built, and wait until the
  host processes it.

  @param[in] VirtIo       The target virtio device to notify.

  @param[in] VirtQueueId  Identifies the queue for the target device.

  @param[in,out] Ring     The virtio ring with descriptors to submit.

  @param[in] Indices      Indices->NextDescIdx is not accessed.
                          Indices->HeadDescIdx identifies the head descriptor
                          of the descriptor chain.

  @return              Error code from VirtIo->SetQueueNotify() if it fails.

  @retval EFI_SUCCESS  Otherwise, the host processed all descriptors.

**/
EFI_STATUS
EFIAPI
VirtioFlush (
  IN     VIRTIO_DEVICE_PROTOCOL *VirtIo,
  IN     UINT16                 VirtQueueId,
  IN OUT VRING                  *Ring,
  IN     DESC_INDICES           *Indices
  )
{
  UINT16     NextAvailIdx;
  EFI_STATUS Status;
  UINTN      PollPeriodUsecs;

  //
  // virtio-0.9.5, 2.4.1.2 Updating the Available Ring
  //
  // It is not exactly clear from the wording of the virtio-0.9.5
  // specification, but each entry in the Available Ring references only the
  // head descriptor of any given descriptor chain.
  //
  NextAvailIdx = *Ring->Avail.Idx;
  Ring->Avail.Ring[NextAvailIdx++ % Ring->QueueSize] =
    Indices->HeadDescIdx % Ring->QueueSize;

  //
  // virtio-0.9.5, 2.4.1.3 Updating the Index Field
  //
  MemoryFence();
  *Ring->Avail.Idx = NextAvailIdx;

  //
  // virtio-0.9.5, 2.4.1.4 Notifying the Device -- gratuitous notifications are
  // OK.
  //
  MemoryFence();
  Status = VirtIo->SetQueueNotify (VirtIo, VirtQueueId);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // virtio-0.9.5, 2.4.2 Receiving Used Buffers From the Device
  // Wait until the host processes and acknowledges our descriptor chain. The
  // condition we use for polling is greatly simplified and relies on the
  // synchronous, lock-step progress.
  //
  // Keep slowing down until we reach a poll period of slightly above 1 ms.
  //
  PollPeriodUsecs = 1;
  MemoryFence();
  while (*Ring->Used.Idx != NextAvailIdx) {
    gBS->Stall (PollPeriodUsecs); // calls AcpiTimerLib::MicroSecondDelay

    if (PollPeriodUsecs < 1024) {
      PollPeriodUsecs *= 2;
    }
    MemoryFence();
  }

  MemoryFence();
  return EFI_SUCCESS;
}