OvmfPkg/XenPvBlkDxe: Add BlockFront client.

This is the code that will do the actual communication between OVMF and
a PV block backend, where the block device lives. The protocol used is
described in the blkif.h header.

This implementation originally comes from Mini-OS, a part of the Xen
Project.

Changes in V4:
- Add a file header to BlockFront.h (license, copyright, brief description).

Changes in V3:
- Improve the comment of XenBusReadUint64.
- Move blkif.h to this patch,
  with the necessary #pragma pack(4) applied for Ia32.
- Add a note about the license in the commit message.
- Add "The protocol used is described in the blkif.h header." to the
  commit message.
- Require a sector-size that is a multiple of 512, or fail to initialize.
- Use Sector instead of Offset for I/O requests,
  with Sector being a 512-byte unit.
- Print something if EventChannelNotify returns an error.

Changes in V2:
- Triggering CoW is probably not needed in OVMF (as opposed to Mini-OS);
  removed the test.
- Improve comments.
- Rename XenbusReadInteger to XenBusReadUint64.
- Remove the callback from IoData, use a simple status instead.
- Return a status from the synchronous I/O.
- Close the protocol if BlockFront initialization fails.
- Fix a few debug prints.
- Rename XenbusIo to XenBusIo.
- XenPvBlkWaitForBackendState will return an error if the new backend
  state is not the expected state.
- Add the license.

License: This patch adds some files which are under the MIT license.
Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Acked-by: Jordan Justen <jordan.l.justen@intel.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@16273 6f19259b-4bc3-4df7-8a09-765794883524

--- /dev/null
+++ b/OvmfPkg/Include/IndustryStandard/Xen/io/blkif.h
@@ -0,0 +1,619 @@
/******************************************************************************
* blkif.h
*
* Unified block-device I/O interface for Xen guest OSes.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2003-2004, Keir Fraser
* Copyright (c) 2012, Spectra Logic Corporation
*/
#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__
#include "ring.h"
#include "../grant_table.h"
/*
* Front->back notifications: When enqueuing a new request, sending a
* notification can be made conditional on req_event (i.e., the generic
* hold-off mechanism provided by the ring macros). Backends must set
* req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
*
* Back->front notifications: When enqueuing a new response, sending a
* notification can be made conditional on rsp_event (i.e., the generic
* hold-off mechanism provided by the ring macros). Frontends must set
* rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
*/
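/*
 * Editor's illustrative sketch (hypothetical frontend code, not part of
 * the upstream header): the request-path hold-off pattern described
 * above, as used later in BlockFront.c.
 *
 *   RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
 *   if (Notify) {
 *     XenBusIo->EventChannelNotify (XenBusIo, Dev->EventChannel);
 *   }
 */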
#ifndef blkif_vdev_t
#define blkif_vdev_t UINT16
#endif
#define blkif_sector_t UINT64
/*
* Feature and Parameter Negotiation
* =================================
* The two halves of a Xen block driver utilize nodes within the XenStore to
* communicate capabilities and to negotiate operating parameters. This
* section enumerates these nodes which reside in the respective front and
* backend portions of the XenStore, following the XenBus convention.
*
* All data in the XenStore is stored as strings. Nodes specifying numeric
* values are encoded in decimal. Integer value ranges listed below are
* expressed as fixed-size integer types capable of storing the conversion
* of a properly formatted node string, without loss of information.
*
* Any specified default value is in effect if the corresponding XenBus node
* is not present in the XenStore.
*
* XenStore nodes in sections marked "PRIVATE" are solely for use by the
* driver side whose XenBus tree contains them.
*
* XenStore nodes marked "DEPRECATED" in their notes section should only be
* used to provide interoperability with legacy implementations.
*
* See the XenBus state transition diagram below for details on when XenBus
* nodes must be published and when they can be queried.
*
*****************************************************************************
* Backend XenBus Nodes
*****************************************************************************
*
*------------------ Backend Device Identification (PRIVATE) ------------------
*
* mode
* Values: "r" (read only), "w" (writable)
*
* The read or write access permissions to the backing store to be
* granted to the frontend.
*
* params
* Values: string
*
* A free-form string providing sufficient information for the
* backend driver to open the backing device. (e.g. the path to the
* file or block device representing the backing store.)
*
* type
* Values: "file", "phy", "tap"
*
* The type of the backing device/object.
*
*--------------------------------- Features ---------------------------------
*
* feature-barrier
* Values: 0/1 (boolean)
* Default Value: 0
*
* A value of "1" indicates that the backend can process requests
* containing the BLKIF_OP_WRITE_BARRIER request opcode. Requests
* of this type may still be returned at any time with the
* BLKIF_RSP_EOPNOTSUPP result code.
*
* feature-flush-cache
* Values: 0/1 (boolean)
* Default Value: 0
*
* A value of "1" indicates that the backend can process requests
* containing the BLKIF_OP_FLUSH_DISKCACHE request opcode. Requests
* of this type may still be returned at any time with the
* BLKIF_RSP_EOPNOTSUPP result code.
*
* feature-discard
* Values: 0/1 (boolean)
* Default Value: 0
*
* A value of "1" indicates that the backend can process requests
* containing the BLKIF_OP_DISCARD request opcode. Requests
* of this type may still be returned at any time with the
* BLKIF_RSP_EOPNOTSUPP result code.
*
* feature-persistent
* Values: 0/1 (boolean)
* Default Value: 0
* Notes: 7
*
* A value of "1" indicates that the backend can keep the grants used
* by the frontend driver mapped, so the same set of grants should be
* used in all transactions. The maximum number of grants the backend
* can map persistently depends on the implementation, but ideally it
* should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
* feature the backend doesn't need to unmap each grant, preventing
* costly TLB flushes. The backend driver should only map grants
* persistently if the frontend supports it. If a backend driver chooses
* to use the persistent protocol when the frontend doesn't support it,
* it will probably hit the maximum number of persistently mapped grants
* (due to the fact that the frontend won't be reusing the same grants),
* and fall back to non-persistent mode. Backend implementations may
* shrink or expand the number of persistently mapped grants without
* notifying the frontend depending on memory constraints (this might
* cause a performance degradation).
*
* If a backend driver wants to limit the maximum number of persistently
* mapped grants to a value less than RING_SIZE *
* BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
* discard the grants that are less commonly used. Using an LRU in the
* backend driver paired with a LIFO queue in the frontend will
* allow better performance in this scenario.
*
*----------------------- Request Transport Parameters ------------------------
*
* max-ring-page-order
* Values: <UINT32>
* Default Value: 0
* Notes: 1, 3
*
* The maximum supported size of the request ring buffer in units of
* lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
* etc.).
*
* max-ring-pages
* Values: <UINT32>
* Default Value: 1
* Notes: DEPRECATED, 2, 3
*
* The maximum supported size of the request ring buffer in units of
* machine pages. The value must be a power of 2.
*
*------------------------- Backend Device Properties -------------------------
*
* discard-alignment
* Values: <UINT32>
* Default Value: 0
* Notes: 4, 5
*
* The offset, in bytes, from the beginning of the virtual block device
* to the first addressable discard extent on the underlying device.
*
* discard-granularity
* Values: <UINT32>
* Default Value: <"sector-size">
* Notes: 4
*
* The size, in bytes, of the individually addressable discard extents
* of the underlying device.
*
* discard-secure
* Values: 0/1 (boolean)
* Default Value: 0
* Notes: 10
*
* A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
* requests with the BLKIF_DISCARD_SECURE flag set.
*
* info
* Values: <UINT32> (bitmap)
*
* A collection of bit flags describing attributes of the backing
* device. The VDISK_* macros define the meaning of each bit
* location.
*
* sector-size
* Values: <UINT32>
*
* The logical sector size, in bytes, of the backend device.
*
* physical-sector-size
* Values: <UINT32>
*
* The physical sector size, in bytes, of the backend device.
*
* sectors
* Values: <UINT64>
*
* The size of the backend device, expressed in units of its logical
* sector size ("sector-size").
*
*****************************************************************************
* Frontend XenBus Nodes
*****************************************************************************
*
*----------------------- Request Transport Parameters -----------------------
*
* event-channel
* Values: <UINT32>
*
* The identifier of the Xen event channel used to signal activity
* in the ring buffer.
*
* ring-ref
* Values: <UINT32>
* Notes: 6
*
* The Xen grant reference granting permission for the backend to map
* the sole page in a single page sized ring buffer.
*
* ring-ref%u
* Values: <UINT32>
* Notes: 6
*
* For a frontend providing a multi-page ring, a "number of ring pages"
* sized list of nodes, each containing a Xen grant reference granting
* permission for the backend to map the page of the ring located
* at page index "%u". Page indexes are zero based.
*
* protocol
* Values: string (XEN_IO_PROTO_ABI_*)
* Default Value: XEN_IO_PROTO_ABI_NATIVE
*
* The machine ABI rules governing the format of all ring request and
* response structures.
*
* ring-page-order
* Values: <UINT32>
* Default Value: 0
* Maximum Value: MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
* Notes: 1, 3
*
* The size of the frontend allocated request ring buffer in units
* of lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
* etc.).
*
* num-ring-pages
* Values: <UINT32>
* Default Value: 1
* Maximum Value: MAX(max-ring-pages,(0x1 << max-ring-page-order))
* Notes: DEPRECATED, 2, 3
*
* The size of the frontend allocated request ring buffer in units of
* machine pages. The value must be a power of 2.
*
* feature-persistent
* Values: 0/1 (boolean)
* Default Value: 0
* Notes: 7, 8, 9
*
* A value of "1" indicates that the frontend will reuse the same grants
* for all transactions, allowing the backend to map them with write
* access (even when it should be read-only). If the frontend hits the
* maximum number of allowed persistently mapped grants, it can fall back
* to non-persistent mode. This will cause a performance degradation,
* since the backend driver will still try to map those grants
* persistently. Since the persistent grants protocol is compatible with
* the previous protocol, a frontend driver can choose to work in
* persistent mode even when the backend doesn't support it.
*
* It is recommended that the frontend driver stores the persistently
* mapped grants in a LIFO queue, so a subset of all persistently mapped
* grants gets used commonly. This is done in case the backend driver
* decides to limit the maximum number of persistently mapped grants
* to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
*
*------------------------- Virtual Device Properties -------------------------
*
* device-type
* Values: "disk", "cdrom", "floppy", etc.
*
* virtual-device
* Values: <UINT32>
*
* A value indicating the physical device to virtualize within the
* frontend's domain. (e.g. "The first ATA disk", "The third SCSI
* disk", etc.)
*
* See docs/misc/vbd-interface.txt for details on the format of this
* value.
*
* Notes
* -----
* (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
* PV drivers.
* (2) Multi-page ring buffer scheme first used in some RedHat distributions
* including a distribution deployed on certain nodes of the Amazon
* EC2 cluster.
* (3) Support for multi-page ring buffers was implemented independently,
* in slightly different forms, by both Citrix and RedHat/Amazon.
* For full interoperability, block front and backends should publish
* identical ring parameters, adjusted for unit differences, to the
* XenStore nodes used in both schemes.
* (4) Devices that support discard functionality may internally allocate space
* (discardable extents) in units that are larger than the exported logical
* block size. If the backing device has such discardable extents, the
* backend should provide both discard-granularity and discard-alignment.
* Providing just one of the two may be considered an error by the frontend.
* Backends supporting discard should include discard-granularity and
* discard-alignment even if they support discarding individual sectors.
* Frontends should assume discard-alignment == 0 and discard-granularity
* == sector size if these keys are missing.
* (5) The discard-alignment parameter allows a physical device to be
* partitioned into virtual devices that do not necessarily begin or
* end on a discardable extent boundary.
* (6) When there is only a single page allocated to the request ring,
* 'ring-ref' is used to communicate the grant reference for this
* page to the backend. When using a multi-page ring, the 'ring-ref'
* node is not created. Instead 'ring-ref0' - 'ring-refN' are used.
* (7) When using persistent grants, data has to be copied from/to the page
* where the grant is currently mapped. The overhead of doing this copy,
* however, doesn't negate the speed improvement of not having to unmap
* the grants.
* (8) The frontend driver has to allow the backend driver to map all grants
* with write access, even when they should be mapped read-only, since
* further requests may reuse these grants and require write permissions.
* (9) The Linux implementation doesn't have a limit on the maximum number of
* grants that can be persistently mapped in the frontend driver, but
* due to the frontend driver implementation it should never be bigger
* than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
*(10) The discard-secure property may be present and will be set to 1 if the
* backing device supports secure discard.
*/
/*
* STATE DIAGRAMS
*
*****************************************************************************
* Startup *
*****************************************************************************
*
* Tool stack creates front and back nodes with state XenbusStateInitialising.
*
* Front Back
* ================================= =====================================
* XenbusStateInitialising XenbusStateInitialising
* o Query virtual device o Query backend device identification
* properties. data.
* o Setup OS device instance. o Open and validate backend device.
* o Publish backend features and
* transport parameters.
* |
* |
* V
* XenbusStateInitWait
*
* o Query backend features and
* transport parameters.
* o Allocate and initialize the
* request ring.
* o Publish transport parameters
* that will be in effect during
* this connection.
* |
* |
* V
* XenbusStateInitialised
*
* o Query frontend transport parameters.
* o Connect to the request ring and
* event channel.
* o Publish backend device properties.
* |
* |
* V
* XenbusStateConnected
*
* o Query backend device properties.
* o Finalize OS virtual device
* instance.
* |
* |
* V
* XenbusStateConnected
*
* Note: Drivers that do not support any optional features, or the negotiation
* of transport parameters, can skip certain states in the state machine:
*
* o A frontend may transition to XenbusStateInitialised without
* waiting for the backend to enter XenbusStateInitWait. In this
* case, default transport parameters are in effect and any
* transport parameters published by the frontend must contain
* their default values.
*
* o A backend may transition to XenbusStateInitialised, bypassing
* XenbusStateInitWait, without waiting for the frontend to first
* enter the XenbusStateInitialised state. In this case, default
* transport parameters are in effect and any transport parameters
* published by the backend must contain their default values.
*
* Drivers that support optional features and/or transport parameter
* negotiation must tolerate these additional state transition paths.
* In general this means performing the work of any skipped state
* transition, if it has not already been performed, in addition to the
* work associated with entry into the current state.
*/
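/*
 * Editor's illustrative sketch (hypothetical frontend loop, not part of
 * the upstream header): waiting for the backend to reach a given state,
 * re-reading the backend "state" node after each XenStore watch event.
 * The helper names are placeholders; BlockFront.c below implements this
 * in XenPvBlkWaitForBackendState.
 *
 *   while (State != Expected) {
 *     WaitForWatch (StateWatchToken);
 *     State = ReadBackendState ();
 *   }
 */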
/*
* REQUEST CODES.
*/
#define BLKIF_OP_READ 0
#define BLKIF_OP_WRITE 1
/*
* All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
* operation code ("barrier request") must be completed prior to the
* execution of the barrier request. All writes issued after the barrier
* request must not execute until after the completion of the barrier request.
*
* Optional. See "feature-barrier" XenBus node documentation above.
*/
#define BLKIF_OP_WRITE_BARRIER 2
/*
* Commit any uncommitted contents of the backing device's volatile cache
* to stable storage.
*
* Optional. See "feature-flush-cache" XenBus node documentation above.
*/
#define BLKIF_OP_FLUSH_DISKCACHE 3
/*
* Used in SLES sources for a device-specific command packet
* contained within the request. Reserved for that purpose.
*/
#define BLKIF_OP_RESERVED_1 4
/*
* Indicate to the backend device that a region of storage is no longer in
* use, and may be discarded at any time without impact to the client. If
* the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
* discarded region on the device must be rendered unrecoverable before the
* command returns.
*
* This operation is analogous to performing a trim (ATA) or unmap (SCSI)
* command on a native device.
*
* More information about trim/unmap operations can be found at:
* http://t13.org/Documents/UploadedDocuments/docs2008/
* e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
* http://www.seagate.com/staticfiles/support/disc/manuals/
* Interface%20manuals/100293068c.pdf
*
* Optional. See "feature-discard", "discard-alignment",
* "discard-granularity", and "discard-secure" in the XenBus node
* documentation above.
*/
#define BLKIF_OP_DISCARD 5
/*
* Recognized if "feature-max-indirect-segments" is present in the backend
* xenbus info. The "feature-max-indirect-segments" node contains the maximum
* number of segments allowed by the backend per request. If the node is
* present, the frontend might use blkif_request_indirect structs in order to
* issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
* maximum number of indirect segments is fixed by the backend, but the
* frontend can issue requests with any number of indirect segments as long as
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
* These pages are filled with an array of blkif_request_segment that hold the
* information about the segments. The number of indirect pages to use is
* determined by the number of segments an indirect request contains. Every
* indirect page can contain a maximum of
* (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
* calculate the number of indirect pages to use we have to do
* ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
*
* If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
* create the "feature-max-indirect-segments" node!
*/
#define BLKIF_OP_INDIRECT 6
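/*
 * Editor's illustrative sketch (not part of the upstream header): the
 * ceil() above reduces to plain integer arithmetic. With hypothetical
 * names:
 *
 *   SegsPerPage   = PAGE_SIZE / sizeof (struct blkif_request_segment);
 *   IndirectPages = (IndirectSegments + SegsPerPage - 1) / SegsPerPage;
 */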
/*
* Maximum scatter/gather segments per request.
* This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
* NB. This could be 12 if the ring indexes weren't stored in the same page.
*/
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
/*
* Maximum number of indirect pages to use per request.
*/
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
/*
* NB. first_sect and last_sect in blkif_request_segment, as well as
* sector_number in blkif_request, are always expressed in 512-byte units.
* However they must be properly aligned to the real sector size of the
* physical disk, which is reported in the "physical-sector-size" node in
* the backend xenbus info. Also the xenbus "sectors" node is expressed in
* 512-byte units.
*/
struct blkif_request_segment {
grant_ref_t gref; /* reference to I/O buffer frame */
/* @first_sect: first sector in frame to transfer (inclusive). */
/* @last_sect: last sector in frame to transfer (inclusive). */
UINT8 first_sect, last_sect;
};
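/*
 * Editor's illustrative sketch (hypothetical values, not part of the
 * upstream header): for a buffer starting at byte offset Off within its
 * first frame and spanning Len bytes, the 512-byte-unit bounds of the
 * first and last segments are:
 *
 *   first_sect = (Off & (PAGE_SIZE - 1)) / 512;
 *   last_sect  = ((Off + Len - 1) & (PAGE_SIZE - 1)) / 512;
 *
 * (compare the segment setup in XenPvBlockAsyncIo in BlockFront.c).
 */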
/*
* Starting ring element for any I/O request.
*/
#if defined(__i386__)
//
// pack(4) is necessary when these structs are compiled for Ia32.
// Without it, the struct will have a different alignment than the one
// a backend expects for a 32-bit guest.
//
#pragma pack(4)
#endif
struct blkif_request {
UINT8 operation; /* BLKIF_OP_??? */
UINT8 nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
UINT64 id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;
/*
* Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD
* sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
*/
struct blkif_request_discard {
UINT8 operation; /* BLKIF_OP_DISCARD */
UINT8 flag; /* BLKIF_DISCARD_SECURE or zero */
#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
blkif_vdev_t handle; /* same as for read/write requests */
UINT64 id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk */
UINT64 nr_sectors; /* number of contiguous sectors to discard*/
};
typedef struct blkif_request_discard blkif_request_discard_t;
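/*
 * Editor's illustrative sketch (hypothetical frontend code, not part of
 * the upstream header): issuing a discard by casting the ring slot, as
 * described above.
 *
 *   struct blkif_request_discard *Discard =
 *     (struct blkif_request_discard *) RING_GET_REQUEST (&Ring, Index);
 *   Discard->operation     = BLKIF_OP_DISCARD;
 *   Discard->flag          = 0;            // or BLKIF_DISCARD_SECURE
 *   Discard->sector_number = FirstSector;  // 512-byte units
 *   Discard->nr_sectors    = NumSectors;
 */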
struct blkif_request_indirect {
UINT8 operation; /* BLKIF_OP_INDIRECT */
UINT8 indirect_op; /* BLKIF_OP_{READ/WRITE} */
UINT16 nr_segments; /* number of segments */
UINT64 id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
blkif_vdev_t handle; /* same as for read/write requests */
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef __i386__
UINT64 pad; /* Make it 64 byte aligned on i386 */
#endif
};
typedef struct blkif_request_indirect blkif_request_indirect_t;
struct blkif_response {
UINT64 id; /* copied from request */
UINT8 operation; /* copied from request */
INT16 status; /* BLKIF_RSP_??? */
};
typedef struct blkif_response blkif_response_t;
#if defined(__i386__)
#pragma pack()
#endif
/*
* STATUS RETURN CODES.
*/
/* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY 0
/*
* Generate blkif ring structures and types.
*/
DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
#define VDISK_CDROM 0x1
#define VDISK_REMOVABLE 0x2
#define VDISK_READONLY 0x4
#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

--- /dev/null
+++ b/OvmfPkg/XenPvBlkDxe/BlockFront.c
@@ -0,0 +1,647 @@
/** @file
Minimal block driver for Mini-OS.
Copyright (c) 2007-2008 Samuel Thibault.
Copyright (C) 2014, Citrix Ltd.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
**/
#include <Library/PrintLib.h>
#include <Library/DebugLib.h>
#include "BlockFront.h"
#include <IndustryStandard/Xen/io/protocols.h>
#include <IndustryStandard/Xen/io/xenbus.h>
//
// Header used for UINT32_MAX and UINT16_MAX
//
#include "inttypes.h"
/**
Helper to read an integer from XenStore.
If the number overflows according to the range defined by UINT64,
then ASSERT().
@param This A pointer to a XENBUS_PROTOCOL instance.
@param Node The XenStore node to read from.
@param FromBackend Read frontend or backend value.
@param ValuePtr Where to put the value.
@retval XENSTORE_STATUS_SUCCESS If successful, ValuePtr is updated.
@return Any other return value indicates an error;
ValuePtr is not updated in this case.
**/
STATIC
XENSTORE_STATUS
XenBusReadUint64 (
IN XENBUS_PROTOCOL *This,
IN CONST CHAR8 *Node,
IN BOOLEAN FromBackend,
OUT UINT64 *ValuePtr
)
{
XENSTORE_STATUS Status;
CHAR8 *Ptr;
if (!FromBackend) {
Status = This->XsRead (This, XST_NIL, Node, (VOID**)&Ptr);
} else {
Status = This->XsBackendRead (This, XST_NIL, Node, (VOID**)&Ptr);
}
if (Status != XENSTORE_STATUS_SUCCESS) {
return Status;
}
// AsciiStrDecimalToUint64 will ASSERT if Ptr overflows UINT64.
*ValuePtr = AsciiStrDecimalToUint64 (Ptr);
FreePool (Ptr);
return Status;
}
/**
Free an instance of XEN_BLOCK_FRONT_DEVICE.
@param Dev The instance to free.
**/
STATIC
VOID
XenPvBlockFree (
IN XEN_BLOCK_FRONT_DEVICE *Dev
)
{
XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
if (Dev->RingRef != 0) {
XenBusIo->GrantEndAccess (XenBusIo, Dev->RingRef);
}
if (Dev->Ring.sring != NULL) {
FreePages (Dev->Ring.sring, 1);
}
if (Dev->EventChannel != 0) {
XenBusIo->EventChannelClose (XenBusIo, Dev->EventChannel);
}
FreePool (Dev);
}
/**
Wait until the backend has reached the ExpectedState.
@param Dev A XEN_BLOCK_FRONT_DEVICE instance.
@param ExpectedState The backend state expected.
@param LastStatePtr An optional pointer where to write the final state.
@return XENSTORE_STATUS_SUCCESS if the new backend state is ExpectedState,
or an error otherwise.
**/
STATIC
XENSTORE_STATUS
XenPvBlkWaitForBackendState (
IN XEN_BLOCK_FRONT_DEVICE *Dev,
IN XenbusState ExpectedState,
OUT XenbusState *LastStatePtr OPTIONAL
)
{
XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
XenbusState State;
UINT64 Value;
XENSTORE_STATUS Status = XENSTORE_STATUS_SUCCESS;
while (TRUE) {
Status = XenBusReadUint64 (XenBusIo, "state", TRUE, &Value);
if (Status != XENSTORE_STATUS_SUCCESS) {
return Status;
}
if (Value > XenbusStateReconfigured) {
//
// Value is not a State value.
//
return XENSTORE_STATUS_EIO;
}
State = Value;
if (State == ExpectedState) {
break;
} else if (State > ExpectedState) {
Status = XENSTORE_STATUS_FAIL;
break;
}
DEBUG ((EFI_D_INFO,
"XenPvBlk: waiting backend state %d, current: %d\n",
ExpectedState, State));
XenBusIo->WaitForWatch (XenBusIo, Dev->StateWatchToken);
}
if (LastStatePtr != NULL) {
*LastStatePtr = State;
}
return Status;
}
EFI_STATUS
XenPvBlockFrontInitialization (
IN XENBUS_PROTOCOL *XenBusIo,
IN CONST CHAR8 *NodeName,
OUT XEN_BLOCK_FRONT_DEVICE **DevPtr
)
{
XENSTORE_TRANSACTION xbt;
CHAR8 *DeviceType;
blkif_sring_t *SharedRing;
XENSTORE_STATUS Status;
XEN_BLOCK_FRONT_DEVICE *Dev;
XenbusState State;
UINT64 Value;
ASSERT (NodeName != NULL);
Dev = AllocateZeroPool (sizeof (XEN_BLOCK_FRONT_DEVICE));
Dev->Signature = XEN_BLOCK_FRONT_SIGNATURE;
Dev->NodeName = NodeName;
Dev->XenBusIo = XenBusIo;
Dev->DeviceId = XenBusIo->DeviceId;
XenBusIo->XsRead (XenBusIo, XST_NIL, "device-type", (VOID**)&DeviceType);
if (AsciiStrCmp (DeviceType, "cdrom") == 0) {
Dev->MediaInfo.CdRom = TRUE;
} else {
Dev->MediaInfo.CdRom = FALSE;
}
FreePool (DeviceType);
Status = XenBusReadUint64 (XenBusIo, "backend-id", FALSE, &Value);
if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT16_MAX) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to get backend-id (%d)\n",
Status));
goto Error;
}
Dev->DomainId = Value;
XenBusIo->EventChannelAllocate (XenBusIo, Dev->DomainId, &Dev->EventChannel);
SharedRing = (blkif_sring_t*) AllocatePages (1);
SHARED_RING_INIT (SharedRing);
FRONT_RING_INIT (&Dev->Ring, SharedRing, EFI_PAGE_SIZE);
XenBusIo->GrantAccess (XenBusIo,
Dev->DomainId,
(INTN) SharedRing >> EFI_PAGE_SHIFT,
FALSE,
&Dev->RingRef);
Again:
Status = XenBusIo->XsTransactionStart (XenBusIo, &xbt);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_WARN, "XenPvBlk: Failed to start transaction, %d\n", Status));
goto Error;
}
Status = XenBusIo->XsPrintf (XenBusIo, xbt, NodeName, "ring-ref", "%d",
Dev->RingRef);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write ring-ref.\n"));
goto AbortTransaction;
}
Status = XenBusIo->XsPrintf (XenBusIo, xbt, NodeName,
"event-channel", "%d", Dev->EventChannel);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write event-channel.\n"));
goto AbortTransaction;
}
Status = XenBusIo->XsPrintf (XenBusIo, xbt, NodeName,
"protocol", "%a", XEN_IO_PROTO_ABI_NATIVE);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write protocol.\n"));
goto AbortTransaction;
}
Status = XenBusIo->SetState (XenBusIo, xbt, XenbusStateConnected);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to switch state.\n"));
goto AbortTransaction;
}
Status = XenBusIo->XsTransactionEnd (XenBusIo, xbt, FALSE);
if (Status == XENSTORE_STATUS_EAGAIN) {
goto Again;
}
XenBusIo->RegisterWatchBackend (XenBusIo, "state", &Dev->StateWatchToken);
//
// Waiting for backend
//
Status = XenPvBlkWaitForBackendState (Dev, XenbusStateConnected, &State);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: backend for %a/%d not available, rc=%d state=%d\n",
XenBusIo->Type, XenBusIo->DeviceId, Status, State));
goto Error2;
}
Status = XenBusReadUint64 (XenBusIo, "info", TRUE, &Value);
if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
goto Error2;
}
Dev->MediaInfo.VDiskInfo = Value;
if (Dev->MediaInfo.VDiskInfo & VDISK_READONLY) {
Dev->MediaInfo.ReadWrite = FALSE;
} else {
Dev->MediaInfo.ReadWrite = TRUE;
}
Status = XenBusReadUint64 (XenBusIo, "sectors", TRUE, &Dev->MediaInfo.Sectors);
if (Status != XENSTORE_STATUS_SUCCESS) {
goto Error2;
}
Status = XenBusReadUint64 (XenBusIo, "sector-size", TRUE, &Value);
if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
goto Error2;
}
if (Value % 512 != 0) {
//
// This is not supported by the driver.
//
DEBUG ((EFI_D_ERROR, "XenPvBlk: Unsupported sector-size value %d, "
"it must be a multiple of 512\n", Value));
goto Error2;
}
Dev->MediaInfo.SectorSize = Value;
// Default value
Value = 0;
XenBusReadUint64 (XenBusIo, "feature-barrier", TRUE, &Value);
if (Value == 1) {
Dev->MediaInfo.FeatureBarrier = TRUE;
} else {
Dev->MediaInfo.FeatureBarrier = FALSE;
}
// Default value
Value = 0;
XenBusReadUint64 (XenBusIo, "feature-flush-cache", TRUE, &Value);
if (Value == 1) {
Dev->MediaInfo.FeatureFlushCache = TRUE;
} else {
Dev->MediaInfo.FeatureFlushCache = FALSE;
}
DEBUG ((EFI_D_INFO, "XenPvBlk: New disk with %ld sectors of %d bytes\n",
Dev->MediaInfo.Sectors, Dev->MediaInfo.SectorSize));
*DevPtr = Dev;
return EFI_SUCCESS;
Error2:
XenBusIo->UnregisterWatch (XenBusIo, Dev->StateWatchToken);
XenBusIo->XsRemove (XenBusIo, XST_NIL, "ring-ref");
XenBusIo->XsRemove (XenBusIo, XST_NIL, "event-channel");
XenBusIo->XsRemove (XenBusIo, XST_NIL, "protocol");
goto Error;
AbortTransaction:
XenBusIo->XsTransactionEnd (XenBusIo, xbt, TRUE);
Error:
XenPvBlockFree (Dev);
return EFI_DEVICE_ERROR;
}
VOID
XenPvBlockFrontShutdown (
IN XEN_BLOCK_FRONT_DEVICE *Dev
)
{
XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
XENSTORE_STATUS Status;
UINT64 Value;
XenPvBlockSync (Dev);
Status = XenBusIo->SetState (XenBusIo, XST_NIL, XenbusStateClosing);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: error while changing state to Closing: %d\n",
Status));
goto Close;
}
Status = XenPvBlkWaitForBackendState (Dev, XenbusStateClosing, NULL);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: error while waiting for closing backend state: %d\n",
Status));
goto Close;
}
Status = XenBusIo->SetState (XenBusIo, XST_NIL, XenbusStateClosed);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: error while changing state to Closed: %d\n",
Status));
goto Close;
}
Status = XenPvBlkWaitForBackendState (Dev, XenbusStateClosed, NULL);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: error while waiting for closed backend state: %d\n",
Status));
goto Close;
}
Status = XenBusIo->SetState (XenBusIo, XST_NIL, XenbusStateInitialising);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: error while changing state to initialising: %d\n",
Status));
goto Close;
}
while (TRUE) {
Status = XenBusReadUint64 (XenBusIo, "state", TRUE, &Value);
if (Status != XENSTORE_STATUS_SUCCESS) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: error while waiting for new backend state: %d\n",
Status));
goto Close;
}
if (Value <= XenbusStateInitWait || Value >= XenbusStateClosed) {
break;
}
DEBUG ((EFI_D_INFO,
"XenPvBlk: waiting backend state %d, current: %d\n",
XenbusStateInitWait, Value));
XenBusIo->WaitForWatch (XenBusIo, Dev->StateWatchToken);
}
Close:
XenBusIo->UnregisterWatch (XenBusIo, Dev->StateWatchToken);
XenBusIo->XsRemove (XenBusIo, XST_NIL, "ring-ref");
XenBusIo->XsRemove (XenBusIo, XST_NIL, "event-channel");
XenBusIo->XsRemove (XenBusIo, XST_NIL, "protocol");
XenPvBlockFree (Dev);
}
STATIC
VOID
XenPvBlockWaitSlot (
IN XEN_BLOCK_FRONT_DEVICE *Dev
)
{
/* Wait for a slot */
if (RING_FULL (&Dev->Ring)) {
while (TRUE) {
XenPvBlockAsyncIoPoll (Dev);
if (!RING_FULL (&Dev->Ring)) {
break;
}
/* Really no slot, could wait for an event on Dev->EventChannel. */
}
}
}
VOID
XenPvBlockAsyncIo (
IN OUT XEN_BLOCK_FRONT_IO *IoData,
IN BOOLEAN IsWrite
)
{
XEN_BLOCK_FRONT_DEVICE *Dev = IoData->Dev;
XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
blkif_request_t *Request;
RING_IDX RingIndex;
BOOLEAN Notify;
INT32 NumSegments, Index;
UINTN Start, End;
// Can't do I/O at a non-sector-aligned location
ASSERT(!(IoData->Sector & ((Dev->MediaInfo.SectorSize / 512) - 1)));
// Can't do I/O in non-sector-sized amounts
ASSERT(!(IoData->Size & (Dev->MediaInfo.SectorSize - 1)));
// Can't do I/O to a non-sector-aligned buffer
ASSERT(!((UINTN) IoData->Buffer & (Dev->MediaInfo.SectorSize - 1)));
Start = (UINTN) IoData->Buffer & ~EFI_PAGE_MASK;
End = ((UINTN) IoData->Buffer + IoData->Size + EFI_PAGE_SIZE - 1) & ~EFI_PAGE_MASK;
IoData->NumRef = NumSegments = (End - Start) / EFI_PAGE_SIZE;
ASSERT (NumSegments <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
XenPvBlockWaitSlot (Dev);
RingIndex = Dev->Ring.req_prod_pvt;
Request = RING_GET_REQUEST (&Dev->Ring, RingIndex);
Request->operation = IsWrite ? BLKIF_OP_WRITE : BLKIF_OP_READ;
Request->nr_segments = NumSegments;
Request->handle = Dev->DeviceId;
Request->id = (UINTN) IoData;
Request->sector_number = IoData->Sector;
for (Index = 0; Index < NumSegments; Index++) {
Request->seg[Index].first_sect = 0;
Request->seg[Index].last_sect = EFI_PAGE_SIZE / 512 - 1;
}
Request->seg[0].first_sect = ((UINTN) IoData->Buffer & EFI_PAGE_MASK) / 512;
Request->seg[NumSegments - 1].last_sect =
(((UINTN) IoData->Buffer + IoData->Size - 1) & EFI_PAGE_MASK) / 512;
for (Index = 0; Index < NumSegments; Index++) {
UINTN Data = Start + Index * EFI_PAGE_SIZE;
XenBusIo->GrantAccess (XenBusIo, Dev->DomainId,
Data >> EFI_PAGE_SHIFT, IsWrite,
&Request->seg[Index].gref);
IoData->GrantRef[Index] = Request->seg[Index].gref;
}
Dev->Ring.req_prod_pvt = RingIndex + 1;
MemoryFence ();
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
if (Notify) {
UINT32 ReturnCode;
ReturnCode = XenBusIo->EventChannelNotify (XenBusIo, Dev->EventChannel);
if (ReturnCode != 0) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: Unexpected return value from EventChannelNotify: %d\n",
ReturnCode));
}
}
}
EFI_STATUS
XenPvBlockIo (
IN OUT XEN_BLOCK_FRONT_IO *IoData,
IN BOOLEAN IsWrite
)
{
//
// Status value that corresponds to an I/O in progress.
//
IoData->Status = EFI_ALREADY_STARTED;
XenPvBlockAsyncIo (IoData, IsWrite);
while (IoData->Status == EFI_ALREADY_STARTED) {
XenPvBlockAsyncIoPoll (IoData->Dev);
}
return IoData->Status;
}
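//
// Editor's illustrative sketch (hypothetical caller, not part of this
// patch): a synchronous read through XenPvBlockIo. Dev, Buffer, Lba and
// NumberOfBytes are placeholders; Buffer and Size must satisfy the
// alignment ASSERTs in XenPvBlockAsyncIo, and Sector is in 512-byte
// units.
//
//   XEN_BLOCK_FRONT_IO IoData;
//
//   IoData.Dev    = Dev;
//   IoData.Buffer = Buffer;
//   IoData.Size   = NumberOfBytes;
//   IoData.Sector = Lba * (Dev->MediaInfo.SectorSize / 512);
//   Status = XenPvBlockIo (&IoData, FALSE);   // FALSE == read
//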
STATIC
VOID
XenPvBlockPushOperation (
IN XEN_BLOCK_FRONT_DEVICE *Dev,
IN UINT8 Operation,
IN UINT64 Id
)
{
INT32 Index;
blkif_request_t *Request;
BOOLEAN Notify;
XenPvBlockWaitSlot (Dev);
Index = Dev->Ring.req_prod_pvt;
Request = RING_GET_REQUEST(&Dev->Ring, Index);
Request->operation = Operation;
Request->nr_segments = 0;
Request->handle = Dev->DeviceId;
Request->id = Id;
/* Not needed anyway, but the backend will check it */
Request->sector_number = 0;
Dev->Ring.req_prod_pvt = Index + 1;
MemoryFence ();
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
if (Notify) {
XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
UINT32 ReturnCode;
ReturnCode = XenBusIo->EventChannelNotify (XenBusIo, Dev->EventChannel);
if (ReturnCode != 0) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: Unexpected return value from EventChannelNotify: %d\n",
ReturnCode));
}
}
}
VOID
XenPvBlockSync (
IN XEN_BLOCK_FRONT_DEVICE *Dev
)
{
if (Dev->MediaInfo.ReadWrite) {
if (Dev->MediaInfo.FeatureBarrier) {
XenPvBlockPushOperation (Dev, BLKIF_OP_WRITE_BARRIER, 0);
}
if (Dev->MediaInfo.FeatureFlushCache) {
XenPvBlockPushOperation (Dev, BLKIF_OP_FLUSH_DISKCACHE, 0);
}
}
/* Note: This won't finish if another thread enqueues requests. */
while (TRUE) {
XenPvBlockAsyncIoPoll (Dev);
if (RING_FREE_REQUESTS (&Dev->Ring) == RING_SIZE (&Dev->Ring)) {
break;
}
}
}
VOID
XenPvBlockAsyncIoPoll (
IN XEN_BLOCK_FRONT_DEVICE *Dev
)
{
RING_IDX ProducerIndex, ConsumerIndex;
blkif_response_t *Response;
INT32 More;
do {
ProducerIndex = Dev->Ring.sring->rsp_prod;
/* Ensure we see queued responses up to 'ProducerIndex'. */
MemoryFence ();
ConsumerIndex = Dev->Ring.rsp_cons;
while (ConsumerIndex != ProducerIndex) {
XEN_BLOCK_FRONT_IO *IoData = NULL;
INT16 Status;
Response = RING_GET_RESPONSE (&Dev->Ring, ConsumerIndex);
IoData = (VOID *) (UINTN) Response->id;
Status = Response->status;
switch (Response->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
{
INT32 Index;
if (Status != BLKIF_RSP_OKAY) {
DEBUG ((EFI_D_ERROR,
"XenPvBlk: "
"%a error %d on %a at sector %p, num bytes %p\n",
Response->operation == BLKIF_OP_READ ? "read" : "write",
Status, IoData->Dev->NodeName,
IoData->Sector,
IoData->Size));
}
for (Index = 0; Index < IoData->NumRef; Index++) {
Dev->XenBusIo->GrantEndAccess (Dev->XenBusIo, IoData->GrantRef[Index]);
}
break;
}
case BLKIF_OP_WRITE_BARRIER:
if (Status != BLKIF_RSP_OKAY) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: write barrier error %d\n", Status));
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:
if (Status != BLKIF_RSP_OKAY) {
DEBUG ((EFI_D_ERROR, "XenPvBlk: flush error %d\n", Status));
}
break;
default:
DEBUG ((EFI_D_ERROR,
"XenPvBlk: unrecognized block operation %d response (status %d)\n",
Response->operation, Status));
break;
}
Dev->Ring.rsp_cons = ++ConsumerIndex;
if (IoData != NULL) {
IoData->Status = Status ? EFI_DEVICE_ERROR : EFI_SUCCESS;
}
if (Dev->Ring.rsp_cons != ConsumerIndex) {
/* We reentered, we must not continue here */
break;
}
}
RING_FINAL_CHECK_FOR_RESPONSES (&Dev->Ring, More);
} while (More != 0);
}

--- /dev/null
+++ b/OvmfPkg/XenPvBlkDxe/BlockFront.h
@@ -0,0 +1,101 @@
/** @file
BlockFront function and type declarations.
Copyright (C) 2014, Citrix Ltd.
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "XenPvBlkDxe.h"
#include <IndustryStandard/Xen/event_channel.h>
#include <IndustryStandard/Xen/io/blkif.h>
typedef struct _XEN_BLOCK_FRONT_DEVICE XEN_BLOCK_FRONT_DEVICE;
typedef struct _XEN_BLOCK_FRONT_IO XEN_BLOCK_FRONT_IO;
struct _XEN_BLOCK_FRONT_IO
{
XEN_BLOCK_FRONT_DEVICE *Dev;
UINT8 *Buffer;
UINTN Size;
UINTN Sector; ///< In 512-byte sector units.
grant_ref_t GrantRef[BLKIF_MAX_SEGMENTS_PER_REQUEST];
INT32 NumRef;
EFI_STATUS Status;
};
typedef struct
{
UINT64 Sectors;
UINT32 SectorSize;
UINT32 VDiskInfo;
BOOLEAN ReadWrite;
BOOLEAN CdRom;
BOOLEAN FeatureBarrier;
BOOLEAN FeatureFlushCache;
} XEN_BLOCK_FRONT_MEDIA_INFO;
#define XEN_BLOCK_FRONT_SIGNATURE SIGNATURE_32 ('X', 'p', 'v', 'B')
struct _XEN_BLOCK_FRONT_DEVICE {
UINT32 Signature;
EFI_BLOCK_IO_PROTOCOL BlockIo;
domid_t DomainId;
blkif_front_ring_t Ring;
grant_ref_t RingRef;
evtchn_port_t EventChannel;
blkif_vdev_t DeviceId;
CONST CHAR8 *NodeName;
XEN_BLOCK_FRONT_MEDIA_INFO MediaInfo;
VOID *StateWatchToken;
XENBUS_PROTOCOL *XenBusIo;
};
#define XEN_BLOCK_FRONT_FROM_BLOCK_IO(b) \
CR (b, XEN_BLOCK_FRONT_DEVICE, BlockIo, XEN_BLOCK_FRONT_SIGNATURE)
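//
// Editor's illustrative sketch (hypothetical use, not part of this
// patch): recovering the device instance from the EFI_BLOCK_IO_PROTOCOL
// pointer passed to a BlockIo member function.
//
//   XEN_BLOCK_FRONT_DEVICE *Dev;
//   Dev = XEN_BLOCK_FRONT_FROM_BLOCK_IO (This);
//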
EFI_STATUS
XenPvBlockFrontInitialization (
IN XENBUS_PROTOCOL *XenBusIo,
IN CONST CHAR8 *NodeName,
OUT XEN_BLOCK_FRONT_DEVICE **DevPtr
);
VOID
XenPvBlockFrontShutdown (
IN XEN_BLOCK_FRONT_DEVICE *Dev
);
VOID
XenPvBlockAsyncIo (
IN OUT XEN_BLOCK_FRONT_IO *IoData,
IN BOOLEAN IsWrite
);
EFI_STATUS
XenPvBlockIo (
IN OUT XEN_BLOCK_FRONT_IO *IoData,
IN BOOLEAN IsWrite
);
VOID
XenPvBlockAsyncIoPoll (
IN XEN_BLOCK_FRONT_DEVICE *Dev
);
VOID
XenPvBlockSync (
IN XEN_BLOCK_FRONT_DEVICE *Dev
);

--- a/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c
+++ b/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c
@@ -20,6 +20,8 @@
#include "XenPvBlkDxe.h"
#include "BlockFront.h"
///
/// Driver Binding Protocol instance
@@ -258,6 +260,7 @@ XenPvBlkDxeDriverBindingStart (
{
EFI_STATUS Status;
XENBUS_PROTOCOL *XenBusIo;
XEN_BLOCK_FRONT_DEVICE *Dev;
Status = gBS->OpenProtocol (
ControllerHandle,
@@ -271,7 +274,17 @@ XenPvBlkDxeDriverBindingStart (
return Status;
}
Status = XenPvBlockFrontInitialization (XenBusIo, XenBusIo->Node, &Dev);
if (EFI_ERROR (Status)) {
goto CloseProtocol;
}
return EFI_SUCCESS;
CloseProtocol:
gBS->CloseProtocol (ControllerHandle, &gXenBusProtocolGuid,
This->DriverBindingHandle, ControllerHandle);
return Status;
}
/**

--- a/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf
+++ b/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf
@@ -33,6 +33,8 @@
XenPvBlkDxe.c
ComponentName.c
ComponentName.h
BlockFront.c
BlockFront.h
[LibraryClasses]