OvmfPkg/VirtioGpuDxe: map virtio GPU command objects to device addresses

Every virtio GPU command used by VirtioGpuDxe is synchronous and formatted
as a two-descriptor chain: request, response. The internal workhorse
function that all the command-specific functions call for such messaging
is VirtioGpuSendCommand().

In VirtioGpuSendCommand(), map the request from system memory to bus
master device address for BusMasterRead operation, and map the response
from system memory to bus master device address for BusMasterWrite
operation.

Pass the bus master device addresses to VirtioAppendDesc(). (See also
commit 4b725858de68, "OvmfPkg/VirtioLib: change the parameter of
VirtioAppendDesc() to UINT64", 2017-08-23.)

Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
Tested-by: Brijesh Singh <brijesh.singh@amd.com>
This commit is contained in:
Laszlo Ersek 2017-08-26 17:34:51 +02:00
parent 9bc5026c19
commit 067b648332

View File

@ -278,7 +278,8 @@ VirtioGpuExitBoot (
code has been logged on the EFI_D_ERROR level.
@return Codes for unexpected errors in VirtIo
        messaging, or request/response
        mapping/unmapping.
**/
STATIC STATIC
EFI_STATUS EFI_STATUS
@ -294,6 +295,10 @@ VirtioGpuSendCommand (
volatile VIRTIO_GPU_CONTROL_HEADER Response;
EFI_STATUS Status;
UINT32 ResponseSize;
EFI_PHYSICAL_ADDRESS RequestDeviceAddress;
VOID *RequestMap;
EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;
VOID *ResponseMap;
//
// Initialize Header.
@ -312,20 +317,80 @@ VirtioGpuSendCommand (
ASSERT (RequestSize >= sizeof *Header);
ASSERT (RequestSize <= MAX_UINT32);
//
// Map request and response to bus master device addresses.
//
Status = VirtioMapAllBytesInSharedBuffer (
VgpuDev->VirtIo,
VirtioOperationBusMasterRead,
(VOID *)Header,
RequestSize,
&RequestDeviceAddress,
&RequestMap
);
if (EFI_ERROR (Status)) {
return Status;
}
Status = VirtioMapAllBytesInSharedBuffer (
VgpuDev->VirtIo,
VirtioOperationBusMasterWrite,
(VOID *)&Response,
sizeof Response,
&ResponseDeviceAddress,
&ResponseMap
);
if (EFI_ERROR (Status)) {
goto UnmapRequest;
}
//
// Compose the descriptor chain.
//
VirtioPrepare (&VgpuDev->Ring, &Indices);
VirtioAppendDesc (
  &VgpuDev->Ring,
  RequestDeviceAddress,
  (UINT32)RequestSize,
  VRING_DESC_F_NEXT,
  &Indices
  );
VirtioAppendDesc (
  &VgpuDev->Ring,
  ResponseDeviceAddress,
  (UINT32)sizeof Response,
  VRING_DESC_F_WRITE,
  &Indices
  );
//
// Send the command.
//
Status = VirtioFlush (VgpuDev->VirtIo, VIRTIO_GPU_CONTROL_QUEUE,
           &VgpuDev->Ring, &Indices, &ResponseSize);
if (EFI_ERROR (Status)) {
goto UnmapResponse;
}
//
// Verify response size.
//
if (ResponseSize != sizeof Response) {
DEBUG ((EFI_D_ERROR, "%a: malformed response to Request=0x%x\n",
__FUNCTION__, (UINT32)RequestType));
Status = EFI_PROTOCOL_ERROR;
goto UnmapResponse;
}
//
// Unmap response and request, in reverse order of mapping. On error, the
// respective mapping is invalidated anyway, only the data may not have been
// committed to system memory (in case of VirtioOperationBusMasterWrite).
//
Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
if (EFI_ERROR (Status)) {
goto UnmapRequest;
}
Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
if (EFI_ERROR (Status)) {
  return Status;
}
@ -333,12 +398,6 @@ VirtioGpuSendCommand (
//
// Parse the response.
//
if (ResponseSize != sizeof Response) {
DEBUG ((EFI_D_ERROR, "%a: malformed response to Request=0x%x\n",
__FUNCTION__, (UINT32)RequestType));
return EFI_PROTOCOL_ERROR;
}
if (Response.Type == VirtioGpuRespOkNodata) {
  return EFI_SUCCESS;
}
@ -346,6 +405,14 @@ VirtioGpuSendCommand (
DEBUG ((EFI_D_ERROR, "%a: Request=0x%x Response=0x%x\n", __FUNCTION__,
  (UINT32)RequestType, Response.Type));
return EFI_DEVICE_ERROR;
UnmapResponse:
VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
UnmapRequest:
VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
return Status;
}

/**