OvmfPkg: Apply uncrustify changes

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=3737

Apply uncrustify changes to .c/.h files in the OvmfPkg package

Cc: Andrew Fish <afish@apple.com>
Cc: Leif Lindholm <leif@nuviainc.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Signed-off-by: Michael Kubacki <michael.kubacki@microsoft.com>
Reviewed-by: Andrew Fish <afish@apple.com>
Author:     Michael Kubacki
Date:       2021-12-05 14:54:09 -08:00
Committed:  mergify[bot]
Commit:     ac0a286f4d (parent d1050b9dff)
445 changed files with 30894 additions and 26369 deletions
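For context, uncrustify is the formatter TianoCore adopted in BZ 3737 to apply
the EDK II coding style mechanically, so every hunk below is a whitespace and
layout change only. A representative before/after, built from declarations
that appear in this commit (the exact column alignment is driven by the
project's uncrustify configuration and is only approximated here):

/* Before uncrustify */
#define XEN_LEGACY_MAX_VCPUS 32
struct arch_vcpu_info {
    UINTN cr2;
    UINTN pad[5]; /* sizeof(vcpu_info_t) == 64 */
};
DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);

/* After uncrustify: aligned #define values, two-space indents, and a
   space before the parenthesis of function-like invocations */
#define XEN_LEGACY_MAX_VCPUS  32
struct arch_vcpu_info {
  UINTN    cr2;
  UINTN    pad[5];              /* sizeof(vcpu_info_t) == 64 */
};
DEFINE_XEN_GUEST_HANDLE (evtchn_port_t);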


@@ -152,9 +152,9 @@
* at Documentation/devicetree/bindings/arm/xen.txt.
*/
#define XEN_HYPERCALL_TAG 0xEA1
#define uint64_aligned_t UINT64 __attribute__((aligned(8)))
#ifndef __ASSEMBLY__
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
@@ -173,134 +173,137 @@
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
/* this is going to be changed on 64 bit */
#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
#define set_xen_guest_handle_raw(hnd, val) \
do { \
typeof(&(hnd)) _sxghr_tmp = &(hnd); \
_sxghr_tmp->q = 0; \
_sxghr_tmp->p = val; \
} while ( 0 )
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined (__GNUC__) && !defined (__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
#define __DECL_REG(n64, n32) union { \
UINT64 n64; \
UINT32 n32; \
}
#else
/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
#define __DECL_REG(n64, n32) UINT64 n64
#endif
struct vcpu_guest_core_regs {
  /*          Aarch64       Aarch32 */
  __DECL_REG (x0, r0_usr);
  __DECL_REG (x1, r1_usr);
  __DECL_REG (x2, r2_usr);
  __DECL_REG (x3, r3_usr);
  __DECL_REG (x4, r4_usr);
  __DECL_REG (x5, r5_usr);
  __DECL_REG (x6, r6_usr);
  __DECL_REG (x7, r7_usr);
  __DECL_REG (x8, r8_usr);
  __DECL_REG (x9, r9_usr);
  __DECL_REG (x10, r10_usr);
  __DECL_REG (x11, r11_usr);
  __DECL_REG (x12, r12_usr);
  __DECL_REG (x13, sp_usr);
  __DECL_REG (x14, lr_usr);
  __DECL_REG (x15, __unused_sp_hyp);
  __DECL_REG (x16, lr_irq);
  __DECL_REG (x17, sp_irq);
  __DECL_REG (x18, lr_svc);
  __DECL_REG (x19, sp_svc);
  __DECL_REG (x20, lr_abt);
  __DECL_REG (x21, sp_abt);
  __DECL_REG (x22, lr_und);
  __DECL_REG (x23, sp_und);
  __DECL_REG (x24, r8_fiq);
  __DECL_REG (x25, r9_fiq);
  __DECL_REG (x26, r10_fiq);
  __DECL_REG (x27, r11_fiq);
  __DECL_REG (x28, r12_fiq);
  __DECL_REG (x29, sp_fiq);
  __DECL_REG (x30, lr_fiq);
  /* Return address and mode */
  __DECL_REG (pc64, pc32); /* ELR_EL2 */
  UINT32 cpsr; /* SPSR_EL2 */
  union {
    UINT32 spsr_el1; /* AArch64 */
    UINT32 spsr_svc; /* AArch32 */
  };
  /* AArch32 guests only */
  UINT32 spsr_fiq, spsr_irq, spsr_und, spsr_abt;
  /* AArch64 guests only */
  UINT64 sp_el0;
  UINT64 sp_el1, elr_el1;
};
typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
DEFINE_XEN_GUEST_HANDLE (vcpu_guest_core_regs_t);
#undef __DECL_REG
typedef UINT64 xen_pfn_t;
#define PRI_xen_pfn PRIx64
/* Maximum number of virtual CPUs in legacy multi-processor guests. */
/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
#define XEN_LEGACY_MAX_VCPUS 1
typedef UINT64 xen_ulong_t;
#define PRI_xen_ulong PRIx64
#if defined (__XEN__) || defined (__XEN_TOOLS__)
struct vcpu_guest_context {
#define _VGCF_online 0
#define VGCF_online (1<<_VGCF_online)
  UINT32 flags; /* VGCF_* */
  struct vcpu_guest_core_regs user_regs; /* Core CPU registers */
  UINT32 sctlr;
  UINT64 ttbcr, ttbr0, ttbr1;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE (vcpu_guest_context_t);
#endif
struct arch_vcpu_info {
};
typedef struct arch_vcpu_info arch_vcpu_info_t;
struct arch_shared_info {
};
typedef struct arch_shared_info arch_shared_info_t;
typedef UINT64 xen_callback_t;
#endif
#if defined (__XEN__) || defined (__XEN_TOOLS__)
/* PSR bits (CPSR, SPSR)*/
@@ -314,30 +317,30 @@ typedef UINT64 xen_callback_t;
#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
/* 32 bit modes */
#define PSR_MODE_USR 0x10
#define PSR_MODE_FIQ 0x11
#define PSR_MODE_IRQ 0x12
#define PSR_MODE_SVC 0x13
#define PSR_MODE_MON 0x16
#define PSR_MODE_ABT 0x17
#define PSR_MODE_HYP 0x1a
#define PSR_MODE_UND 0x1b
#define PSR_MODE_SYS 0x1f
/* 64 bit modes */
#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */
#define PSR_MODE_EL3h 0x0d
#define PSR_MODE_EL3t 0x0c
#define PSR_MODE_EL2h 0x09
#define PSR_MODE_EL2t 0x08
#define PSR_MODE_EL1h 0x05
#define PSR_MODE_EL1t 0x04
#define PSR_MODE_EL0t 0x00
#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)
#define SCTLR_GUEST_INIT 0x00c50078
/*
 * Virtual machine platform (memory layout, interrupts)
@@ -354,56 +357,56 @@ typedef UINT64 xen_callback_t;
 */
/* vGIC v2 mappings */
#define GUEST_GICD_BASE 0x03001000ULL
#define GUEST_GICD_SIZE 0x00001000ULL
#define GUEST_GICC_BASE 0x03002000ULL
#define GUEST_GICC_SIZE 0x00000100ULL
/* vGIC v3 mappings */
#define GUEST_GICV3_GICD_BASE 0x03001000ULL
#define GUEST_GICV3_GICD_SIZE 0x00010000ULL
#define GUEST_GICV3_RDIST_STRIDE 0x20000ULL
#define GUEST_GICV3_RDIST_REGIONS 1
#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU7 */
#define GUEST_GICV3_GICR0_SIZE 0x00100000ULL
/* 16MB == 4096 pages reserved for guest to use as a region to map its
 * grant table in.
 */
#define GUEST_GNTTAB_BASE 0x38000000ULL
#define GUEST_GNTTAB_SIZE 0x01000000ULL
#define GUEST_MAGIC_BASE 0x39000000ULL
#define GUEST_MAGIC_SIZE 0x01000000ULL
#define GUEST_RAM_BANKS 2
#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE 0xc0000000ULL
#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE 0xfe00000000ULL
#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */
/* Largest amount of actual RAM, not including holes */
#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)
/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */
#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
/* Interrupts */
#define GUEST_TIMER_VIRT_PPI 27
#define GUEST_TIMER_PHYS_S_PPI 29
#define GUEST_TIMER_PHYS_NS_PPI 30
#define GUEST_EVTCHN_PPI 31
/* PSCI functions */
#define PSCI_cpu_suspend 0
#define PSCI_cpu_off 1
#define PSCI_cpu_on 2
#define PSCI_migrate 3
#endif


@@ -84,7 +84,7 @@
* of the structure must check that memmap_entries is non-zero
* before trying to read the memory map.
*/
#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
/*
 * The values used in the type field of the memory map table entries are
@@ -107,37 +107,37 @@
 * represent the layout described there using C types.
 */
struct hvm_start_info {
  UINT32 magic; /* Contains the magic value 0x336ec578 */
  /* ("xEn3" with the 0x80 bit of the "E" set).*/
  UINT32 version; /* Version of this structure. */
  UINT32 flags; /* SIF_xxx flags. */
  UINT32 nr_modules; /* Number of modules passed to the kernel. */
  UINT64 modlist_paddr; /* Physical address of an array of */
  /* hvm_modlist_entry. */
  UINT64 cmdline_paddr; /* Physical address of the command line. */
  UINT64 rsdp_paddr; /* Physical address of the RSDP ACPI data */
  /* structure. */
  /* All following fields only present in version 1 and newer */
  UINT64 memmap_paddr; /* Physical address of an array of */
  /* hvm_memmap_table_entry. */
  UINT32 memmap_entries; /* Number of entries in the memmap table. */
  /* Value will be zero if there is no memory */
  /* map being provided. */
  UINT32 reserved; /* Must be zero. */
};
struct hvm_modlist_entry {
  UINT64 paddr; /* Physical address of the module. */
  UINT64 size; /* Size of the module in bytes. */
  UINT64 cmdline_paddr; /* Physical address of the command line. */
  UINT64 reserved;
};
struct hvm_memmap_table_entry {
  UINT64 addr; /* Base address of the memory region */
  UINT64 size; /* Size of the memory region in bytes */
  UINT32 type; /* Mapping type */
  UINT32 reserved; /* Must be zero for Version 1. */
};
};
#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */


@@ -23,9 +23,10 @@
#ifndef __ASSEMBLY__
struct arch_vcpu_info {
  UINTN cr2;
  UINTN pad[5]; /* sizeof(vcpu_info_t) == 64 */
};
typedef struct arch_vcpu_info arch_vcpu_info_t;
#endif /* !__ASSEMBLY__ */


@@ -23,9 +23,10 @@
#ifndef __ASSEMBLY__
struct arch_vcpu_info {
  UINTN cr2;
  UINTN pad; /* sizeof(vcpu_info_t) == 64 */
};
typedef struct arch_vcpu_info arch_vcpu_info_t;
#endif /* !__ASSEMBLY__ */


@@ -14,13 +14,13 @@
#define __XEN_PUBLIC_ARCH_X86_XEN_H__
/* Structural guest handles introduced in 0x00030201. */
#if __XEN_INTERFACE_VERSION__ >= 0x00030201
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
#else
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef type * __guest_handle_ ## name
#endif
/*
 * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
@@ -33,55 +33,56 @@
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined (MDE_CPU_IA32)
#include "xen-x86_32.h"
#elif defined (MDE_CPU_X64)
#include "xen-x86_64.h"
#endif
#ifndef __ASSEMBLY__
typedef UINTN xen_pfn_t;
#define PRI_xen_pfn "lx"
#endif
#define XEN_HAVE_PV_UPCALL_MASK 1
/* Maximum number of virtual CPUs in legacy multi-processor guests. */
#define XEN_LEGACY_MAX_VCPUS 32
#ifndef __ASSEMBLY__
typedef UINTN xen_ulong_t;
#define PRI_xen_ulong "lx"
typedef UINT64 tsc_timestamp_t; /* RDTSC timestamp */
#ifdef MDE_CPU_IA32
#pragma pack(4)
#endif
struct arch_shared_info {
  UINTN max_pfn; /* max pfn that appears in table */
  /* Frame containing list of mfns containing list of mfns containing p2m. */
  xen_pfn_t pfn_to_mfn_frame_list_list;
  UINTN nmi_reason;
  UINT64 pad[32];
};
typedef struct arch_shared_info arch_shared_info_t;
#ifdef MDE_CPU_IA32
#pragma pack()
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */


@@ -44,13 +44,13 @@
*/
/* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */
#define EVTCHNOP_close 3
#define EVTCHNOP_send 4
#define EVTCHNOP_alloc_unbound 6
/* ` } */
typedef UINT32 evtchn_port_t;
DEFINE_XEN_GUEST_HANDLE (evtchn_port_t);
/*
 * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
@@ -61,11 +61,12 @@ DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
 * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
 */
struct evtchn_alloc_unbound {
  /* IN parameters */
  domid_t dom, remote_dom;
  /* OUT parameters */
  evtchn_port_t port;
};
typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
/*
@@ -74,9 +75,10 @@ typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
 * (EVTCHNSTAT_unbound), awaiting a new connection.
 */
struct evtchn_close {
  /* IN parameters. */
  evtchn_port_t port;
};
typedef struct evtchn_close evtchn_close_t;
/*
@@ -84,9 +86,10 @@ typedef struct evtchn_close evtchn_close_t;
 * endpoint is <port>.
 */
struct evtchn_send {
  /* IN parameters. */
  evtchn_port_t port;
};
typedef struct evtchn_send evtchn_send_t;
#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
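For reviewers cross-checking the structures above: in OvmfPkg these are
consumed through XenHypercallLib. A minimal notification sketch, assuming the
XenHypercallEventChannelOp wrapper from that library and hypothetical local
variable names:

  evtchn_send_t  Send;

  Send.port = PortNumber;   /* port obtained via EVTCHNOP_alloc_unbound */
  XenHypercallEventChannelOp (EVTCHNOP_send, &Send);

The space before the argument list is the invocation style this commit's
uncrustify pass enforces throughout the package.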


@@ -109,28 +109,30 @@ typedef UINT32 grant_ref_t;
* for backwards compatibility. New guests should use version 2.
*/
#if __XEN_INTERFACE_VERSION__ < 0x0003020a
#define grant_entry_v1 grant_entry
#define grant_entry_v1_t grant_entry_t
#endif
struct grant_entry_v1 {
  /* GTF_xxx: various type and flag information. [XEN,GST] */
  UINT16 flags;
  /* The domain being granted foreign privileges. [GST] */
  domid_t domid;
  /*
   * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
   * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
   */
  UINT32 frame;
};
typedef struct grant_entry_v1 grant_entry_v1_t;
/* The first few grant table entries will be preserved across grant table
 * version changes and may be pre-populated at domain creation by tools.
 */
#define GNTTAB_NR_RESERVED_ENTRIES 8
#define GNTTAB_RESERVED_CONSOLE 0
#define GNTTAB_RESERVED_XENSTORE 1
/*
 * Type of grant entry.
@@ -141,11 +143,11 @@ typedef struct grant_entry_v1 grant_entry_v1_t;
 * GTF_transitive: Allow @domid to transitively access a subrange of
 * @trans_grant in @trans_domid. No mappings are allowed.
 */
#define GTF_invalid (0U<<0)
#define GTF_permit_access (1U<<0)
#define GTF_accept_transfer (2U<<0)
#define GTF_transitive (3U<<0)
#define GTF_type_mask (3U<<0)
/*
 * Subflags for GTF_permit_access.
@@ -157,20 +159,20 @@ typedef struct grant_entry_v1 grant_entry_v1_t;
 * will only be allowed to copy from the grant, and not
 * map it. [GST]
 */
#define _GTF_readonly (2)
#define GTF_readonly (1U<<_GTF_readonly)
#define _GTF_reading (3)
#define GTF_reading (1U<<_GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U<<_GTF_writing)
#define _GTF_PWT (5)
#define GTF_PWT (1U<<_GTF_PWT)
#define _GTF_PCD (6)
#define GTF_PCD (1U<<_GTF_PCD)
#define _GTF_PAT (7)
#define GTF_PAT (1U<<_GTF_PAT)
#define _GTF_sub_page (8)
#define GTF_sub_page (1U<<_GTF_sub_page)
/*
 * Subflags for GTF_accept_transfer:
@@ -182,10 +184,10 @@ typedef struct grant_entry_v1 grant_entry_v1_t;
 * after reading GTF_transfer_committed. Xen will always write the frame
 * address, followed by ORing this flag, in a timely manner.
 */
#define _GTF_transfer_committed (2)
#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
#define _GTF_transfer_completed (3)
#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
/*
 * Version 2 grant table entries. These fulfil the same role as
@@ -197,68 +199,71 @@ typedef struct grant_entry_v1 grant_entry_v1_t;
 * on the grant table version in use by the other domain.
 */
#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
/*
 * Version 1 and version 2 grant entries share a common prefix. The
 * fields of the prefix are documented as part of struct
 * grant_entry_v1.
 */
struct grant_entry_header {
  UINT16 flags;
  domid_t domid;
};
typedef struct grant_entry_header grant_entry_header_t;
/*
 * Version 2 of the grant entry structure.
 */
union grant_entry_v2 {
  grant_entry_header_t hdr;
  /*
   * This member is used for V1-style full page grants, where either:
   *
   * -- hdr.type is GTF_accept_transfer, or
   * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
   *
   * In that case, the frame field has the same semantics as the
   * field of the same name in the V1 entry structure.
   */
  struct {
    grant_entry_header_t hdr;
    UINT32 pad0;
    UINT64 frame;
  } full_page;
  /*
   * If the grant type is GTF_grant_access and GTF_sub_page is set,
   * @domid is allowed to access bytes [@page_off,@page_off+@length)
   * in frame @frame.
   */
  struct {
    grant_entry_header_t hdr;
    UINT16 page_off;
    UINT16 length;
    UINT64 frame;
  } sub_page;
  /*
   * If the grant is GTF_transitive, @domid is allowed to use the
   * grant @gref in domain @trans_domid, as if it was the local
   * domain. Obviously, the transitive access must be compatible
   * with the original grant.
   *
   * The current version of Xen does not allow transitive grants
   * to be mapped.
   */
  struct {
    grant_entry_header_t hdr;
    domid_t trans_domid;
    UINT16 pad0;
    grant_ref_t gref;
  } transitive;
  UINT32 __spacer[4]; /* Pad to a power of two */
};
typedef union grant_entry_v2 grant_entry_v2_t;
typedef UINT16 grant_status_t;
@@ -280,8 +285,8 @@ typedef UINT16 grant_status_t;
 */
/* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */
#define GNTTABOP_map_grant_ref 0
#define GNTTABOP_unmap_grant_ref 1
/* ` } */
/*
@@ -307,18 +312,19 @@ typedef UINT32 grant_handle_t;
 * to be accounted to the correct grant reference!
 */
struct gnttab_map_grant_ref {
  /* IN parameters. */
  UINT64 host_addr;
  UINT32 flags; /* GNTMAP_* */
  grant_ref_t ref;
  domid_t dom;
  /* OUT parameters. */
  INT16 status; /* => enum grant_status */
  grant_handle_t handle;
  UINT64 dev_bus_addr;
};
typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE (gnttab_map_grant_ref_t);
/*
 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
@@ -332,74 +338,76 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
 * mappings will remain in the device or host TLBs.
 */
struct gnttab_unmap_grant_ref {
  /* IN parameters. */
  UINT64 host_addr;
  UINT64 dev_bus_addr;
  grant_handle_t handle;
  /* OUT parameters. */
  INT16 status; /* => enum grant_status */
};
typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE (gnttab_unmap_grant_ref_t);
/*
 * Bitfield values for gnttab_map_grant_ref.flags.
 */
/* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map (0)
#define GNTMAP_device_map (1<<_GNTMAP_device_map)
/* Map the grant entry for access by host CPUs. */
#define _GNTMAP_host_map (1)
#define GNTMAP_host_map (1<<_GNTMAP_host_map)
/* Accesses to the granted frame will be restricted to read-only access. */
#define _GNTMAP_readonly (2)
#define GNTMAP_readonly (1<<_GNTMAP_readonly)
/*
 * GNTMAP_host_map subflag:
 * 0 => The host mapping is usable only by the guest OS.
 * 1 => The host mapping is usable by guest OS + current application.
 */
#define _GNTMAP_application_map (3)
#define GNTMAP_application_map (1<<_GNTMAP_application_map)
/*
 * GNTMAP_contains_pte subflag:
 * 0 => This map request contains a host virtual address.
 * 1 => This map request contains the machine address of the PTE to update.
 */
#define _GNTMAP_contains_pte (4)
#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
#define _GNTMAP_can_fail (5)
#define GNTMAP_can_fail (1<<_GNTMAP_can_fail)
/*
 * Bits to be placed in guest kernel available PTE bits (architecture
 * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
 */
#define _GNTMAP_guest_avail0 (16)
#define GNTMAP_guest_avail_mask ((UINT32)~0 << _GNTMAP_guest_avail0)
/*
 * Values for error status returns. All errors are -ve.
 */
/* ` enum grant_status { */
#define GNTST_okay (0) /* Normal return. */
#define GNTST_general_error (-1) /* General undefined error. */
#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
#define GNTST_address_too_big (-11) /* transfer page address too large. */
#define GNTST_eagain (-12) /* Operation not done; try again. */
/* ` } */
#define GNTTABOP_error_msgs { \
"okay", \
"undefined error", \
"unrecognised domain id", \


@@ -8,14 +8,15 @@
#include "../xen.h"
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
#define HVMOP_get_param 1
struct xen_hvm_param {
  domid_t domid; /* IN */
  UINT32 index; /* IN */
  UINT64 value; /* IN/OUT */
};
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE (xen_hvm_param_t);
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */


@@ -22,26 +22,26 @@
* method is available.
* If val == 0 then CPU0 event-channel notifications are not delivered.
*/
#define HVM_PARAM_CALLBACK_IRQ 0
/*
 * These are not used by Xen. They are here for convenience of HVM-guest
 * xenbus implementations.
 */
#define HVM_PARAM_STORE_PFN 1
#define HVM_PARAM_STORE_EVTCHN 2
#define HVM_PARAM_PAE_ENABLED 4
#define HVM_PARAM_IOREQ_PFN 5
#define HVM_PARAM_BUFIOREQ_PFN 6
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#if defined (MDE_CPU_IA32) || defined (MDE_CPU_X64)
/* Expose Viridian interfaces to this HVM guest? */
#define HVM_PARAM_VIRIDIAN 9
#endif
@@ -64,33 +64,33 @@
 * Missed interrupts are collapsed together and delivered as one 'late tick'.
 * Guest time always tracks wallclock (i.e., real) time.
 */
#define HVM_PARAM_TIMER_MODE 10
#define HVMPTM_delay_for_missed_ticks 0
#define HVMPTM_no_delay_for_missed_ticks 1
#define HVMPTM_no_missed_ticks_pending 2
#define HVMPTM_one_missed_tick_pending 3
/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
#define HVM_PARAM_HPET_ENABLED 11
/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT 12
/* Device Model domain, defaults to 0. */
#define HVM_PARAM_DM_DOMAIN 13
/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14
/* TSS used on Intel when CR0.PE=0. */
#define HVM_PARAM_VM86_TSS 15
/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
#define HVM_PARAM_VPT_ALIGN 16
/* Console debug shared memory ring and event channel */
#define HVM_PARAM_CONSOLE_PFN 17
#define HVM_PARAM_CONSOLE_EVTCHN 18
/*
 * Select location of ACPI PM1a and TMR control blocks. Currently two locations
@@ -101,7 +101,7 @@
 * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
 * You can find these address definitions in <hvm/ioreq.h>
 */
#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
/* Enable blocking memory events, async or sync (pause vcpu until response)
 * onchangeonly indicates messages only on a change of value */
@@ -112,14 +112,14 @@
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVM_PARAM_MEMORY_EVENT_MSR 30
#define HVMPME_MODE_MASK (3 << 0)
#define HVMPME_mode_disabled 0
#define HVMPME_mode_async 1
#define HVMPME_mode_sync 2
#define HVMPME_onchangeonly (1 << 2)
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
/* Params for the mem event rings */
#define HVM_PARAM_PAGING_RING_PFN 27
@@ -127,8 +127,8 @@
#define HVM_PARAM_SHARING_RING_PFN 29
/* SHUTDOWN_* action in case of a triple fault */
#define HVM_PARAM_TRIPLE_FAULT_REASON 31
#define HVM_NR_PARAMS 32
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */


@@ -28,9 +28,9 @@
*/
#ifndef blkif_vdev_t
#define blkif_vdev_t UINT16
#endif
#define blkif_sector_t UINT64
/*
 * Feature and Parameter Negotiation
@@ -415,8 +415,9 @@
/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ 0
#define BLKIF_OP_WRITE 1
/*
 * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
 * operation code ("barrier request") must be completed prior to the
@@ -425,19 +426,22 @@
 *
 * Optional. See "feature-barrier" XenBus node documentation above.
 */
#define BLKIF_OP_WRITE_BARRIER 2
/*
 * Commit any uncommitted contents of the backing device's volatile cache
 * to stable storage.
 *
 * Optional. See "feature-flush-cache" XenBus node documentation above.
 */
#define BLKIF_OP_FLUSH_DISKCACHE 3
/*
 * Used in SLES sources for device specific command packet
 * contained within the request. Reserved for that purpose.
 */
#define BLKIF_OP_RESERVED_1 4
/*
 * Indicate to the backend device that a region of storage is no longer in
 * use, and may be discarded at any time without impact to the client. If
@@ -458,7 +462,7 @@
 * "discard-granularity", and "discard-secure" in the XenBus node
 * documentation above.
 */
#define BLKIF_OP_DISCARD 5
/*
 * Recognized if "feature-max-indirect-segments" in present in the backend
@@ -482,19 +486,19 @@
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT 6
/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
/*
 * Maximum number of indirect pages to use per request.
 */
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
/*
 * NB. first_sect and last_sect in blkif_request_segment, as well as
@@ -505,31 +509,32 @@
 * 512-byte units.
 */
struct blkif_request_segment {
  grant_ref_t gref; /* reference to I/O buffer frame */
  /* @first_sect: first sector in frame to transfer (inclusive). */
  /* @last_sect: last sector in frame to transfer (inclusive). */
  UINT8 first_sect, last_sect;
};
/*
 * Starting ring element for any I/O request.
 */
#if defined (MDE_CPU_IA32)
//
// pack(4) is necessary when these structs are compiled for Ia32.
// Without it, the struct will have a different alignment than the one
// a backend expect for a 32bit guest.
//
#pragma pack(4)
#endif
struct blkif_request {
  UINT8 operation; /* BLKIF_OP_??? */
  UINT8 nr_segments; /* number of segments */
  blkif_vdev_t handle; /* only for read/write requests */
  UINT64 id; /* private guest value, echoed in resp */
  blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;
/*
@@ -537,58 +542,61 @@ typedef struct blkif_request blkif_request_t;
 * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
 */
struct blkif_request_discard {
  UINT8 operation; /* BLKIF_OP_DISCARD */
  UINT8 flag; /* BLKIF_DISCARD_SECURE or zero */
#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
  blkif_vdev_t handle; /* same as for read/write requests */
  UINT64 id; /* private guest value, echoed in resp */
  blkif_sector_t sector_number; /* start sector idx on disk */
  UINT64 nr_sectors; /* number of contiguous sectors to discard*/
};
typedef struct blkif_request_discard blkif_request_discard_t;
struct blkif_request_indirect {
  UINT8 operation; /* BLKIF_OP_INDIRECT */
  UINT8 indirect_op; /* BLKIF_OP_{READ/WRITE} */
  UINT16 nr_segments; /* number of segments */
  UINT64 id; /* private guest value, echoed in resp */
  blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
  blkif_vdev_t handle; /* same as for read/write requests */
  grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef MDE_CPU_IA32
  UINT64 pad; /* Make it 64 byte aligned on i386 */
#endif
};
typedef struct blkif_request_indirect blkif_request_indirect_t;
struct blkif_response {
  UINT64 id; /* copied from request */
  UINT8 operation; /* copied from request */
  INT16 status; /* BLKIF_RSP_??? */
};
typedef struct blkif_response blkif_response_t;
#if defined (MDE_CPU_IA32)
#pragma pack()
#endif
/*
 * STATUS RETURN CODES.
 */
/* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY 0
/*
 * Generate blkif ring structures and types.
 */
DEFINE_RING_TYPES (blkif, struct blkif_request, struct blkif_response);
#define VDISK_CDROM 0x1
#define VDISK_REMOVABLE 0x2
#define VDISK_READONLY 0x4
#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
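As an aside on the blkif structures above: a front-end fills one
blkif_request_t per ring slot. A minimal single-segment read, using only
fields shown in this header plus RING_GET_REQUEST from ring.h (which follows
below); the variable names are hypothetical:

  blkif_request_t  *Req;

  Req = RING_GET_REQUEST (&FrontRing, FrontRing.req_prod_pvt);
  Req->operation         = BLKIF_OP_READ;
  Req->nr_segments       = 1;
  Req->handle            = Vdev;        /* blkif_vdev_t from XenStore */
  Req->id                = RequestId;   /* echoed back in blkif_response */
  Req->sector_number     = Lba;         /* in 512-byte units */
  Req->seg[0].gref       = BufferGref;  /* grant reference of the data page */
  Req->seg[0].first_sect = 0;
  Req->seg[0].last_sect  = 7;           /* 8 sectors == one 4 KiB page */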


@@ -13,13 +13,13 @@
typedef UINT32 XENCONS_RING_IDX;
#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
struct xencons_interface {
  char in[1024];
  char out[2048];
  XENCONS_RING_IDX in_cons, in_prod;
  XENCONS_RING_IDX out_cons, out_prod;
};
#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */


@@ -7,18 +7,18 @@
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi"
#if defined (MDE_CPU_IA32)
#define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined (MDE_CPU_X64)
#define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined (__arm__) || defined (__aarch64__)
#define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
#else
#error arch fixup needed here
#endif
#endif


@@ -14,19 +14,19 @@
#include "../xen-compat.h"
#if __XEN_INTERFACE_VERSION__ < 0x00030208
#define xen_mb() mb()
#define xen_rmb() rmb()
#define xen_wmb() wmb()
#endif
typedef UINT32 RING_IDX;
/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
/*
 * Calculate size of a shared ring, given the total available space for the
@@ -36,7 +36,8 @@ typedef UINT32 RING_IDX;
 */
#define __CONST_RING_SIZE(_s, _sz) \
(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
@@ -77,13 +78,13 @@ typedef UINT32 RING_IDX;
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
__req_t req; \
__rsp_t rsp; \
}; \
\
/* Shared ring page */ \
struct __name##_sring { \
RING_IDX req_prod, req_event; \
RING_IDX rsp_prod, rsp_event; \
@@ -100,7 +101,7 @@ struct __name##_sring { \
union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
RING_IDX req_prod_pvt; \
RING_IDX rsp_cons; \
@@ -108,7 +109,7 @@ struct __name##_front_ring { \
struct __name##_sring *sring; \
}; \
\
/* "Back" end's private variables */ \
struct __name##_back_ring { \
RING_IDX rsp_prod_pvt; \
RING_IDX req_cons; \
@@ -116,7 +117,7 @@ struct __name##_back_ring { \
struct __name##_sring *sring; \
}; \
\
/* Syntactic sugar */ \
typedef struct __name##_sring __name##_sring_t; \
typedef struct __name##_front_ring __name##_front_ring_t; \
typedef struct __name##_back_ring __name##_back_ring_t
@@ -137,21 +138,21 @@ typedef struct __name##_back_ring __name##_back_ring_t
 */
/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
(VOID)ZeroMem((_s)->private.pvt_pad, sizeof((_s)->private.pvt_pad)); \
(VOID)ZeroMem((_s)->__pad, sizeof((_s)->__pad)); \
} while(0)
#define FRONT_RING_INIT(_r, _s, __size) do { \
(_r)->req_prod_pvt = 0; \
(_r)->rsp_cons = 0; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) do { \
(_r)->rsp_prod_pvt = 0; \
(_r)->req_cons = 0; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
@@ -177,7 +178,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
((_r)->sring->rsp_prod - (_r)->rsp_cons)
#ifdef __GNUC__
#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
UINT32 req = (_r)->sring->req_prod - (_r)->req_cons; \
UINT32 rsp = RING_SIZE(_r) - \
((_r)->req_cons - (_r)->rsp_prod_pvt); \
@@ -207,12 +208,12 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
#define RING_PUSH_REQUESTS(_r) do { \
xen_wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)
#define RING_PUSH_RESPONSES(_r) do { \
xen_wmb(); /* front sees resps /before/ updated producer index */ \
(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
@@ -247,7 +248,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
 * field appropriately.
 */
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->req_prod; \
RING_IDX __new = (_r)->req_prod_pvt; \
xen_wmb(); /* back sees requests /before/ updated producer index */ \
@@ -257,7 +258,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
(RING_IDX)(__new - __old)); \
} while (0)
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->rsp_prod; \
RING_IDX __new = (_r)->rsp_prod_pvt; \
xen_wmb(); /* front sees resps /before/ updated producer index */ \
@@ -267,7 +268,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
(RING_IDX)(__new - __old)); \
} while (0)
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
if (_work_to_do) break; \
(_r)->sring->req_event = (_r)->req_cons + 1; \
@@ -275,7 +276,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)
#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
if (_work_to_do) break; \
(_r)->sring->rsp_event = (_r)->rsp_cons + 1; \

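Context for the ring macros above: blkif.h already instantiates the blkif ring
types via DEFINE_RING_TYPES, so a front-end only has to initialize the shared
page and its private view. A minimal sketch (EFI_PAGE_SIZE stands in for the
size of the granted page; the variable names are hypothetical):

  blkif_sring_t       *SharedRing;   /* one shared, granted page */
  blkif_front_ring_t  FrontRing;

  SHARED_RING_INIT (SharedRing);
  FRONT_RING_INIT (&FrontRing, SharedRing, EFI_PAGE_SIZE);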

@@ -18,37 +18,38 @@
* layers.
*/
enum xenbus_state {
  XenbusStateUnknown = 0,
  XenbusStateInitialising = 1,
  /*
   * InitWait: Finished early initialisation but waiting for information
   * from the peer or hotplug scripts.
   */
  XenbusStateInitWait = 2,
  /*
   * Initialised: Waiting for a connection from the peer.
   */
  XenbusStateInitialised = 3,
  XenbusStateConnected = 4,
  /*
   * Closing: The device is being closed due to an error or an unplug event.
   */
  XenbusStateClosing = 5,
  XenbusStateClosed = 6,
  /*
   * Reconfiguring: The device is being reconfigured.
   */
  XenbusStateReconfiguring = 7,
  XenbusStateReconfigured = 8
};
typedef enum xenbus_state XenbusState;
#endif /* _XEN_PUBLIC_IO_XENBUS_H */


@@ -10,115 +10,112 @@
#ifndef _XS_WIRE_H
#define _XS_WIRE_H
enum xsd_sockmsg_type {
  XS_DEBUG,
  XS_DIRECTORY,
  XS_READ,
  XS_GET_PERMS,
  XS_WATCH,
  XS_UNWATCH,
  XS_TRANSACTION_START,
  XS_TRANSACTION_END,
  XS_INTRODUCE,
  XS_RELEASE,
  XS_GET_DOMAIN_PATH,
  XS_WRITE,
  XS_MKDIR,
  XS_RM,
  XS_SET_PERMS,
  XS_WATCH_EVENT,
  XS_ERROR,
  XS_IS_DOMAIN_INTRODUCED,
  XS_RESUME,
  XS_SET_TARGET,
  XS_RESTRICT,
  XS_RESET_WATCHES,
  XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
#define XS_WRITE_CREATE "CREATE"
#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
/* We hand errors as strings, for portability. */
struct xsd_errors {
  INT32 errnum;
  const CHAR8 *errstring;
};
#ifdef EINVAL
#define XSD_ERROR(x) { x, #x }
/* LINTED: static unused */
static struct xsd_errors xsd_errors[]
#if defined (__GNUC__)
__attribute__ ((unused))
#endif
= {
  XSD_ERROR (EINVAL),
  XSD_ERROR (EACCES),
  XSD_ERROR (EEXIST),
  XSD_ERROR (EISDIR),
  XSD_ERROR (ENOENT),
  XSD_ERROR (ENOMEM),
  XSD_ERROR (ENOSPC),
  XSD_ERROR (EIO),
  XSD_ERROR (ENOTEMPTY),
  XSD_ERROR (ENOSYS),
  XSD_ERROR (EROFS),
  XSD_ERROR (EBUSY),
  XSD_ERROR (EAGAIN),
  XSD_ERROR (EISCONN),
  XSD_ERROR (E2BIG)
};
#endif
struct xsd_sockmsg {
  UINT32 type; /* XS_??? */
  UINT32 req_id; /* Request identifier, echoed in daemon's response. */
  UINT32 tx_id; /* Transaction id (0 if not related to a transaction). */
  UINT32 len; /* Length of data following this. */
  /* Generally followed by nul-terminated string(s). */
};
enum xs_watch_type {
  XS_WATCH_PATH = 0,
  XS_WATCH_TOKEN
};
/*
 * `incontents 150 xenstore_struct XenStore wire protocol.
 *
 * Inter-domain shared memory communications. */
#define XENSTORE_RING_SIZE 1024
typedef UINT32 XENSTORE_RING_IDX;
#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
struct xenstore_domain_interface {
  CHAR8 req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
  CHAR8 rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
  XENSTORE_RING_IDX req_cons, req_prod;
  XENSTORE_RING_IDX rsp_cons, rsp_prod;
  UINT32 server_features; /* Bitmap of features supported by the server */
  UINT32 connection;
};
/* Violating this is very bad. See docs/misc/xenstore.txt. */
#define XENSTORE_PAYLOAD_MAX 4096
/* Violating these just gets you an error back */
#define XENSTORE_ABS_PATH_MAX 3072
#define XENSTORE_REL_PATH_MAX 2048
/* The ability to reconnect a ring */
#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
/* Valid values for the connection field */
#define XENSTORE_CONNECTED 0 /* the steady-state */
#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */
#endif /* _XS_WIRE_H */
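For orientation on the wire format above: every XenStore request is a
xsd_sockmsg header written into xenstore_domain_interface.req[], followed by
its payload. A sketch of framing an XS_READ request (NextRequestId and Path
are hypothetical; AsciiStrSize is the standard edk2 BaseLib routine):

  struct xsd_sockmsg  Msg;

  Msg.type   = XS_READ;
  Msg.req_id = NextRequestId++;               /* echoed in the reply */
  Msg.tx_id  = 0;                             /* not inside a transaction */
  Msg.len    = (UINT32)AsciiStrSize (Path);   /* nul-terminated path follows */
  /* Copy Msg, then Path, into req[] (indices wrapped with
     MASK_XENSTORE_IDX), advance req_prod, then notify the event channel. */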


@@ -15,11 +15,11 @@
/* Source mapping space. */
/* ` enum phys_map_space { */
#define XENMAPSPACE_shared_info 0 /* shared info page */
#define XENMAPSPACE_grant_table 1 /* grant table page */
#define XENMAPSPACE_gmfn 2 /* GMFN */
#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
#define XENMAPSPACE_shared_info 0 /* shared info page */
#define XENMAPSPACE_grant_table 1 /* grant table page */
#define XENMAPSPACE_gmfn 2 /* GMFN */
#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
* XENMEM_add_to_physmap_batch only. */
/* ` } */
@@ -28,65 +28,68 @@
* pseudophysical address space.
* arg == addr of xen_add_to_physmap_t.
*/
#define XENMEM_add_to_physmap 7
#define XENMEM_add_to_physmap 7
struct xen_add_to_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
/* Which domain to change the mapping for. */
domid_t domid;
/* Number of pages to go through for gmfn_range */
UINT16 size;
/* Number of pages to go through for gmfn_range */
UINT16 size;
UINT32 space; /* => enum phys_map_space */
UINT32 space; /* => enum phys_map_space */
#define XENMAPIDX_grant_table_status 0x80000000
#define XENMAPIDX_grant_table_status 0x80000000
/* Index into space being mapped. */
xen_ulong_t idx;
/* Index into space being mapped. */
xen_ulong_t idx;
/* GPFN in domid where the source mapping page should appear. */
xen_pfn_t gpfn;
/* GPFN in domid where the source mapping page should appear. */
xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
DEFINE_XEN_GUEST_HANDLE (xen_add_to_physmap_t);
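
For example, mapping the shared info page into the guest physmap goes through this structure; the sketch below assumes the XenHypercall2 wrapper from OvmfPkg's XenHypercallLib and a caller-chosen PageFrameNumber:

struct xen_add_to_physmap  Xatp;
INTN                       ReturnCode;

Xatp.domid = DOMID_SELF;               /* operate on our own domain */
Xatp.size  = 0;                        /* only used with gmfn_range */
Xatp.space = XENMAPSPACE_shared_info;  /* source mapping space */
Xatp.idx   = 0;                        /* index into that space */
Xatp.gpfn  = PageFrameNumber;          /* destination GPFN (assumed) */

ReturnCode = XenHypercall2 (__HYPERVISOR_memory_op, XENMEM_add_to_physmap, (INTN)&Xatp);
if (ReturnCode != 0) {
  DEBUG ((DEBUG_ERROR, "XENMEM_add_to_physmap: %Ld\n", (INT64)ReturnCode));
}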
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
*/
#define XENMEM_remove_from_physmap 15
#define XENMEM_remove_from_physmap 15
struct xen_remove_from_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
/* Which domain to change the mapping for. */
domid_t domid;
/* GPFN of the current mapping of the page. */
xen_pfn_t gpfn;
/* GPFN of the current mapping of the page. */
xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
DEFINE_XEN_GUEST_HANDLE (xen_remove_from_physmap_t);
/*
* Returns the pseudo-physical memory map as it was when the domain
* was started (specified by XENMEM_set_memory_map).
* arg == addr of xen_memory_map_t.
*/
#define XENMEM_memory_map 9
#define XENMEM_memory_map 9
struct xen_memory_map {
/*
* On call the number of entries which can be stored in buffer. On
* return the number of entries which have been stored in
* buffer.
*/
UINT32 nr_entries;
/*
* On call the number of entries which can be stored in buffer. On
* return the number of entries which have been stored in
* buffer.
*/
UINT32 nr_entries;
/*
* Entries in the buffer are in the same format as returned by the
* BIOS INT 0x15 EAX=0xE820 call.
*/
XEN_GUEST_HANDLE(void) buffer;
/*
* Entries in the buffer are in the same format as returned by the
* BIOS INT 0x15 EAX=0xE820 call.
*/
XEN_GUEST_HANDLE (void) buffer;
};
typedef struct xen_memory_map xen_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
DEFINE_XEN_GUEST_HANDLE (xen_memory_map_t);
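
A caller drains this interface roughly as below; the E820-style buffer type and its capacity are assumptions, and XenHypercall2 is again taken from XenHypercallLib:

EFI_E820_ENTRY64       Entries[128];   /* assumed entry type and capacity */
struct xen_memory_map  Map;
INTN                   ReturnCode;

Map.nr_entries = (UINT32)ARRAY_SIZE (Entries);
set_xen_guest_handle (Map.buffer, Entries);

ReturnCode = XenHypercall2 (__HYPERVISOR_memory_op, XENMEM_memory_map, (INTN)&Map);
if (ReturnCode == 0) {
  /* Map.nr_entries now counts the E820 entries actually stored. */
}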
#endif /* __XEN_PUBLIC_MEMORY_H__ */

View File

@@ -11,18 +11,18 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040400
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040400
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#if defined (__XEN__) || defined (__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
#elif !defined(__XEN_INTERFACE_VERSION__)
#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
#elif !defined (__XEN_INTERFACE_VERSION__)
/* Guests which do not specify a version get the legacy interface. */
#define __XEN_INTERFACE_VERSION__ 0x00000000
#define __XEN_INTERFACE_VERSION__ 0x00000000
#endif
#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
#error "These header files do not support the requested interface version."
#error "These header files do not support the requested interface version."
#endif
#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
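
In practice a consumer pins the ABI before pulling in the public headers, otherwise the logic above silently selects either the latest or the legacy interface; the version value here is only an example:

#define __XEN_INTERFACE_VERSION__  0x00030202   /* example: pre-4.4 ABI */
#include "xen.h"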

View File

@@ -14,33 +14,33 @@
//
// Xen interface version used by Tianocore
//
#define __XEN_INTERFACE_VERSION__ 0x00040400
#define __XEN_INTERFACE_VERSION__ 0x00040400
#include "xen-compat.h"
#if defined(MDE_CPU_IA32) || defined(MDE_CPU_X64)
#include "arch-x86/xen.h"
#elif defined(__arm__) || defined (__aarch64__)
#include "arch-arm/xen.h"
#if defined (MDE_CPU_IA32) || defined (MDE_CPU_X64)
#include "arch-x86/xen.h"
#elif defined (__arm__) || defined (__aarch64__)
#include "arch-arm/xen.h"
#else
#error "Unsupported architecture"
#error "Unsupported architecture"
#endif
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
DEFINE_XEN_GUEST_HANDLE(CHAR8);
__DEFINE_XEN_GUEST_HANDLE(uchar, UINT8);
DEFINE_XEN_GUEST_HANDLE(INT32);
__DEFINE_XEN_GUEST_HANDLE(uint, UINT32);
#if __XEN_INTERFACE_VERSION__ < 0x00040300
DEFINE_XEN_GUEST_HANDLE(INTN);
__DEFINE_XEN_GUEST_HANDLE(ulong, UINTN);
#endif
DEFINE_XEN_GUEST_HANDLE(VOID);
DEFINE_XEN_GUEST_HANDLE (CHAR8);
__DEFINE_XEN_GUEST_HANDLE (uchar, UINT8);
DEFINE_XEN_GUEST_HANDLE (INT32);
__DEFINE_XEN_GUEST_HANDLE (uint, UINT32);
#if __XEN_INTERFACE_VERSION__ < 0x00040300
DEFINE_XEN_GUEST_HANDLE (INTN);
__DEFINE_XEN_GUEST_HANDLE (ulong, UINTN);
#endif
DEFINE_XEN_GUEST_HANDLE (VOID);
DEFINE_XEN_GUEST_HANDLE(UINT64);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
DEFINE_XEN_GUEST_HANDLE (UINT64);
DEFINE_XEN_GUEST_HANDLE (xen_pfn_t);
DEFINE_XEN_GUEST_HANDLE (xen_ulong_t);
#endif
/*
@@ -51,55 +51,55 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
* ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()
*/
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
#define __HYPERVISOR_platform_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_update_descriptor 10
#define __HYPERVISOR_memory_op 12
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret 23 /* x86 only */
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
#define __HYPERVISOR_platform_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_update_descriptor 10
#define __HYPERVISOR_memory_op 12
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret 23 /* x86 only */
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55
/* ` } */
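
As one concrete (hedged) use of these numbers, querying the hypervisor version via OvmfPkg's XenHypercallLib could look like this; XENVER_version is command 0 of __HYPERVISOR_xen_version and returns (major << 16) | minor:

UINT32  XenVersion;

XenVersion = (UINT32)XenHypercall2 (__HYPERVISOR_xen_version, 0 /* XENVER_version */, (INTN)NULL);
DEBUG ((DEBUG_INFO, "Xen %u.%u\n", XenVersion >> 16, XenVersion & 0xFFFF));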
@@ -109,21 +109,21 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
/* New sched_op hypercall introduced in 0x00030101. */
#if __XEN_INTERFACE_VERSION__ < 0x00030101
#undef __HYPERVISOR_sched_op
#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
#undef __HYPERVISOR_sched_op
#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
#endif
/* New event-channel and physdev hypercalls introduced in 0x00030202. */
#if __XEN_INTERFACE_VERSION__ < 0x00030202
#undef __HYPERVISOR_event_channel_op
#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
#undef __HYPERVISOR_physdev_op
#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
#undef __HYPERVISOR_event_channel_op
#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
#undef __HYPERVISOR_physdev_op
#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
#endif
/* New platform_op hypercall introduced in 0x00030204. */
#if __XEN_INTERFACE_VERSION__ < 0x00030204
#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
#endif
#ifndef __ASSEMBLY__
@@ -131,10 +131,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
typedef UINT16 domid_t;
/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)
#define DOMID_FIRST_RESERVED (0x7FF0U)
/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)
#define DOMID_SELF (0x7FF0U)
/*
* DOMID_IO is used to restrict page-table updates to mapping I/O memory.
@@ -145,7 +145,7 @@ typedef UINT16 domid_t;
* This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
* be specified by any calling domain.
*/
#define DOMID_IO (0x7FF1U)
#define DOMID_IO (0x7FF1U)
/*
* DOMID_XEN is used to allow privileged domains to map restricted parts of
@@ -160,85 +160,89 @@ typedef UINT16 domid_t;
#define DOMID_COW (0x7FF3U)
/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)
#define DOMID_INVALID (0x7FF4U)
/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)
#define DOMID_IDLE (0x7FFFU)
#if __XEN_INTERFACE_VERSION__ < 0x00040400
#if __XEN_INTERFACE_VERSION__ < 0x00040400
/*
* Event channel endpoints per domain (when using the 2-level ABI):
 * 1024 if an INTN is 32 bits; 4096 if an INTN is 64 bits.
*/
#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
#endif
#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
#endif
struct vcpu_time_info {
/*
* Updates to the following values are preceded and followed by an
* increment of 'version'. The guest can therefore detect updates by
* looking for changes to 'version'. If the least-significant bit of
* the version number is set then an update is in progress and the guest
* must wait to read a consistent set of values.
* The correct way to interact with the version number is similar to
* Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
*/
UINT32 Version;
UINT32 pad0;
UINT64 TscTimestamp; /* TSC at last update of time vals. */
UINT64 SystemTime; /* Time, in nanosecs, since boot. */
/*
* Current system time:
* system_time +
* ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
* CPU frequency (Hz):
* ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
*/
UINT32 TscToSystemMultiplier;
INT8 TscShift;
INT8 pad1[3];
/*
* Updates to the following values are preceded and followed by an
* increment of 'version'. The guest can therefore detect updates by
* looking for changes to 'version'. If the least-significant bit of
* the version number is set then an update is in progress and the guest
* must wait to read a consistent set of values.
* The correct way to interact with the version number is similar to
* Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
*/
UINT32 Version;
UINT32 pad0;
UINT64 TscTimestamp; /* TSC at last update of time vals. */
UINT64 SystemTime; /* Time, in nanosecs, since boot. */
/*
* Current system time:
* system_time +
* ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
* CPU frequency (Hz):
* ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
*/
UINT32 TscToSystemMultiplier;
INT8 TscShift;
INT8 pad1[3];
}; /* 32 bytes */
typedef struct vcpu_time_info XEN_VCPU_TIME_INFO;
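
The seqlock protocol in the comment can be made concrete. A sketch, assuming an x86 guest (AsmReadTsc) and BaseLib math helpers; note the 64-bit multiply truncates where production code would use a wider intermediate:

UINT64
XenGetSystemTimeNs (
  IN volatile struct vcpu_time_info  *Time
  )
{
  UINT32  Version;
  UINT64  Delta;
  UINT64  SystemTime;
  UINT32  Mul;
  INT8    Shift;

  do {
    Version = Time->Version;
    MemoryBarrier ();
    SystemTime = Time->SystemTime;
    Delta      = AsmReadTsc () - Time->TscTimestamp;
    Mul        = Time->TscToSystemMultiplier;
    Shift      = Time->TscShift;
    MemoryBarrier ();
    /* Odd or changed version: an update raced with us, so retry. */
  } while (((Version & 1) != 0) || (Version != Time->Version));

  if (Shift >= 0) {
    Delta <<= Shift;
  } else {
    Delta >>= -Shift;
  }

  return SystemTime + RShiftU64 (MultU64x32 (Delta, Mul), 32);
}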
struct vcpu_info {
/*
* 'evtchn_upcall_pending' is written non-zero by Xen to indicate
* a pending notification for a particular VCPU. It is then cleared
* by the guest OS /before/ checking for pending work, thus avoiding
* a set-and-check race. Note that the mask is only accessed by Xen
* on the CPU that is currently hosting the VCPU. This means that the
* pending and mask flags can be updated by the guest without special
* synchronisation (i.e., no need for the x86 LOCK prefix).
* This may seem suboptimal because if the pending flag is set by
* a different CPU then an IPI may be scheduled even when the mask
* is set. However, note:
* 1. The task of 'interrupt holdoff' is covered by the per-event-
* channel mask bits. A 'noisy' event that is continually being
* triggered can be masked at source at this very precise
* granularity.
* 2. The main purpose of the per-VCPU mask is therefore to restrict
* reentrant execution: whether for concurrency control, or to
* prevent unbounded stack usage. Whatever the purpose, we expect
* that the mask will be asserted only for short periods at a time,
* and so the likelihood of a 'spurious' IPI is suitably small.
* The mask is read before making an event upcall to the guest: a
* non-zero mask therefore guarantees that the VCPU will not receive
* an upcall activation. The mask is cleared when the VCPU requests
* to block: this avoids wakeup-waiting races.
*/
UINT8 evtchn_upcall_pending;
#ifdef XEN_HAVE_PV_UPCALL_MASK
UINT8 evtchn_upcall_mask;
#else /* XEN_HAVE_PV_UPCALL_MASK */
UINT8 pad0;
#endif /* XEN_HAVE_PV_UPCALL_MASK */
xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct vcpu_time_info Time;
/*
* 'evtchn_upcall_pending' is written non-zero by Xen to indicate
* a pending notification for a particular VCPU. It is then cleared
* by the guest OS /before/ checking for pending work, thus avoiding
* a set-and-check race. Note that the mask is only accessed by Xen
* on the CPU that is currently hosting the VCPU. This means that the
* pending and mask flags can be updated by the guest without special
* synchronisation (i.e., no need for the x86 LOCK prefix).
* This may seem suboptimal because if the pending flag is set by
* a different CPU then an IPI may be scheduled even when the mask
* is set. However, note:
* 1. The task of 'interrupt holdoff' is covered by the per-event-
* channel mask bits. A 'noisy' event that is continually being
* triggered can be masked at source at this very precise
* granularity.
* 2. The main purpose of the per-VCPU mask is therefore to restrict
* reentrant execution: whether for concurrency control, or to
* prevent unbounded stack usage. Whatever the purpose, we expect
* that the mask will be asserted only for short periods at a time,
* and so the likelihood of a 'spurious' IPI is suitably small.
* The mask is read before making an event upcall to the guest: a
* non-zero mask therefore guarantees that the VCPU will not receive
* an upcall activation. The mask is cleared when the VCPU requests
* to block: this avoids wakeup-waiting races.
*/
UINT8 evtchn_upcall_pending;
#ifdef XEN_HAVE_PV_UPCALL_MASK
UINT8 evtchn_upcall_mask;
#else /* XEN_HAVE_PV_UPCALL_MASK */
UINT8 pad0;
#endif /* XEN_HAVE_PV_UPCALL_MASK */
xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct vcpu_time_info Time;
}; /* 64 bytes (x86) */
#ifndef __XEN__
#ifndef __XEN__
typedef struct vcpu_info vcpu_info_t;
#endif
#endif
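
Matching the protocol spelled out above, a dispatcher acknowledges the upcall before it scans for work (a sketch; the scan itself is elided):

VOID
XenAckUpcall (
  IN volatile struct vcpu_info  *Vcpu
  )
{
  if (Vcpu->evtchn_upcall_pending != 0) {
    Vcpu->evtchn_upcall_pending = 0;   /* clear BEFORE checking for work */
    MemoryBarrier ();
    /* ... now walk evtchn_pending_sel and the shared_info bitmaps ... */
  }
}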
/*
* `incontents 200 startofday_shared Start-of-day shared data structure
@@ -250,71 +254,71 @@ typedef struct vcpu_info vcpu_info_t;
* of this structure remaining constant.
*/
struct shared_info {
struct vcpu_info VcpuInfo[XEN_LEGACY_MAX_VCPUS];
struct vcpu_info VcpuInfo[XEN_LEGACY_MAX_VCPUS];
/*
* A domain can create "event channels" on which it can send and receive
* asynchronous event notifications. There are three classes of event that
* are delivered by this mechanism:
* 1. Bi-directional inter- and intra-domain connections. Domains must
* arrange out-of-band to set up a connection (usually by allocating
 * an unbound 'listener' port and advertising that via a storage service
* such as xenstore).
* 2. Physical interrupts. A domain with suitable hardware-access
* privileges can bind an event-channel port to a physical interrupt
* source.
* 3. Virtual interrupts ('events'). A domain can bind an event-channel
* port to a virtual interrupt source, such as the virtual-timer
* device or the emergency console.
*
* Event channels are addressed by a "port index". Each channel is
* associated with two bits of information:
* 1. PENDING -- notifies the domain that there is a pending notification
* to be processed. This bit is cleared by the guest.
* 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
* will cause an asynchronous upcall to be scheduled. This bit is only
* updated by the guest. It is read-only within Xen. If a channel
* becomes pending while the channel is masked then the 'edge' is lost
* (i.e., when the channel is unmasked, the guest must manually handle
* pending notifications as no upcall will be scheduled by Xen).
*
* To expedite scanning of pending notifications, any 0->1 pending
* transition on an unmasked channel causes a corresponding bit in a
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C INTN' in the PENDING bitfield array.
*/
xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* A domain can create "event channels" on which it can send and receive
* asynchronous event notifications. There are three classes of event that
* are delivered by this mechanism:
* 1. Bi-directional inter- and intra-domain connections. Domains must
* arrange out-of-band to set up a connection (usually by allocating
 * an unbound 'listener' port and advertising that via a storage service
* such as xenstore).
* 2. Physical interrupts. A domain with suitable hardware-access
* privileges can bind an event-channel port to a physical interrupt
* source.
* 3. Virtual interrupts ('events'). A domain can bind an event-channel
* port to a virtual interrupt source, such as the virtual-timer
* device or the emergency console.
*
* Event channels are addressed by a "port index". Each channel is
* associated with two bits of information:
* 1. PENDING -- notifies the domain that there is a pending notification
* to be processed. This bit is cleared by the guest.
* 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
* will cause an asynchronous upcall to be scheduled. This bit is only
* updated by the guest. It is read-only within Xen. If a channel
* becomes pending while the channel is masked then the 'edge' is lost
* (i.e., when the channel is unmasked, the guest must manually handle
* pending notifications as no upcall will be scheduled by Xen).
*
* To expedite scanning of pending notifications, any 0->1 pending
* transition on an unmasked channel causes a corresponding bit in a
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C INTN' in the PENDING bitfield array.
*/
xen_ulong_t evtchn_pending[sizeof (xen_ulong_t) * 8];
xen_ulong_t evtchn_mask[sizeof (xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
* their gettimeofday() syscall on this wallclock-base value.
*/
UINT32 wc_version; /* Version counter: see vcpu_time_info_t. */
UINT32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
UINT32 wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
struct arch_shared_info arch;
/*
* Wallclock time: updated only by control software. Guests should base
* their gettimeofday() syscall on this wallclock-base value.
*/
UINT32 wc_version; /* Version counter: see vcpu_time_info_t. */
UINT32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
UINT32 wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
struct arch_shared_info arch;
};
#ifndef __XEN__
#ifndef __XEN__
typedef struct shared_info shared_info_t;
typedef struct shared_info XEN_SHARED_INFO;
#endif
#endif
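
A hedged sketch of the two-level scan the comment describes; HandleEventChannelPort is hypothetical, LowBitSet64 comes from BaseLib, and the atomic swap and per-port PENDING clear a real handler needs are only noted in comments:

#define BITS_PER_XEN_ULONG  (sizeof (xen_ulong_t) * 8)

VOID
XenScanPendingPorts (
  IN volatile struct shared_info  *Shared,
  IN volatile struct vcpu_info    *Vcpu
  )
{
  xen_ulong_t  Selector;
  xen_ulong_t  Pending;
  UINTN        WordIdx;
  UINTN        BitIdx;

  Selector = Vcpu->evtchn_pending_sel;
  Vcpu->evtchn_pending_sel = 0;        /* real code swaps this atomically */

  while (Selector != 0) {
    WordIdx   = (UINTN)LowBitSet64 ((UINT64)Selector);
    Selector &= ~((xen_ulong_t)1 << WordIdx);

    /* Honour MASK; clearing each PENDING bit is left to the handler. */
    Pending = Shared->evtchn_pending[WordIdx] & ~Shared->evtchn_mask[WordIdx];
    while (Pending != 0) {
      BitIdx   = (UINTN)LowBitSet64 ((UINT64)Pending);
      Pending &= ~((xen_ulong_t)1 << BitIdx);
      HandleEventChannelPort (WordIdx * BITS_PER_XEN_ULONG + BitIdx);   /* hypothetical */
    }
  }
}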
/* Turn a plain number into a C UINTN constant. */
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)
__DEFINE_XEN_GUEST_HANDLE(uint8, UINT8);
__DEFINE_XEN_GUEST_HANDLE(uint16, UINT16);
__DEFINE_XEN_GUEST_HANDLE(uint32, UINT32);
__DEFINE_XEN_GUEST_HANDLE(uint64, UINT64);
__DEFINE_XEN_GUEST_HANDLE (uint8, UINT8);
__DEFINE_XEN_GUEST_HANDLE (uint16, UINT16);
__DEFINE_XEN_GUEST_HANDLE (uint32, UINT32);
__DEFINE_XEN_GUEST_HANDLE (uint64, UINT64);
#else /* __ASSEMBLY__ */
/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x
#define mk_unsigned_long(x) x
#endif /* !__ASSEMBLY__ */