There were 3 variables indicating the state of the event log region. However, there's no need to keep track of those individually. The only thing required is to know whether elog_scan_flash() failed. There's no other tracking required beyond that. BUG=chrome-os-partner:55932 Change-Id: I88ad32091d3c37966a2ac6272f8ad95bcc8c4270 Signed-off-by: Aaron Durbin <adurbin@chromium.org> Reviewed-on: https://review.coreboot.org/16100 Tested-by: build bot (Jenkins) Reviewed-by: Martin Roth <martinroth@google.com>
890 lines
20 KiB
C
890 lines
20 KiB
C
/*
|
|
* This file is part of the coreboot project.
|
|
*
|
|
* Copyright (C) 2012 The ChromiumOS Authors. All rights reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License as published by
|
|
* the Free Software Foundation; version 2 of the License.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*/
|
|
|
|
#if CONFIG_HAVE_ACPI_RESUME == 1
|
|
#include <arch/acpi.h>
|
|
#endif
|
|
#include <cbmem.h>
|
|
#include <console/console.h>
|
|
#if CONFIG_ARCH_X86
|
|
#include <pc80/mc146818rtc.h>
|
|
#endif
|
|
#include <bcd.h>
|
|
#include <boot_device.h>
|
|
#include <commonlib/region.h>
|
|
#include <fmap.h>
|
|
#include <rtc.h>
|
|
#include <smbios.h>
|
|
#include <spi-generic.h>
|
|
#include <spi_flash.h>
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
#include <elog.h>
|
|
#include "elog_internal.h"
|
|
|
|
|
|
#if CONFIG_ELOG_DEBUG
|
|
#define elog_debug(STR...) printk(BIOS_DEBUG, STR)
|
|
#else
|
|
#define elog_debug(STR...)
|
|
#endif
|
|
|
|
/* Sentinel for nv_last_write: the flash copy can only be brought up to
   date after erasing the region first. */
#define NV_NEEDS_ERASE (~(size_t)0)
/*
 * Static variables for ELOG state
 */
/* Size of the eventlog region in flash and its absolute base offset. */
static u16 total_size;
static u32 flash_base;
/* Mirror fill level that triggers a shrink, and how much gets removed. */
static u16 full_threshold;
static u16 shrink_size;

/*
 * The non-volatile storage chases the mirrored copy. When nv_last_write
 * is less than the mirrored last write the non-volatile storage needs
 * to be updated.
 */
static size_t mirror_last_write;
static size_t nv_last_write;

/* Probed SPI flash device backing the log; NULL until elog_init(). */
static struct spi_flash *elog_spi;
/* Device that mirrors the eventlog in memory. */
static struct mem_region_device mirror_dev;

/* Tracks whether elog_init() has been run and whether it succeeded. */
static enum {
	ELOG_UNINITIALIZED = 0,
	ELOG_INITIALIZED,
	ELOG_BROKEN,
} elog_initialized = ELOG_UNINITIALIZED;
|
|
|
|
/* Return the region_device view of the in-memory event log mirror. */
static inline struct region_device *mirror_dev_get(void)
{
	return &mirror_dev.rdev;
}
|
|
|
|
/* Offset of the first event within the log buffer. */
static size_t elog_events_start(void)
{
	/* Events are added directly after the header. */
	return sizeof(struct elog_header);
}
|
|
|
|
/* Total bytes available for events: region size minus the header. */
static size_t elog_events_total_space(void)
{
	return total_size - elog_events_start();
}
|
|
|
|
/* Map 'size' bytes at 'offset' of the mirror as an event buffer.
   Returns NULL if the mapping fails. Pair with elog_put_event_buffer(). */
static struct event_header *elog_get_event_buffer(size_t offset, size_t size)
{
	return rdev_mmap(mirror_dev_get(), offset, size);
}
|
|
|
|
/* Map a buffer at the mirror's current write offset for a new event. */
static struct event_header *elog_get_next_event_buffer(size_t size)
{
	return elog_get_event_buffer(mirror_last_write, size);
}
|
|
|
|
/* Release a buffer obtained from elog_get_event_buffer(). */
static void elog_put_event_buffer(struct event_header *event)
{
	rdev_munmap(mirror_dev_get(), event);
}
|
|
|
|
static size_t elog_mirror_reset_last_write(void)
|
|
{
|
|
/* Return previous write value. */
|
|
size_t prev = mirror_last_write;
|
|
mirror_last_write = 0;
|
|
return prev;
|
|
}
|
|
|
|
/* Advance the mirror's write offset by 'size' bytes. */
static void elog_mirror_increment_last_write(size_t size)
{
	mirror_last_write += size;
}
|
|
|
|
/* Reset the non-volatile write offset to the start of the region. */
static void elog_nv_reset_last_write(void)
{
	nv_last_write = 0;
}
|
|
|
|
/* Advance the non-volatile write offset by 'size' bytes. */
static void elog_nv_increment_last_write(size_t size)
{
	nv_last_write += size;
}
|
|
|
|
/* Flag the non-volatile region for erase on the next sync, unless it
   is known to be blank already. */
static void elog_nv_needs_possible_erase(void)
{
	/* If last write is 0 it means it is already erased. */
	if (nv_last_write != 0)
		nv_last_write = NV_NEEDS_ERASE;
}
|
|
|
|
/* Return true when the mirror has filled up to the shrink threshold. */
static bool elog_should_shrink(void)
{
	return mirror_last_write >= full_threshold;
}
|
|
|
|
/* Return true when the non-volatile region was flagged for erase. */
static bool elog_nv_needs_erase(void)
{
	return nv_last_write == NV_NEEDS_ERASE;
}
|
|
|
|
/* Return true when the non-volatile copy is out of sync with the
   mirror. This also covers the NV_NEEDS_ERASE marker value. */
static bool elog_nv_needs_update(void)
{
	return nv_last_write != mirror_last_write;
}
|
|
|
|
/* Determine the mirror range that still needs to reach non-volatile
   storage. Stores the start in '*offset' and returns the byte count. */
static size_t elog_nv_region_to_update(size_t *offset)
{
	*offset = nv_last_write;
	return mirror_last_write - nv_last_write;
}
|
|
|
|
/*
 * When parsing state from the NV one needs to adjust both the NV and mirror
 * write state. Therefore, provide helper functions which adjust both
 * at the same time.
 */
/* Reset both the mirror and non-volatile write offsets to 0. */
static void elog_tandem_reset_last_write(void)
{
	elog_mirror_reset_last_write();
	elog_nv_reset_last_write();
}
|
|
|
|
/* Advance both the mirror and non-volatile write offsets by 'size'. */
static void elog_tandem_increment_last_write(size_t size)
{
	elog_mirror_increment_last_write(size);
	elog_nv_increment_last_write(size);
}
|
|
|
|
/*
|
|
* Update the checksum at the last byte
|
|
*/
|
|
static void elog_update_checksum(struct event_header *event, u8 checksum)
|
|
{
|
|
u8 *event_data = (u8*)event;
|
|
event_data[event->length - 1] = checksum;
|
|
}
|
|
|
|
/*
|
|
* Simple byte checksum for events
|
|
*/
|
|
static u8 elog_checksum_event(struct event_header *event)
|
|
{
|
|
u8 index, checksum = 0;
|
|
u8 *data = (u8*)event;
|
|
|
|
for (index = 0; index < event->length; index++)
|
|
checksum += data[index];
|
|
return checksum;
|
|
}
|
|
|
|
/*
|
|
* Check if mirrored buffer is filled with ELOG_TYPE_EOL byte from the
|
|
* provided offset to the end of the mirrored buffer.
|
|
*/
|
|
static int elog_is_buffer_clear(size_t offset)
|
|
{
|
|
size_t i;
|
|
const struct region_device *rdev = mirror_dev_get();
|
|
size_t size = region_device_sz(rdev) - offset;
|
|
uint8_t *buffer = rdev_mmap(rdev, offset, size);
|
|
int ret = 1;
|
|
|
|
elog_debug("elog_is_buffer_clear(offset=%zu size=%zu)\n", offset, size);
|
|
|
|
if (buffer == NULL)
|
|
return 0;
|
|
|
|
for (i = 0; i < size; i++) {
|
|
if (buffer[i] != ELOG_TYPE_EOL) {
|
|
ret = 0;
|
|
break;
|
|
}
|
|
}
|
|
rdev_munmap(rdev, buffer);
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* Verify if the mirrored elog structure is valid.
|
|
* Returns 1 if the header is valid, 0 otherwise
|
|
*/
|
|
static int elog_is_header_valid(void)
|
|
{
|
|
struct elog_header *header;
|
|
|
|
elog_debug("elog_is_header_valid()\n");
|
|
|
|
header = rdev_mmap(mirror_dev_get(), 0, sizeof(*header));
|
|
|
|
if (header == NULL) {
|
|
printk(BIOS_ERR, "ELOG: could not map header.\n");
|
|
return 0;
|
|
}
|
|
|
|
if (header->magic != ELOG_SIGNATURE) {
|
|
printk(BIOS_ERR, "ELOG: header magic 0x%X != 0x%X\n",
|
|
header->magic, ELOG_SIGNATURE);
|
|
return 0;
|
|
}
|
|
if (header->version != ELOG_VERSION) {
|
|
printk(BIOS_ERR, "ELOG: header version %u != %u\n",
|
|
header->version, ELOG_VERSION);
|
|
return 0;
|
|
}
|
|
if (header->header_size != sizeof(*header)) {
|
|
printk(BIOS_ERR, "ELOG: header size mismatch %u != %zu\n",
|
|
header->header_size, sizeof(*header));
|
|
return 0;
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
/*
|
|
* Validate the event header and data.
|
|
*/
|
|
static size_t elog_is_event_valid(size_t offset)
|
|
{
|
|
uint8_t checksum;
|
|
struct event_header *event;
|
|
uint8_t len;
|
|
const size_t len_offset = offsetof(struct event_header, length);
|
|
const size_t size = sizeof(len);
|
|
|
|
/* Read and validate length. */
|
|
if (rdev_readat(mirror_dev_get(), &len, offset + len_offset, size) < 0)
|
|
return 0;
|
|
|
|
/* Event length must be at least header size + checksum */
|
|
if (len < (sizeof(*event) + sizeof(checksum)))
|
|
return 0;
|
|
|
|
if (len > MAX_EVENT_SIZE)
|
|
return 0;
|
|
|
|
event = elog_get_event_buffer(offset, len);
|
|
if (!event)
|
|
return 0;
|
|
|
|
/* If event checksum is invalid the area is corrupt */
|
|
checksum = elog_checksum_event(event);
|
|
elog_put_event_buffer(event);
|
|
|
|
if (checksum != 0)
|
|
return 0;
|
|
|
|
/* Event is valid */
|
|
return len;
|
|
}
|
|
|
|
/*
|
|
* Write 'size' bytes of data from provided 'offset' in the mirrored elog to
|
|
* the flash backing store. This will not erase the flash and it assumes the
|
|
* flash area has been erased appropriately.
|
|
*/
|
|
static void elog_flash_write(size_t offset, size_t size)
|
|
{
|
|
void *address;
|
|
const struct region_device *rdev = mirror_dev_get();
|
|
|
|
if (!size || !elog_spi)
|
|
return;
|
|
|
|
address = rdev_mmap(rdev, offset, size);
|
|
|
|
/* Ensure offset is absolute. */
|
|
offset += flash_base;
|
|
|
|
elog_debug("elog_flash_write(address=0x%p offset=0x%08x size=%u)\n",
|
|
address, offset, size);
|
|
|
|
if (address == NULL)
|
|
return;
|
|
|
|
/* Write the data to flash */
|
|
elog_spi->write(elog_spi, offset, size, address);
|
|
|
|
rdev_munmap(rdev, address);
|
|
}
|
|
|
|
/*
 * Erase the whole event log region in flash
 * ([flash_base, flash_base + total_size)).
 */
static void elog_flash_erase(void)
{
	if (!elog_spi)
		return;

	elog_debug("elog_flash_erase(offset=0x%08x size=%u)\n",
		flash_base, total_size);

	/* Erase the sectors in this region */
	elog_spi->erase(elog_spi, flash_base, total_size);
}
|
|
|
|
/*
 * Scan the event area and validate each entry and update the ELOG state.
 * Advances the mirror and NV write offsets past every valid event.
 * Returns 0 when the buffer parses cleanly, -1 on any corruption.
 */
static int elog_update_event_buffer_state(void)
{
	size_t offset = elog_events_start();

	elog_debug("elog_update_event_buffer_state()\n");

	/* Go through each event and validate it */
	while (1) {
		uint8_t type;
		const size_t type_offset = offsetof(struct event_header, type);
		size_t len;
		const size_t size = sizeof(type);

		if (rdev_readat(mirror_dev_get(), &type,
				offset + type_offset, size) < 0) {
			return -1;
		}

		/* The end of the event marker has been found */
		if (type == ELOG_TYPE_EOL)
			break;

		/* Validate the event */
		len = elog_is_event_valid(offset);

		if (!len)
			return -1;

		/* Move to the next event */
		elog_tandem_increment_last_write(len);
		offset += len;
	}

	/* Ensure the remaining buffer is empty */
	if (!elog_is_buffer_clear(offset))
		return -1;

	return 0;
}
|
|
|
|
static int elog_scan_flash(void)
|
|
{
|
|
elog_debug("elog_scan_flash()\n");
|
|
void *mirror_buffer;
|
|
const struct region_device *rdev = mirror_dev_get();
|
|
|
|
/* Fill memory buffer by reading from SPI */
|
|
mirror_buffer = rdev_mmap_full(rdev);
|
|
elog_spi->read(elog_spi, flash_base, total_size, mirror_buffer);
|
|
rdev_munmap(rdev, mirror_buffer);
|
|
|
|
/* No writes have been done yet. */
|
|
elog_tandem_reset_last_write();
|
|
|
|
/* Check if the area is empty or not */
|
|
if (elog_is_buffer_clear(0))
|
|
return -1;
|
|
|
|
/* Indicate that header possibly written. */
|
|
elog_tandem_increment_last_write(elog_events_start());
|
|
|
|
/* Validate the header */
|
|
if (!elog_is_header_valid())
|
|
return -1;
|
|
|
|
return elog_update_event_buffer_state();
|
|
}
|
|
|
|
/* Write a fresh elog header into the mirror and advance the mirror
   write offset past it. */
static void elog_write_header_in_mirror(void)
{
	static const struct elog_header header = {
		.magic = ELOG_SIGNATURE,
		.version = ELOG_VERSION,
		.header_size = sizeof(struct elog_header),
		/* Reserved bytes are kept in the erased/EOL state. */
		.reserved = {
			[0] = ELOG_TYPE_EOL,
			[1] = ELOG_TYPE_EOL,
		},
	};

	rdev_writeat(mirror_dev_get(), &header, 0, sizeof(header));
	elog_mirror_increment_last_write(elog_events_start());
}
|
|
|
|
/*
 * Move 'size' bytes of events located at 'offset' in the mirror down to
 * the start of the event area, then fill the vacated tail of the buffer
 * with ELOG_TYPE_EOL bytes.
 */
static void elog_move_events_to_front(size_t offset, size_t size)
{
	void *src;
	void *dest;
	size_t start_offset = elog_events_start();
	const struct region_device *rdev = mirror_dev_get();

	src = rdev_mmap(rdev, offset, size);
	dest = rdev_mmap(rdev, start_offset, size);

	if (src == NULL || dest == NULL) {
		printk(BIOS_ERR, "ELOG: failure moving events!\n");
		rdev_munmap(rdev, dest);
		rdev_munmap(rdev, src);
		return;
	}

	/* Move the events to the front. memmove() because the source and
	   destination ranges may overlap. */
	memmove(dest, src, size);
	rdev_munmap(rdev, dest);
	rdev_munmap(rdev, src);

	/* Mark EOL for previously used buffer until the end. */
	offset = start_offset + size;
	size = region_device_sz(rdev) - offset;
	dest = rdev_mmap(rdev, offset, size);
	if (dest == NULL) {
		printk(BIOS_ERR, "ELOG: failure filling EOL!\n");
		return;
	}
	memset(dest, ELOG_TYPE_EOL, size);
	rdev_munmap(rdev, dest);
}
|
|
|
|
/* Perform the shrink and move events returning the size of bytes shrunk. */
static size_t elog_do_shrink(size_t requested_size, size_t last_write)
{
	const struct region_device *rdev = mirror_dev_get();
	size_t offset = elog_events_start();
	size_t remaining_size;

	/* Walk whole events until at least 'requested_size' bytes of the
	   oldest events have been passed over. */
	while (1) {
		const size_t type_offset = offsetof(struct event_header, type);
		const size_t len_offset = offsetof(struct event_header, length);
		const size_t size = sizeof(uint8_t);
		uint8_t type;
		uint8_t len;

		/* Next event has exceeded constraints */
		if (offset > requested_size)
			break;

		if (rdev_readat(rdev, &type, offset + type_offset, size) < 0)
			break;

		/* Reached the end of the area */
		if (type == ELOG_TYPE_EOL)
			break;

		if (rdev_readat(rdev, &len, offset + len_offset, size) < 0)
			break;

		offset += len;
	}

	/*
	 * Move the events and update the last write. The last write before
	 * shrinking was captured prior to resetting the counter to determine
	 * actual size we're keeping.
	 */
	remaining_size = last_write - offset;
	elog_move_events_to_front(offset, remaining_size);
	elog_mirror_increment_last_write(remaining_size);

	/* Return the amount of data removed. */
	return offset - elog_events_start();
}
|
|
|
|
/*
 * Shrink the log, deleting old entries and moving the
 * remaining ones to the front of the log.
 */
static void elog_shrink_by_size(size_t requested_size)
{
	size_t shrunk_size;
	size_t captured_last_write;
	size_t total_event_space = elog_events_total_space();

	elog_debug("%s()\n", __func__);

	/* Indicate possible erase required. */
	elog_nv_needs_possible_erase();

	/* Capture the last write to determine data size in buffer to shrink. */
	captured_last_write = elog_mirror_reset_last_write();

	/* Prepare new header. */
	elog_write_header_in_mirror();

	/* Determine if any actual shrinking is required. */
	if (requested_size >= total_event_space)
		shrunk_size = total_event_space;
	else
		shrunk_size = elog_do_shrink(requested_size,
						captured_last_write);

	/* Add clear event recording how many bytes were removed. */
	elog_add_event_word(ELOG_TYPE_LOG_CLEAR, shrunk_size);
}
|
|
|
|
static int elog_prepare_empty(void)
|
|
{
|
|
elog_debug("elog_prepare_empty()\n");
|
|
elog_shrink_by_size(elog_events_total_space());
|
|
|
|
if (elog_initialized != ELOG_INITIALIZED)
|
|
return -1;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Shrink the log by the configured amount when it has reached the
   full threshold. */
static void elog_shrink(void)
{
	if (elog_should_shrink())
		elog_shrink_by_size(shrink_size);
}
|
|
|
|
#ifndef __SMM__
|
|
#if IS_ENABLED(CONFIG_ARCH_X86)
|
|
|
|
/*
|
|
* Convert a flash offset into a memory mapped flash address
|
|
*/
|
|
static inline u8 *elog_flash_offset_to_address(void)
|
|
{
|
|
/* Only support memory-mapped SPI devices. */
|
|
if (!IS_ENABLED(CONFIG_SPI_FLASH_MEMORY_MAPPED))
|
|
return NULL;
|
|
|
|
if (!elog_spi)
|
|
return NULL;
|
|
|
|
return rdev_mmap(boot_device_ro(), flash_base, total_size);
|
|
}
|
|
|
|
/*
 * Fill out SMBIOS Type 15 table entry so the
 * event log can be discovered at runtime.
 * Returns the number of bytes written at '*current', or 0 on failure.
 */
int elog_smbios_write_type15(unsigned long *current, int handle)
{
	struct smbios_type15 *t = (struct smbios_type15 *)*current;
	int len = sizeof(struct smbios_type15);

#if CONFIG_ELOG_CBMEM
	/* Save event log buffer into CBMEM for the OS to read */
	void *cbmem = cbmem_add(CBMEM_ID_ELOG, total_size);
	if (!cbmem)
		return 0;
	rdev_readat(mirror_dev_get(), cbmem, 0, total_size);
#endif

	memset(t, 0, len);
	t->type = SMBIOS_EVENT_LOG;
	t->length = len - 2;
	t->handle = handle;
	t->area_length = total_size - 1;
	t->header_offset = 0;
	t->data_offset = sizeof(struct elog_header);
	t->access_method = SMBIOS_EVENTLOG_ACCESS_METHOD_MMIO32;
	t->log_status = SMBIOS_EVENTLOG_STATUS_VALID;
	t->change_token = 0;
	/* Point the OS at the CBMEM copy if enabled, otherwise at the
	   memory-mapped flash region. */
#if CONFIG_ELOG_CBMEM
	t->address = (u32)cbmem;
#else
	t->address = (u32)elog_flash_offset_to_address();
#endif
	t->header_format = ELOG_HEADER_TYPE_OEM;
	t->log_type_descriptors = 0;
	t->log_type_descriptor_length = 2;

	*current += len;
	return len;
}
|
|
#endif
|
|
#endif
|
|
|
|
/*
 * Clear the entire event log
 * Returns 0 on success, -1 when initialization or the clear fails.
 */
int elog_clear(void)
{
	elog_debug("elog_clear()\n");

	/* Make sure ELOG structures are initialized */
	if (elog_init() < 0)
		return -1;

	return elog_prepare_empty();
}
|
|
|
|
/* Locate the RW_ELOG FMAP region and derive total_size, full_threshold
   and shrink_size from it. */
static void elog_find_flash(void)
{
	struct region r;
	size_t reserved_space = ELOG_MIN_AVAILABLE_ENTRIES * MAX_EVENT_SIZE;

	elog_debug("elog_find_flash()\n");

	/* Find the ELOG base and size in FMAP */
	if (fmap_locate_area("RW_ELOG", &r) < 0) {
		printk(BIOS_WARNING, "ELOG: Unable to find RW_ELOG in FMAP\n");
		flash_base = total_size = 0;
	} else {
		flash_base = region_offset(&r);
		/* Keep 4KiB max size until large malloc()s have been fixed. */
		total_size = MIN(4*KiB, region_sz(&r));
	}

	/* NOTE(review): with total_size == 0 these u16 subtractions wrap;
	   elog_init() bails on flash_base == 0 before using them — confirm
	   no other caller relies on these values in that case. */
	full_threshold = total_size - reserved_space;
	shrink_size = MIN(total_size * ELOG_SHRINK_PERCENTAGE / 100,
				full_threshold);
}
|
|
|
|
/* Push any mirror contents that have not yet reached the SPI flash out
   to non-volatile storage, erasing the region first when required.
   Returns 0 on success, -1 when a post-erase rescan fails (and the log
   is then marked broken). */
static int elog_sync_to_nv(void)
{
	size_t offset;
	size_t size;
	bool erase_needed;

	/* Determine if any updates are required. */
	if (!elog_nv_needs_update())
		return 0;

	erase_needed = elog_nv_needs_erase();

	/* Erase if necessary. */
	if (erase_needed) {
		elog_flash_erase();
		elog_nv_reset_last_write();
	}

	size = elog_nv_region_to_update(&offset);

	elog_flash_write(offset, size);
	elog_nv_increment_last_write(size);

	/*
	 * If erase wasn't performed then don't rescan. Assume the appended
	 * write was successful.
	 */
	if (!erase_needed)
		return 0;

	/* Mark broken if the scan failed after a sync. */
	if (elog_scan_flash() < 0) {
		printk(BIOS_ERR, "ELOG: Sync back from NV storage failed.\n");
		elog_initialized = ELOG_BROKEN;
		return -1;
	}

	return 0;
}
|
|
|
|
/*
 * Event log main entry point
 * Probes the SPI flash, locates the log region, allocates the memory
 * mirror and loads (or re-creates) the log. Idempotent: subsequent
 * calls return the cached success/failure state.
 * Returns 0 on success, -1 on failure.
 */
int elog_init(void)
{
	void *mirror_buffer;

	switch (elog_initialized) {
	case ELOG_UNINITIALIZED:
		break;
	case ELOG_INITIALIZED:
		return 0;
	case ELOG_BROKEN:
		return -1;
	}
	/* Pessimistically assume failure; flipped on success below. */
	elog_initialized = ELOG_BROKEN;

	elog_debug("elog_init()\n");

	/* Probe SPI chip. SPI controller must already be initialized. */
	elog_spi = spi_flash_probe(CONFIG_BOOT_MEDIA_SPI_BUS, 0);
	if (!elog_spi) {
		printk(BIOS_ERR, "ELOG: Unable to find SPI flash\n");
		return -1;
	}

	/* Set up the backing store */
	elog_find_flash();
	if (flash_base == 0) {
		printk(BIOS_ERR, "ELOG: Invalid flash base\n");
		return -1;
	} else if (total_size < sizeof(struct elog_header) + MAX_EVENT_SIZE) {
		printk(BIOS_ERR, "ELOG: Region too small to hold any events\n");
		return -1;
	} else if (total_size - shrink_size >= full_threshold) {
		printk(BIOS_ERR,
			"ELOG: SHRINK_PERCENTAGE set too small for MIN_AVAILABLE_ENTRIES\n");
		return -1;
	}

	mirror_buffer = malloc(total_size);
	if (!mirror_buffer) {
		printk(BIOS_ERR, "ELOG: Unable to allocate backing store\n");
		return -1;
	}
	mem_region_device_rw_init(&mirror_dev, mirror_buffer, total_size);

	/*
	 * Mark as initialized to allow elog_init() to be called and deemed
	 * successful in the prepare/shrink path which adds events.
	 */
	elog_initialized = ELOG_INITIALIZED;

	/* Load the log from flash and prepare the flash if necessary. */
	if (elog_scan_flash() < 0) {
		printk(BIOS_ERR, "ELOG: flash area invalid\n");
		if (elog_prepare_empty() < 0) {
			printk(BIOS_ERR, "ELOG: Unable to prepare flash\n");
			return -1;
		}
	}

	printk(BIOS_INFO, "ELOG: FLASH @0x%p [SPI 0x%08x]\n",
		mirror_buffer, flash_base);

	printk(BIOS_INFO, "ELOG: area is %d bytes, full threshold %d,"
		" shrink size %d\n", total_size, full_threshold, shrink_size);

#if !defined(__SMM__)
	/* Log boot count event except in S3 resume */
#if CONFIG_ELOG_BOOT_COUNT == 1
#if CONFIG_HAVE_ACPI_RESUME == 1
	if (!acpi_is_wakeup_s3())
#endif
		elog_add_event_dword(ELOG_TYPE_BOOT, boot_count_read());
#else
	/* If boot count is not implemented, fake it. */
	elog_add_event_dword(ELOG_TYPE_BOOT, 0);
#endif

#if CONFIG_ARCH_X86
	/* Check and log POST codes from previous boot */
	if (CONFIG_CMOS_POST)
		cmos_post_log();
#endif
#endif

	return 0;
}
|
|
|
|
/*
 * Populate timestamp in event header with current time
 * The fields are stored in BCD. When RTC support is disabled, or the
 * RTC returns an out-of-range value, the whole timestamp is zeroed.
 */
static void elog_fill_timestamp(struct event_header *event)
{
#if IS_ENABLED(CONFIG_RTC)
	struct rtc_time time;

	rtc_get(&time);
	event->second = bin2bcd(time.sec);
	event->minute = bin2bcd(time.min);
	event->hour = bin2bcd(time.hour);
	event->day = bin2bcd(time.mday);
	event->month = bin2bcd(time.mon);
	event->year = bin2bcd(time.year % 100);

	/* Basic sanity check of expected ranges */
	if (event->month > 0x12 || event->day > 0x31 || event->hour > 0x23 ||
	    event->minute > 0x59 || event->second > 0x59)
#endif
	/* Fallback body: taken unconditionally without RTC support, or
	   when the sanity check above fails. */
	{
		event->year = 0;
		event->month = 0;
		event->day = 0;
		event->hour = 0;
		event->minute = 0;
		event->second = 0;
	}
}
|
|
|
|
/*
 * Add an event to the log
 * 'data' may be NULL when 'data_size' is 0. The event is appended to
 * the memory mirror, the log is shrunk if it crossed the full
 * threshold, and the result is synced to SPI flash.
 */
void elog_add_event_raw(u8 event_type, void *data, u8 data_size)
{
	struct event_header *event;
	u8 event_size;

	elog_debug("elog_add_event_raw(type=%X)\n", event_type);

	/* Make sure ELOG structures are initialized */
	if (elog_init() < 0)
		return;

	/* Header + Data + Checksum */
	event_size = sizeof(*event) + data_size + 1;
	if (event_size > MAX_EVENT_SIZE) {
		printk(BIOS_ERR, "ELOG: Event(%X) data size too "
			"big (%d)\n", event_type, event_size);
		return;
	}

	/* Make sure event data can fit */
	event = elog_get_next_event_buffer(event_size);
	if (event == NULL) {
		printk(BIOS_ERR, "ELOG: Event(%X) does not fit\n",
			event_type);
		return;
	}

	/* Fill out event data */
	event->type = event_type;
	event->length = event_size;
	elog_fill_timestamp(event);

	if (data_size)
		memcpy(&event[1], data, data_size);

	/* Zero the checksum byte and then compute checksum */
	elog_update_checksum(event, 0);
	elog_update_checksum(event, -(elog_checksum_event(event)));
	elog_put_event_buffer(event);

	elog_mirror_increment_last_write(event_size);

	printk(BIOS_INFO, "ELOG: Event(%X) added with size %d\n",
		event_type, event_size);

	/* Shrink the log if we are getting too full */
	elog_shrink();

	/* Ensure the updates hit the non-volatile storage. */
	elog_sync_to_nv();
}
|
|
|
|
/* Log an event carrying no payload data. */
void elog_add_event(u8 event_type)
{
	elog_add_event_raw(event_type, NULL, 0);
}
|
|
|
|
/* Log an event with a single byte of payload data. */
void elog_add_event_byte(u8 event_type, u8 data)
{
	elog_add_event_raw(event_type, &data, sizeof(data));
}
|
|
|
|
/* Log an event with a 16-bit payload. */
void elog_add_event_word(u8 event_type, u16 data)
{
	elog_add_event_raw(event_type, &data, sizeof(data));
}
|
|
|
|
/* Log an event with a 32-bit payload. */
void elog_add_event_dword(u8 event_type, u32 data)
{
	elog_add_event_raw(event_type, &data, sizeof(data));
}
|
|
|
|
/* Log a wake-source event recording which source fired and its
   instance number. */
void elog_add_event_wake(u8 source, u32 instance)
{
	struct elog_event_data_wake wake = {
		.source = source,
		.instance = instance
	};
	elog_add_event_raw(ELOG_TYPE_WAKE_SOURCE, &wake, sizeof(wake));
}
|