mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
Convert all PFB registers to use the kernel's register macro and update the code accordingly. NV_PGSP_QUEUE_HEAD was somehow caught in the PFB section, so move it to its own section and convert it as well. Reviewed-by: Eliot Courtney <ecourtney@nvidia.com> Reviewed-by: Gary Guo <gary@garyguo.net> Acked-by: Danilo Krummrich <dakr@kernel.org> Link: https://patch.msgid.link/20260325-b4-nova-register-v4-4-bdf172f0f6ca@nvidia.com Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
832 lines
31 KiB
Rust
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
mod continuation;
|
|
|
|
use core::mem;
|
|
|
|
use kernel::{
|
|
device,
|
|
dma::{
|
|
Coherent,
|
|
DmaAddress, //
|
|
},
|
|
dma_write,
|
|
io::{
|
|
poll::read_poll_timeout,
|
|
Io, //
|
|
},
|
|
new_mutex,
|
|
prelude::*,
|
|
sync::{
|
|
aref::ARef,
|
|
Mutex, //
|
|
},
|
|
time::Delta,
|
|
transmute::{
|
|
AsBytes,
|
|
FromBytes, //
|
|
},
|
|
};
|
|
|
|
use continuation::{
|
|
ContinuationRecord,
|
|
SplitState, //
|
|
};
|
|
|
|
use pin_init::pin_init_scope;
|
|
|
|
use crate::{
|
|
driver::Bar0,
|
|
gsp::{
|
|
fw::{
|
|
GspMsgElement,
|
|
MsgFunction,
|
|
MsgqRxHeader,
|
|
MsgqTxHeader,
|
|
GSP_MSG_QUEUE_ELEMENT_SIZE_MAX, //
|
|
},
|
|
PteArray,
|
|
GSP_PAGE_SHIFT,
|
|
GSP_PAGE_SIZE, //
|
|
},
|
|
num,
|
|
regs,
|
|
sbuffer::SBufferIter, //
|
|
};
|
|
|
|
/// Marker type representing the absence of a reply for a command. Commands using this as their
/// reply type are sent using [`Cmdq::send_command_no_wait`].
///
/// Zero-sized: it carries no data and exists purely to select the no-reply send path at the
/// type level.
pub(crate) struct NoReply;
|
|
|
|
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq`] with the information it needs to send
/// a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    type Command: FromBytes + AsBytes;

    /// Type of the reply expected from the GSP, or [`NoReply`] for commands that don't
    /// have a reply.
    type Reply;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`].
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }

    /// Total size of the command (including its variable-length payload) without the
    /// [`GspMsgElement`] header.
    ///
    /// The header size is accounted for separately when queue space is allocated (see
    /// [`Cmdq`]'s allocation path).
    fn size(&self) -> usize {
        size_of::<Self::Command>() + self.variable_payload_len()
    }
}
|
|
|
|
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it.
    ///
    /// The `sbuffer` iterator covers the payload following `msg`; it is made of up to two
    /// slices because the message queue is circular and the payload may wrap around.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
|
|
|
|
/// Number of GSP pages making the [`Msgq`] (0x3f == 63 pages of `GSP_PAGE_SIZE` bytes each).
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;

/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    // One queue slot per GSP page.
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
|
|
|
|
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct Msgq {
    /// Header for sending messages, including the write pointer.
    pub(super) tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer.
    pub(super) rx: MsgqRxHeader,
    /// The message queue proper.
    ///
    /// [`MsgqData`] is page-aligned, so this field starts on a GSP page boundary.
    msgq: MsgqData,
}
|
|
|
|
/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct GspMem {
    /// Self-mapping page table entries.
    ///
    /// One GSP page worth of 64-bit entries (see [`Self::PTE_ARRAY_SIZE`]); filled in at
    /// allocation time to map this very structure.
    ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates. This means that the read pointer here is an
    /// index into the GSP queue.
    ///
    /// This member is read-only for the GSP.
    pub(super) cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates. This means that the read pointer here is an
    /// index into the CPU queue.
    ///
    /// This member is read-only for the driver.
    pub(super) gspq: Msgq,
}

impl GspMem {
    /// Number of PTEs fitting in one GSP page (one `u64` per entry).
    const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}

// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl FromBytes for GspMem {}
|
|
|
|
/// Wrapper around [`GspMem`] to share it with the GPU using a [`Coherent`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(Coherent<GspMem>);
|
|
|
|
impl DmaGspMem {
|
|
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The memory is zero-initialized; only the PTE array and the CPU queue headers are then
    /// written. The `gspq` half is left zeroed (presumably initialized by the GSP itself —
    /// TODO confirm).
    fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
        const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
        const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();

        let gsp_mem = Coherent::<GspMem>::zeroed(dev, GFP_KERNEL)?;

        let start = gsp_mem.dma_handle();
        // Write values one by one to avoid an on-stack instance of `PteArray`.
        for i in 0..GspMem::PTE_ARRAY_SIZE {
            dma_write!(gsp_mem, .ptes.0[i], PteArray::<0>::entry(start, i)?);
        }

        dma_write!(
            gsp_mem,
            .cpuq.tx,
            MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
        );
        dma_write!(gsp_mem, .cpuq.rx, MsgqRxHeader::new());

        Ok(Self(gsp_mem))
    }
|
|
|
|
    /// Returns the region of the CPU message queue that the driver is currently allowed to write
    /// to.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    ///
    /// One slot is always kept out of the returned area (the `- 1` in every branch below), so a
    /// full queue never becomes indistinguishable from an empty one.
    fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
        let tx = self.cpu_write_ptr() as usize;
        let rx = self.gsp_read_ptr() as usize;

        // SAFETY:
        // - We will only access the driver-owned part of the shared memory.
        // - Per the safety statement of the function, no concurrent access will be performed.
        // NOTE(review): this function is not `unsafe` and has no safety statement; the
        // exclusive-access requirement presumably follows from `&mut self` — confirm.
        let gsp_mem = unsafe { &mut *self.0.as_mut() };
        // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `< MSGQ_NUM_PAGES`.
        let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);

        // The area starting at `tx` and ending at `rx - 2` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for writing.

        if rx == 0 {
            // Since `rx` is zero, leave an empty slot at end of the buffer.
            let last = after_tx.len() - 1;
            (&mut after_tx[..last], &mut [])
        } else if rx <= tx {
            // The area is discontiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - 1` is non-negative because `rx != 0` in this branch.
            // - The index does not exceed `before_tx.len()` (which equals `tx`) because
            //   `rx <= tx` in this branch.
            (after_tx, &mut before_tx[..(rx - 1)])
        } else {
            // The area is contiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - tx - 1` is non-negative because `rx > tx` in this branch.
            // - The index does not exceed `after_tx.len()` (which is `MSGQ_NUM_PAGES - tx`)
            //   because `rx < MSGQ_NUM_PAGES` by the `gsp_read_ptr` invariant.
            (&mut after_tx[..(rx - tx - 1)], &mut [])
        }
    }
|
|
|
|
/// Returns the size of the region of the CPU message queue that the driver is currently allowed
|
|
/// to write to, in bytes.
|
|
fn driver_write_area_size(&self) -> usize {
|
|
let tx = self.cpu_write_ptr();
|
|
let rx = self.gsp_read_ptr();
|
|
|
|
// `rx` and `tx` are both in `0..MSGQ_NUM_PAGES` per the invariants of `gsp_read_ptr` and
|
|
// `cpu_write_ptr`. The minimum value case is where `rx == 0` and `tx == MSGQ_NUM_PAGES -
|
|
// 1`, which gives `0 + MSGQ_NUM_PAGES - (MSGQ_NUM_PAGES - 1) - 1 == 0`.
|
|
let slots = (rx + MSGQ_NUM_PAGES - tx - 1) % MSGQ_NUM_PAGES;
|
|
num::u32_as_usize(slots) * GSP_PAGE_SIZE
|
|
}
|
|
|
|
    /// Returns the region of the GSP message queue that the driver is currently allowed to read
    /// from.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
        let tx = self.gsp_write_ptr() as usize;
        let rx = self.cpu_read_ptr() as usize;

        // SAFETY:
        // - We will only access the driver-owned part of the shared memory.
        // - Per the safety statement of the function, no concurrent access will be performed.
        // NOTE(review): this function is not `unsafe` and has no safety statement; the
        // GSP queue is read-only for the driver (see `GspMem::gspq`) — confirm this covers
        // concurrent GSP writes past `tx`.
        let gsp_mem = unsafe { &*self.0.as_ptr() };
        let data = &gsp_mem.gspq.msgq.data;

        // The area starting at `rx` and ending at `tx - 1` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for reading.
        // PANIC:
        // - per the invariant of `cpu_read_ptr`, `rx < MSGQ_NUM_PAGES`
        // - per the invariant of `gsp_write_ptr`, `tx < MSGQ_NUM_PAGES`
        if rx <= tx {
            // The area is contiguous. Empty (`rx == tx`) yields two empty slices.
            (&data[rx..tx], &[])
        } else {
            // The area is discontiguous and wraps around the end of the buffer.
            (&data[rx..], &data[..tx])
        }
    }
|
|
|
|
    /// Allocates a region on the command queue that is large enough to send a command of `size`
    /// bytes, waiting for space to become available based on the provided timeout.
    ///
    /// This returns a [`GspCommand`] ready to be written to by the caller.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command is larger than [`GSP_MSG_QUEUE_ELEMENT_SIZE_MAX`].
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the command header is not properly aligned.
    fn allocate_command(&mut self, size: usize, timeout: Delta) -> Result<GspCommand<'_>> {
        if size_of::<GspMsgElement>() + size > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX {
            return Err(EMSGSIZE);
        }
        // Poll until the writable area can hold the header plus `size` payload bytes.
        read_poll_timeout(
            || Ok(self.driver_write_area_size()),
            |available_bytes| *available_bytes >= size_of::<GspMsgElement>() + size,
            Delta::from_micros(1),
            timeout,
        )?;

        // Get the current writable area as an array of bytes.
        let (slice_1, slice_2) = {
            let (slice_1, slice_2) = self.driver_write_area();

            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
        };

        // Extract area for the `GspMsgElement`. The header always fits into `slice_1` since
        // the write area starts on a page boundary.
        let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;

        // Create the contents area.
        let (slice_1, slice_2) = if slice_1.len() > size {
            // Contents fits entirely in `slice_1`.
            (&mut slice_1[..size], &mut slice_2[0..0])
        } else {
            // Need all of `slice_1` and some of `slice_2`.
            let slice_2_len = size - slice_1.len();
            (slice_1, &mut slice_2[..slice_2_len])
        };

        Ok(GspCommand {
            header,
            contents: (slice_1, slice_2),
        })
    }
|
|
|
|
    // Queue pointer accessors. All of these delegate to `super::fw::gsp_mem`, which accesses
    // the shared `GspMem` headers through the coherent mapping.

    // Returns the index of the memory page the GSP will write the next message to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_write_ptr(&self.0)
    }

    // Returns the index of the memory page the GSP will read the next command from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_read_ptr(&self.0)
    }

    // Returns the index of the memory page the CPU can read the next message from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_read_ptr(&self.0)
    }

    // Informs the GSP that it can send `elem_count` new pages into the message queue.
    fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
    }

    // Returns the index of the memory page the CPU can write the next command to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_write_ptr(&self.0)
    }

    // Informs the GSP that it can process `elem_count` new pages from the command queue.
    fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
    }
|
|
}
|
|
|
|
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`]. The references point directly
/// into the shared command queue buffer, so filling them writes the command in place.
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is zero unless the command
    // loops over the command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
|
|
|
|
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`Cmdq::wait_for_msg`]. The references point directly into the
/// shared message queue buffer.
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is zero unless the message loops
    // over the message queue.
    contents: (&'a [u8], &'a [u8]),
}
|
|
|
|
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
#[pin_data]
pub(crate) struct Cmdq {
    /// Inner mutex-protected state.
    #[pin]
    inner: Mutex<CmdqInner>,
    /// DMA handle of the command queue's shared memory region.
    ///
    /// Cached outside the mutex at construction time so it can be read without taking `inner`'s
    /// lock; it never changes after creation.
    pub(super) dma_handle: DmaAddress,
}
|
|
|
|
impl Cmdq {
|
|
    /// Offset of the data after the PTEs.
    ///
    /// The `CMDQ_OFFSET` and `STATQ_OFFSET` values below are expressed relative to this point
    /// (i.e. relative to the start of `cpuq`), hence the subtraction in their definitions.
    const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);

    /// Offset of command queue ring buffer, relative to [`Self::POST_PTE_OFFSET`].
    pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Offset of message queue ring buffer, relative to [`Self::POST_PTE_OFFSET`].
    pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Number of page table entries for the GSP shared region.
    pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;

    /// Default timeout for receiving a message from the GSP.
    pub(super) const RECEIVE_TIMEOUT: Delta = Delta::from_secs(5);
|
|
|
|
    /// Creates a new command queue for `dev`.
    ///
    /// Returns a pin-initializer; allocation of the shared memory happens inside the
    /// initializer scope.
    pub(crate) fn new(dev: &device::Device<device::Bound>) -> impl PinInit<Self, Error> + '_ {
        pin_init_scope(move || {
            let gsp_mem = DmaGspMem::new(dev)?;

            Ok(try_pin_init!(Self {
                // Capture the DMA handle before `gsp_mem` is moved into the mutex.
                dma_handle: gsp_mem.0.dma_handle(),
                inner <- new_mutex!(CmdqInner {
                    dev: dev.into(),
                    gsp_mem,
                    seq: 0,
                }),
            }))
        })
    }
|
|
|
|
/// Computes the checksum for the message pointed to by `it`.
|
|
///
|
|
/// A message is made of several parts, so `it` is an iterator over byte slices representing
|
|
/// these parts.
|
|
fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
|
|
let sum64 = it
|
|
.enumerate()
|
|
.map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
|
|
.fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));
|
|
|
|
((sum64 >> 32) as u32) ^ (sum64 as u32)
|
|
}
|
|
|
|
    /// Notifies the GSP that we have updated the command queue pointers.
    fn notify_gsp(bar: &Bar0) {
        // The value written is all-zero (zeroed register with address field set to 0);
        // presumably the write itself acts as a doorbell — confirm against the
        // NV_PGSP_QUEUE_HEAD register semantics.
        bar.write_reg(regs::NV_PGSP_QUEUE_HEAD::zeroed().with_address(0u32));
    }
|
|
|
|
    /// Sends `command` to the GSP and waits for the reply.
    ///
    /// Messages with non-matching function codes are silently consumed until the expected reply
    /// arrives.
    ///
    /// The queue is locked for the entire send+receive cycle to ensure that no other command can
    /// be interleaved.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available to send the command, or if the reply is
    ///   not received within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command and reply initializers are propagated as-is.
    pub(crate) fn send_command<M>(&self, bar: &Bar0, command: M) -> Result<M::Reply>
    where
        M: CommandToGsp,
        M::Reply: MessageFromGsp,
        Error: From<M::InitError>,
        Error: From<<M::Reply as MessageFromGsp>::InitError>,
    {
        let mut inner = self.inner.lock();
        inner.send_command(bar, command)?;

        loop {
            match inner.receive_msg::<M::Reply>(Self::RECEIVE_TIMEOUT) {
                Ok(reply) => break Ok(reply),
                // `ERANGE` means a recognized but non-matching message was consumed;
                // keep waiting for the actual reply.
                Err(ERANGE) => continue,
                Err(e) => break Err(e),
            }
        }
    }
|
|
|
|
    /// Sends `command` to the GSP without waiting for a reply.
    ///
    /// Only available for commands whose reply type is [`NoReply`], enforced by the `M` bound.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    pub(crate) fn send_command_no_wait<M>(&self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp<Reply = NoReply>,
        Error: From<M::InitError>,
    {
        self.inner.lock().send_command(bar, command)
    }
|
|
|
|
    /// Receive a message from the GSP.
    ///
    /// Thin locking wrapper; see [`CmdqInner::receive_msg`] for details.
    pub(crate) fn receive_msg<M: MessageFromGsp>(&self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        self.inner.lock().receive_msg(timeout)
    }
|
|
}
|
|
|
|
/// Inner mutex protected state of [`Cmdq`].
struct CmdqInner {
    /// Device this command queue belongs to.
    dev: ARef<device::Device>,
    /// Current command sequence number.
    ///
    /// Incremented after each successfully queued command (see `send_single_command`).
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
|
|
|
|
impl CmdqInner {
|
|
    /// Timeout for waiting for space on the command queue.
    const ALLOCATE_TIMEOUT: Delta = Delta::from_secs(1);

    /// Sends `command` to the GSP, without splitting it.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command exceeds the maximum queue element size.
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_single_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let size_in_bytes = command.size();
        let dst = self
            .gsp_mem
            .allocate_command(size_in_bytes, Self::ALLOCATE_TIMEOUT)?;

        // Extract area for the command itself. The GSP message header and the command header
        // together are guaranteed to fit entirely into a single page, so it's ok to only look
        // at `dst.contents.0` here.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, size_in_bytes, M::FUNCTION);
        // SAFETY: `msg_header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload, which may be empty.
        let mut sbuffer = SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
        command.init_variable_payload(&mut sbuffer)?;
        // The initializer must have written every byte it asked for; leftover space means
        // the command would go out half-initialized.
        if !sbuffer.is_empty() {
            return Err(EIO);
        }
        drop(sbuffer);

        // Compute checksum now that the whole message is ready.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={:?}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }
|
|
|
|
    /// Sends `command` to the GSP.
    ///
    /// The command may be split into multiple messages if it is large: the first part is sent
    /// as-is, followed by a series of [`ContinuationRecord`] commands.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        Error: From<M::InitError>,
    {
        match SplitState::new(command)? {
            SplitState::Single(command) => self.send_single_command(bar, command),
            SplitState::Split(command, mut continuations) => {
                self.send_single_command(bar, command)?;

                // `while let` rather than `for`: each `ContinuationRecord<'_>` borrows from
                // `continuations`, so it presumably cannot be driven by a `for` loop — confirm.
                while let Some(continuation) = continuations.next() {
                    // Turbofish needed because the compiler cannot infer M here.
                    self.send_single_command::<ContinuationRecord<'_>>(bar, continuation)?;
                }

                Ok(())
            }
        }
    }
|
|
|
|
    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`].
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP. A non-empty first slice means the GSP
        // has published at least one page.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            &self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        let payload_length = header.payload_length();

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < payload_length {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > payload_length {
            // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
            (slice_1.split_at(payload_length).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `payload_length`.
                slice_2.split_at(payload_length - slice_1.len()).0,
            )
        };

        // Validate checksum. A valid message XOR-folds to zero — the sender stores a checksum
        // that cancels the rest of the message (see `send_single_command`).
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                &self.dev,
                "GSP RPC: receive: Call {} - bad checksum\n",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }
|
|
|
|
/// Receive a message from the GSP.
|
|
///
|
|
/// The expected message type is specified using the `M` generic parameter. If the pending
|
|
/// message has a different function code, `ERANGE` is returned and the message is consumed.
|
|
///
|
|
/// The read pointer is always advanced past the message, regardless of whether it matched.
|
|
///
|
|
/// # Errors
|
|
///
|
|
/// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
|
|
/// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
|
|
/// message queue.
|
|
/// - `EINVAL` if the function code of the message was not recognized.
|
|
/// - `ERANGE` if the message had a recognized but non-matching function code.
|
|
///
|
|
/// Error codes returned by [`MessageFromGsp::read`] are propagated as-is.
|
|
fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
|
|
where
|
|
// This allows all error types, including `Infallible`, to be used for `M::InitError`.
|
|
Error: From<M::InitError>,
|
|
{
|
|
let message = self.wait_for_msg(timeout)?;
|
|
let function = message.header.function().map_err(|_| EINVAL)?;
|
|
|
|
// Extract the message. Store the result as we want to advance the read pointer even in
|
|
// case of failure.
|
|
let result = if function == M::FUNCTION {
|
|
let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
|
|
let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);
|
|
|
|
M::read(cmd, &mut sbuffer)
|
|
.map_err(|e| e.into())
|
|
.inspect(|_| {
|
|
if !sbuffer.is_empty() {
|
|
dev_warn!(
|
|
&self.dev,
|
|
"GSP message {:?} has unprocessed data\n",
|
|
function
|
|
);
|
|
}
|
|
})
|
|
} else {
|
|
Err(ERANGE)
|
|
};
|
|
|
|
// Advance the read pointer past this message.
|
|
self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
|
|
message.header.length().div_ceil(GSP_PAGE_SIZE),
|
|
)?);
|
|
|
|
result
|
|
}
|
|
}
|