mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
net: tso: Introduce tso_dma_map and helpers
Add struct tso_dma_map to tso.h for tracking DMA addresses of mapped GSO payload data and tso_dma_map_completion_state. The tso_dma_map combines DMA mapping storage with iterator state, allowing drivers to walk pre-mapped DMA regions linearly. Includes fields for the DMA IOVA path (iova_state, iova_offset, total_len) and a fallback per-region path (linear_dma, frags[], frag_idx, offset). The tso_dma_map_completion_state makes the IOVA completion state opaque for drivers. Drivers are expected to allocate this and use the added helpers to update the completion state. Adds skb_frag_phys() to skbuff.h, returning the physical address of a paged fragment's data, which is used by the tso_dma_map helpers introduced in this commit and described below. The added TSO DMA map helpers are: tso_dma_map_init(): DMA-maps the linear payload region and all frags upfront. Prefers the DMA IOVA API for a single contiguous mapping with one IOTLB sync; falls back to per-region dma_map_phys() otherwise. Returns 0 on success, cleans up partial mappings on failure. tso_dma_map_cleanup(): Handles both IOVA and fallback teardown paths. tso_dma_map_count(): counts how many descriptors the next N bytes of payload will need. Returns 1 if IOVA is used since the mapping is contiguous. tso_dma_map_next(): yields the next (dma_addr, chunk_len) pair. On the IOVA path, each segment is a single contiguous chunk. On the fallback path, indicates when a chunk starts a new DMA mapping so the driver can set dma_unmap_len on that descriptor for completion-time unmapping. tso_dma_map_completion_save(): updates the completion state. Drivers will call this at xmit time. tso_dma_map_complete(): tears down the mapping at completion time and returns true if the IOVA path was used. If it was not used, this is a no-op and returns false.
Suggested-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Joe Damato <joe@dama.to> Link: https://patch.msgid.link/20260408230607.2019402-2-joe@dama.to Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
committed by
Jakub Kicinski
parent
006679268a
commit
82db77f6fb
@@ -3,6 +3,7 @@
|
||||
#define _TSO_H
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <net/ip.h>
|
||||
|
||||
#define TSO_HEADER_SIZE 256
|
||||
@@ -28,4 +29,103 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
|
||||
void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
|
||||
int tso_start(struct sk_buff *skb, struct tso_t *tso);
|
||||
|
||||
/**
 * struct tso_dma_map - DMA mapping state for GSO payload
 * @dev: device used for DMA mapping
 * @skb: the GSO skb being mapped
 * @hdr_len: per-segment header length
 * @iova_state: DMA IOVA state (when IOMMU available)
 * @iova_offset: global byte offset into IOVA range (IOVA path only)
 * @total_len: total payload length
 * @frag_idx: current region (-1 = linear, 0..nr_frags-1 = frag)
 * @offset: byte offset within current region
 * @linear_dma: DMA address of the linear payload
 * @linear_len: length of the linear payload
 * @nr_frags: number of frags successfully DMA-mapped
 * @frags: per-frag DMA address and length
 *
 * DMA-maps the payload regions of a GSO skb (linear data + frags).
 * Prefers the DMA IOVA API for a single contiguous mapping with one
 * IOTLB sync; falls back to per-region dma_map_phys() otherwise.
 */
struct tso_dma_map {
	struct device *dev;
	const struct sk_buff *skb;
	unsigned int hdr_len;
	/* IOVA path */
	struct dma_iova_state iova_state;
	size_t iova_offset;
	size_t total_len;
	/* Fallback path if IOVA path fails */
	int frag_idx;
	unsigned int offset;
	dma_addr_t linear_dma;
	unsigned int linear_len;
	unsigned int nr_frags;
	struct {
		dma_addr_t dma;
		unsigned int len;
	} frags[MAX_SKB_FRAGS];	/* one entry per mapped skb fragment */
};
|
||||
|
||||
/**
 * struct tso_dma_map_completion_state - Completion-time cleanup state
 * @iova_state: DMA IOVA state (when IOMMU available)
 * @total_len: total payload length of the IOVA mapping
 *
 * Opaque-to-the-driver snapshot of the IOVA mapping state. Drivers
 * store this on their SW ring at xmit time via
 * tso_dma_map_completion_save(), then call tso_dma_map_complete() at
 * completion time.
 */
struct tso_dma_map_completion_state {
	struct dma_iova_state iova_state;
	size_t total_len;
};
|
||||
|
||||
/* Map the GSO payload (linear data + frags) upfront; returns 0 on success
 * and unwinds any partial mappings on failure.
 */
int tso_dma_map_init(struct tso_dma_map *map, struct device *dev,
		     const struct sk_buff *skb, unsigned int hdr_len);
/* Tear down an xmit-time mapping; handles both IOVA and fallback paths. */
void tso_dma_map_cleanup(struct tso_dma_map *map);
/* Number of descriptors the next @len payload bytes will need
 * (per commit description: 1 on the contiguous IOVA path).
 */
unsigned int tso_dma_map_count(struct tso_dma_map *map, unsigned int len);
/* Yield the next (@addr, @chunk_len) pair for up to @seg_remaining bytes.
 * NOTE(review): on the fallback path the return/@mapping_len apparently
 * signal when a chunk starts a new DMA mapping, so the driver can set
 * dma_unmap_len on that descriptor — confirm against the implementation.
 */
bool tso_dma_map_next(struct tso_dma_map *map, dma_addr_t *addr,
		      unsigned int *chunk_len, unsigned int *mapping_len,
		      unsigned int seg_remaining);
|
||||
|
||||
/**
|
||||
* tso_dma_map_completion_save - save state needed for completion-time cleanup
|
||||
* @map: the xmit-time DMA map
|
||||
* @cstate: driver-owned storage that persists until completion
|
||||
*
|
||||
* Should be called at xmit time to update the completion state and later passed
|
||||
* to tso_dma_map_complete().
|
||||
*/
|
||||
static inline void
|
||||
tso_dma_map_completion_save(const struct tso_dma_map *map,
|
||||
struct tso_dma_map_completion_state *cstate)
|
||||
{
|
||||
cstate->iova_state = map->iova_state;
|
||||
cstate->total_len = map->total_len;
|
||||
}
|
||||
|
||||
/**
|
||||
* tso_dma_map_complete - tear down mapping at completion time
|
||||
* @dev: the device that owns the mapping
|
||||
* @cstate: state saved by tso_dma_map_completion_save()
|
||||
*
|
||||
* Return: true if the IOVA path was used and the mapping has been
|
||||
* destroyed; false if the fallback per-region path was used and the
|
||||
* driver must unmap via its normal completion path.
|
||||
*/
|
||||
static inline bool
|
||||
tso_dma_map_complete(struct device *dev,
|
||||
struct tso_dma_map_completion_state *cstate)
|
||||
{
|
||||
if (dma_use_iova(&cstate->iova_state)) {
|
||||
dma_iova_destroy(dev, &cstate->iova_state, cstate->total_len,
|
||||
DMA_TO_DEVICE, 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* _TSO_H */
|
||||
|
||||
Reference in New Issue
Block a user