mirror of
https://github.com/torvalds/linux.git
synced 2026-04-26 02:22:28 -04:00
Add page reclamation action to tlb inval backend. The page reclamation action is paired with range tlb invalidations so both are issued at the same time. Page reclamation will issue the TLB invalidation with an invalid seqno and a H2G page reclamation action with the fence's corresponding seqno and handle the fence accordingly on page reclaim action done handler. If page reclamation fails, tlb timeout handler will be responsible for signalling fence and cleaning up. v2: - add send_page_reclaim to patch. - Remove flush_cache and use prl_sa pointer to determine PPC flush instead of explicit bool. Add NULL as fallback for others. (Matthew B) v3: - Add comments for flush_cache with media. Signed-off-by: Brian Nguyen <brian3.nguyen@intel.com> Suggested-by: Matthew Brost <matthew.brost@intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Link: https://patch.msgid.link/20251212213225.3564537-20-brian3.nguyen@intel.com
47 lines
1.2 KiB
C
47 lines
1.2 KiB
C
/* SPDX-License-Identifier: MIT */
|
|
/*
|
|
* Copyright © 2025 Intel Corporation
|
|
*/
|
|
|
|
#ifndef _XE_TLB_INVAL_H_
|
|
#define _XE_TLB_INVAL_H_
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include "xe_tlb_inval_types.h"
|
|
|
|
struct xe_gt;
|
|
struct xe_guc;
|
|
struct xe_vm;
|
|
|
|
/* One-time early setup of @gt's TLB invalidation state (before GuC load) */
int xe_gt_tlb_inval_init_early(struct xe_gt *gt);

/* Reset in-flight TLB invalidation tracking, e.g. across a GT/GuC reset */
void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval);
/* Issue an invalidation of all TLB entries; @fence signals on completion */
int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
		     struct xe_tlb_inval_fence *fence);
/* GGTT TLB invalidation; returns 0 or a negative errno */
int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval);
/* TLB invalidation scoped to @vm's address space */
void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm);
/*
 * Range-based TLB invalidation for [start, end) in address space @asid.
 * NOTE(review): @prl_sa presumably pairs the invalidation with a page
 * reclamation action when non-NULL (pass NULL to skip page reclaim) --
 * confirm against the backend implementation.
 */
int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
		       struct xe_tlb_inval_fence *fence,
		       u64 start, u64 end, u32 asid, struct drm_suballoc *prl_sa);

/*
 * Initialize @fence for use with @tlb_inval.
 * NOTE(review): @stack looks like it marks a stack-allocated (non-refcounted)
 * fence -- verify against the fence release path in xe_tlb_inval.c.
 */
void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
			     struct xe_tlb_inval_fence *fence,
			     bool stack);
|
|
|
|
/**
 * xe_tlb_inval_fence_wait() - TLB invalidation fence wait
 * @fence: TLB invalidation fence to wait on
 *
 * Wait on a TLB invalidation fence until it signals, non interruptible
 */
static inline void
xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
{
	/* intr=false: uninterruptible wait, so the return value (which
	 * could only report -ERESTARTSYS for interruptible waits) can be
	 * safely ignored.
	 */
	dma_fence_wait(&fence->base, false);
}
|
|
|
|
/*
 * Completion handler invoked by the invalidation backend.
 * NOTE(review): presumably signals/retires fences up to @seqno -- see the
 * definition in xe_tlb_inval.c for exact semantics.
 */
void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno);
|
|
|
|
#endif /* _XE_TLB_INVAL_H_ */
|