mirror of
https://github.com/torvalds/linux.git
synced 2026-05-05 23:05:25 -04:00
Merge tag 'drm-misc-next-2021-09-16' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for $kernel-version:
UAPI Changes:
Cross-subsystem Changes:
- dma-buf: Avoid a warning with some allocations, Remove
DMA_FENCE_TRACE macros
Core Changes:
- bridge: New helper to get rid of panels in drivers
- fence: Improve dma_fence_add_callback documentation, Improve
dma_fence_ops->wait documentation
- ioctl: Unexport drm_ioctl_permit
- lease: Documentation improvements
- fourcc: Add new macro to determine the modifier vendor
- quirks: Add the Steam Deck, Chuwi HiBook, Chuwi Hi10 Pro, Samsung
Galaxy Book 10.6, KD Kurio Smart C15200 2-in-1, Lenovo Ideapad D330
- resv: Improve the documentation
- shmem-helpers: Allocate WC pages on x86, Switch to vmf_insert_pfn
- sched: Fix for a timer being canceled too soon, Avoid null pointer
dereference if the fence is null in drm_sched_fence_free, Convert
drivers to rely on its dependency tracking
- ttm: Switch to kerneldoc, new helper to clear all DMA mappings, pool
shrinker optimization, Remove ttm_tt_destroy_common, Fix for
unbinding on multiple drivers
Driver Changes:
- bochs: New PCI IDs
- msm: Fence ordering improvements
- stm: Add layer alpha support, zpos
- v3d: Fix for a Vulkan CTS failure
- vc4: Conversion to the new bridge helpers
- vgem: Use shmem helpers
- virtio: Support mapping exported vram
- zte: Remove obsolete driver
- bridge: Probe improvements for it66121, enable DSI EOTP for anx7625,
error propagation improvements for anx7625
- panels: 60fps mode for otm8009a, New driver for Samsung S6D27A1
Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Thu 16 Sep 2021 17:30:50 AEST
# gpg: using EDDSA key 5C1337A45ECA9AEB89060E9EE3EF0D6F671851C5
# gpg: Can't check signature: No public key
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210916073132.ptbbmjetm7v3ufq3@gilmour
This commit is contained in:
@@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER
|
||||
|
||||
config DRM_GEM_SHMEM_HELPER
|
||||
bool
|
||||
depends on DRM
|
||||
depends on DRM && MMU
|
||||
help
|
||||
Choose this if you need the GEM shmem helper functions
|
||||
|
||||
@@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig"
|
||||
|
||||
config DRM_VGEM
|
||||
tristate "Virtual GEM provider"
|
||||
depends on DRM
|
||||
depends on DRM && MMU
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
help
|
||||
Choose this option to get a virtual graphics memory manager,
|
||||
as used by Mesa's software renderer for enhanced performance.
|
||||
@@ -279,7 +280,7 @@ config DRM_VGEM
|
||||
|
||||
config DRM_VKMS
|
||||
tristate "Virtual KMS (EXPERIMENTAL)"
|
||||
depends on DRM
|
||||
depends on DRM && MMU
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
select CRC32
|
||||
@@ -351,8 +352,6 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/mediatek/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/zte/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/mxsfb/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/meson/Kconfig"
|
||||
|
||||
@@ -113,7 +113,6 @@ obj-y += bridge/
|
||||
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
|
||||
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
|
||||
obj-y += hisilicon/
|
||||
obj-$(CONFIG_DRM_ZTE) += zte/
|
||||
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
|
||||
obj-y += tiny/
|
||||
obj-$(CONFIG_DRM_PL111) += pl111/
|
||||
|
||||
@@ -1222,6 +1222,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
||||
if (r)
|
||||
goto error_unlock;
|
||||
|
||||
drm_sched_job_arm(&job->base);
|
||||
|
||||
/* No memory allocation is allowed while holding the notifier lock.
|
||||
* The lock is held until amdgpu_cs_submit is finished and fence is
|
||||
* added to BOs.
|
||||
@@ -1259,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
||||
|
||||
trace_amdgpu_cs_ioctl(job);
|
||||
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
|
||||
drm_sched_entity_push_job(&job->base, entity);
|
||||
drm_sched_entity_push_job(&job->base);
|
||||
|
||||
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
|
||||
|
||||
|
||||
@@ -266,7 +266,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
|
||||
struct amdgpu_fence_driver *drv = &ring->fence_drv;
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
uint32_t seq, last_seq;
|
||||
int r;
|
||||
|
||||
do {
|
||||
last_seq = atomic_read(&ring->fence_drv.last_seq);
|
||||
@@ -298,12 +297,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
|
||||
if (!fence)
|
||||
continue;
|
||||
|
||||
r = dma_fence_signal(fence);
|
||||
if (!r)
|
||||
DMA_FENCE_TRACE(fence, "signaled from irq context\n");
|
||||
else
|
||||
BUG();
|
||||
|
||||
dma_fence_signal(fence);
|
||||
dma_fence_put(fence);
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
@@ -684,8 +678,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
|
||||
if (!timer_pending(&ring->fence_drv.fallback_timer))
|
||||
amdgpu_fence_schedule_fallback(ring);
|
||||
|
||||
DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@@ -182,9 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
drm_sched_job_arm(&job->base);
|
||||
|
||||
*f = dma_fence_get(&job->base.s_fence->finished);
|
||||
amdgpu_job_free_resources(job);
|
||||
drm_sched_entity_push_job(&job->base, entity);
|
||||
drm_sched_entity_push_job(&job->base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1066,8 +1066,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
amdgpu_ttm_backend_unbind(bdev, ttm);
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
if (gtt->usertask)
|
||||
put_task_struct(gtt->usertask);
|
||||
|
||||
@@ -1148,6 +1146,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct amdgpu_device *adev;
|
||||
|
||||
amdgpu_ttm_backend_unbind(bdev, ttm);
|
||||
|
||||
if (gtt->userptr) {
|
||||
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
|
||||
kfree(ttm->sg);
|
||||
|
||||
@@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
|
||||
return !malidp_hw_format_is_afbc_only(format);
|
||||
}
|
||||
|
||||
if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
|
||||
if (!fourcc_mod_is_vendor(modifier, ARM)) {
|
||||
DRM_ERROR("Unknown modifier (not Arm)\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1,21 +1,8 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* adv7511_cec.c - Analog Devices ADV7511/33 cec driver
|
||||
*
|
||||
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
|
||||
*
|
||||
* This program is free software; you may redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 of the License.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
|
||||
@@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx,
|
||||
ret = sp_tx_aux_rd(ctx, 0xf1);
|
||||
|
||||
if (ret) {
|
||||
sp_tx_rst_aux(ctx);
|
||||
ret = sp_tx_rst_aux(ctx);
|
||||
DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
|
||||
} else {
|
||||
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
|
||||
@@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx,
|
||||
if (cnt > EDID_TRY_CNT)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int segments_edid_read(struct anx7625_data *ctx,
|
||||
@@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
|
||||
if (cnt > EDID_TRY_CNT)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int sp_tx_edid_read(struct anx7625_data *ctx,
|
||||
@@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
|
||||
if (g_edid_break == 1)
|
||||
break;
|
||||
|
||||
segments_edid_read(ctx, count / 2,
|
||||
pblock_buf, offset);
|
||||
ret = segments_edid_read(ctx, count / 2,
|
||||
pblock_buf, offset);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
memcpy(&pedid_blocks_buf[edid_pos],
|
||||
pblock_buf,
|
||||
MAX_DPCD_BUFFER_SIZE);
|
||||
@@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
|
||||
if (g_edid_break == 1)
|
||||
break;
|
||||
|
||||
segments_edid_read(ctx, count / 2,
|
||||
pblock_buf, offset);
|
||||
ret = segments_edid_read(ctx, count / 2,
|
||||
pblock_buf, offset);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
memcpy(&pedid_blocks_buf[edid_pos],
|
||||
pblock_buf,
|
||||
MAX_DPCD_BUFFER_SIZE);
|
||||
@@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
|
||||
}
|
||||
|
||||
/* Reset aux channel */
|
||||
sp_tx_rst_aux(ctx);
|
||||
ret = sp_tx_rst_aux(ctx);
|
||||
if (ret < 0) {
|
||||
DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return (blocks_num + 1);
|
||||
}
|
||||
@@ -1325,7 +1335,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
|
||||
dsi->format = MIPI_DSI_FMT_RGB888;
|
||||
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
|
||||
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
|
||||
MIPI_DSI_MODE_NO_EOT_PACKET |
|
||||
MIPI_DSI_MODE_VIDEO_HSE;
|
||||
|
||||
if (mipi_dsi_attach(dsi) < 0) {
|
||||
|
||||
@@ -1171,7 +1171,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct cdns_dsi *dsi;
|
||||
struct cdns_dsi_input *input;
|
||||
struct resource *res;
|
||||
int ret, irq;
|
||||
u32 val;
|
||||
|
||||
@@ -1183,8 +1182,7 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
|
||||
|
||||
input = &dsi->input;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
dsi->regs = devm_ioremap_resource(&pdev->dev, res);
|
||||
dsi->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(dsi->regs))
|
||||
return PTR_ERR(dsi->regs);
|
||||
|
||||
|
||||
@@ -889,7 +889,7 @@ unlock:
|
||||
static int it66121_probe(struct i2c_client *client,
|
||||
const struct i2c_device_id *id)
|
||||
{
|
||||
u32 vendor_ids[2], device_ids[2], revision_id;
|
||||
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
|
||||
struct device_node *ep;
|
||||
int ret;
|
||||
struct it66121_ctx *ctx;
|
||||
@@ -924,6 +924,9 @@ static int it66121_probe(struct i2c_client *client,
|
||||
ctx->next_bridge = of_drm_find_bridge(ep);
|
||||
of_node_put(ep);
|
||||
|
||||
if (!ctx->next_bridge)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
i2c_set_clientdata(client, ctx);
|
||||
mutex_init(&ctx->lock);
|
||||
|
||||
|
||||
@@ -18,16 +18,18 @@
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#define PAGE2_GPIO_H 0xa7
|
||||
#define PS_GPIO9 BIT(1)
|
||||
#define PS_GPIO9 BIT(1)
|
||||
#define PAGE2_I2C_BYPASS 0xea
|
||||
#define I2C_BYPASS_EN 0xd0
|
||||
#define I2C_BYPASS_EN 0xd0
|
||||
#define PAGE2_MCS_EN 0xf3
|
||||
#define MCS_EN BIT(0)
|
||||
#define MCS_EN BIT(0)
|
||||
|
||||
#define PAGE3_SET_ADD 0xfe
|
||||
#define VDO_CTL_ADD 0x13
|
||||
#define VDO_DIS 0x18
|
||||
#define VDO_EN 0x1c
|
||||
#define DP_NUM_LANES 4
|
||||
#define VDO_CTL_ADD 0x13
|
||||
#define VDO_DIS 0x18
|
||||
#define VDO_EN 0x1c
|
||||
|
||||
#define NUM_MIPI_LANES 4
|
||||
|
||||
/*
|
||||
* PS8640 uses multiple addresses:
|
||||
@@ -254,7 +256,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
|
||||
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
|
||||
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
|
||||
dsi->format = MIPI_DSI_FMT_RGB888;
|
||||
dsi->lanes = DP_NUM_LANES;
|
||||
dsi->lanes = NUM_MIPI_LANES;
|
||||
ret = mipi_dsi_attach(dsi);
|
||||
if (ret)
|
||||
goto err_dsi_attach;
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
#include <drm/drm_atomic_state_helper.h>
|
||||
#include <drm/drm_bridge.h>
|
||||
#include <drm/drm_encoder.h>
|
||||
#include <drm/drm_of.h>
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#include "drm_crtc_internal.h"
|
||||
@@ -51,10 +52,8 @@
|
||||
*
|
||||
* Display drivers are responsible for linking encoders with the first bridge
|
||||
* in the chains. This is done by acquiring the appropriate bridge with
|
||||
* of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
|
||||
* panel with drm_panel_bridge_add_typed() (or the managed version
|
||||
* devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
|
||||
* attached to the encoder with a call to drm_bridge_attach().
|
||||
* devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
|
||||
* encoder with a call to drm_bridge_attach().
|
||||
*
|
||||
* Bridges are responsible for linking themselves with the next bridge in the
|
||||
* chain, if any. This is done the same way as for encoders, with the call to
|
||||
@@ -1233,6 +1232,40 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(of_drm_find_bridge);
|
||||
|
||||
/**
|
||||
* devm_drm_of_get_bridge - Return next bridge in the chain
|
||||
* @dev: device to tie the bridge lifetime to
|
||||
* @np: device tree node containing encoder output ports
|
||||
* @port: port in the device tree node
|
||||
* @endpoint: endpoint in the device tree node
|
||||
*
|
||||
* Given a DT node's port and endpoint number, finds the connected node
|
||||
* and returns the associated bridge if any, or creates and returns a
|
||||
* drm panel bridge instance if a panel is connected.
|
||||
*
|
||||
* Returns a pointer to the bridge if successful, or an error pointer
|
||||
* otherwise.
|
||||
*/
|
||||
struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
|
||||
struct device_node *np,
|
||||
u32 port, u32 endpoint)
|
||||
{
|
||||
struct drm_bridge *bridge;
|
||||
struct drm_panel *panel;
|
||||
int ret;
|
||||
|
||||
ret = drm_of_find_panel_or_bridge(np, port, endpoint,
|
||||
&panel, &bridge);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
if (panel)
|
||||
bridge = devm_drm_panel_bridge_add(dev, panel);
|
||||
|
||||
return bridge;
|
||||
}
|
||||
EXPORT_SYMBOL(devm_drm_of_get_bridge);
|
||||
#endif
|
||||
|
||||
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
|
||||
|
||||
@@ -65,6 +65,14 @@
|
||||
* support can instead use e.g. drm_helper_hpd_irq_event().
|
||||
*/
|
||||
|
||||
/*
|
||||
* Global connector list for drm_connector_find_by_fwnode().
|
||||
* Note drm_connector_[un]register() first take connector->lock and then
|
||||
* take the connector_list_lock.
|
||||
*/
|
||||
static DEFINE_MUTEX(connector_list_lock);
|
||||
static LIST_HEAD(connector_list);
|
||||
|
||||
struct drm_conn_prop_enum_list {
|
||||
int type;
|
||||
const char *name;
|
||||
@@ -267,6 +275,7 @@ int drm_connector_init(struct drm_device *dev,
|
||||
goto out_put_type_id;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&connector->global_connector_list_entry);
|
||||
INIT_LIST_HEAD(&connector->probed_modes);
|
||||
INIT_LIST_HEAD(&connector->modes);
|
||||
mutex_init(&connector->mutex);
|
||||
@@ -474,6 +483,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
|
||||
drm_mode_object_unregister(dev, &connector->base);
|
||||
kfree(connector->name);
|
||||
connector->name = NULL;
|
||||
fwnode_handle_put(connector->fwnode);
|
||||
connector->fwnode = NULL;
|
||||
spin_lock_irq(&dev->mode_config.connector_list_lock);
|
||||
list_del(&connector->head);
|
||||
dev->mode_config.num_connector--;
|
||||
@@ -532,6 +543,9 @@ int drm_connector_register(struct drm_connector *connector)
|
||||
/* Let userspace know we have a new connector */
|
||||
drm_sysfs_hotplug_event(connector->dev);
|
||||
|
||||
mutex_lock(&connector_list_lock);
|
||||
list_add_tail(&connector->global_connector_list_entry, &connector_list);
|
||||
mutex_unlock(&connector_list_lock);
|
||||
goto unlock;
|
||||
|
||||
err_debugfs:
|
||||
@@ -560,6 +574,10 @@ void drm_connector_unregister(struct drm_connector *connector)
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&connector_list_lock);
|
||||
list_del_init(&connector->global_connector_list_entry);
|
||||
mutex_unlock(&connector_list_lock);
|
||||
|
||||
if (connector->funcs->early_unregister)
|
||||
connector->funcs->early_unregister(connector);
|
||||
|
||||
@@ -2543,6 +2561,67 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_connector_find_by_fwnode - Find a connector based on the associated fwnode
|
||||
* @fwnode: fwnode for which to find the matching drm_connector
|
||||
*
|
||||
* This functions looks up a drm_connector based on its associated fwnode. When
|
||||
* a connector is found a reference to the connector is returned. The caller must
|
||||
* call drm_connector_put() to release this reference when it is done with the
|
||||
* connector.
|
||||
*
|
||||
* Returns: A reference to the found connector or an ERR_PTR().
|
||||
*/
|
||||
struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct drm_connector *connector, *found = ERR_PTR(-ENODEV);
|
||||
|
||||
if (!fwnode)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
mutex_lock(&connector_list_lock);
|
||||
|
||||
list_for_each_entry(connector, &connector_list, global_connector_list_entry) {
|
||||
if (connector->fwnode == fwnode ||
|
||||
(connector->fwnode && connector->fwnode->secondary == fwnode)) {
|
||||
drm_connector_get(connector);
|
||||
found = connector;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&connector_list_lock);
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
|
||||
* @connector: connector to report the event on
|
||||
*
|
||||
* On some hardware a hotplug event notification may come from outside the display
|
||||
* driver / device. An example of this is some USB Type-C setups where the hardware
|
||||
* muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
|
||||
* status bit to the GPU's DP HPD pin.
|
||||
*
|
||||
* This function can be used to report these out-of-band events after obtaining
|
||||
* a drm_connector reference through calling drm_connector_find_by_fwnode().
|
||||
*/
|
||||
void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
|
||||
connector = drm_connector_find_by_fwnode(connector_fwnode);
|
||||
if (IS_ERR(connector))
|
||||
return;
|
||||
|
||||
if (connector->funcs->oob_hotplug_event)
|
||||
connector->funcs->oob_hotplug_event(connector);
|
||||
|
||||
drm_connector_put(connector);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
|
||||
|
||||
|
||||
/**
|
||||
* DOC: Tile group
|
||||
|
||||
@@ -58,6 +58,7 @@ struct drm_property;
|
||||
struct edid;
|
||||
struct kref;
|
||||
struct work_struct;
|
||||
struct fwnode_handle;
|
||||
|
||||
/* drm_crtc.c */
|
||||
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
|
||||
@@ -186,6 +187,7 @@ int drm_connector_set_obj_prop(struct drm_mode_object *obj,
|
||||
int drm_connector_create_standard_properties(struct drm_device *dev);
|
||||
const char *drm_get_connector_force_name(enum drm_connector_force force);
|
||||
void drm_connector_free_work_fn(struct work_struct *work);
|
||||
struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode);
|
||||
|
||||
/* IOCTL */
|
||||
int drm_connector_property_set_ioctl(struct drm_device *dev,
|
||||
|
||||
@@ -10,6 +10,10 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
#include <asm/set_memory.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm.h>
|
||||
#include <drm/drm_device.h>
|
||||
#include <drm/drm_drv.h>
|
||||
@@ -162,6 +166,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
|
||||
return PTR_ERR(pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Allocating WC pages which are correctly flushed is only
|
||||
* supported on x86. Ideal solution would be a GFP_WC flag, which also
|
||||
* ttm_pool.c could use.
|
||||
*/
|
||||
#ifdef CONFIG_X86
|
||||
if (shmem->map_wc)
|
||||
set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
|
||||
#endif
|
||||
|
||||
shmem->pages = pages;
|
||||
|
||||
return 0;
|
||||
@@ -203,6 +217,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
|
||||
if (--shmem->pages_use_count > 0)
|
||||
return;
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
if (shmem->map_wc)
|
||||
set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
|
||||
#endif
|
||||
|
||||
drm_gem_put_pages(obj, shmem->pages,
|
||||
shmem->pages_mark_dirty_on_put,
|
||||
shmem->pages_mark_accessed_on_put);
|
||||
@@ -542,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
|
||||
} else {
|
||||
page = shmem->pages[page_offset];
|
||||
|
||||
ret = vmf_insert_page(vma, vmf->address, page);
|
||||
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
|
||||
}
|
||||
|
||||
mutex_unlock(&shmem->pages_lock);
|
||||
@@ -612,7 +631,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
|
||||
return ret;
|
||||
}
|
||||
|
||||
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
|
||||
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
|
||||
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
||||
if (shmem->map_wc)
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
|
||||
@@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
|
||||
|
||||
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
|
||||
{
|
||||
ttm_tt_destroy_common(bdev, tt);
|
||||
ttm_tt_fini(tt);
|
||||
kfree(tt);
|
||||
}
|
||||
|
||||
@@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data,
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_ioctl_permit - Check ioctl permissions against caller
|
||||
*
|
||||
* @flags: ioctl permission flags.
|
||||
* @file_priv: Pointer to struct drm_file identifying the caller.
|
||||
*
|
||||
* Checks whether the caller is allowed to run an ioctl with the
|
||||
* indicated permissions.
|
||||
*
|
||||
* Returns:
|
||||
* Zero if allowed, -EACCES otherwise.
|
||||
*/
|
||||
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
|
||||
static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
|
||||
{
|
||||
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
|
||||
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
|
||||
@@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_ioctl_permit);
|
||||
|
||||
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
|
||||
[DRM_IOCTL_NR(ioctl)] = { \
|
||||
@@ -725,7 +712,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
|
||||
};
|
||||
|
||||
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
|
||||
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
|
||||
|
||||
/**
|
||||
* DOC: driver specific ioctls
|
||||
@@ -834,8 +821,8 @@ long drm_ioctl(struct file *filp,
|
||||
if (drm_dev_is_unplugged(dev))
|
||||
return -ENODEV;
|
||||
|
||||
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
|
||||
return -ENOTTY;
|
||||
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
|
||||
return -ENOTTY;
|
||||
|
||||
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
|
||||
|
||||
|
||||
@@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware,
|
||||
|
||||
static int __init drm_kms_helper_init(void)
|
||||
{
|
||||
/*
|
||||
* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
|
||||
* but the module doesn't depend on any fb console symbols. At least
|
||||
* attempt to load fbcon to avoid leaving the system without a usable
|
||||
* console.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
|
||||
IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
|
||||
!IS_ENABLED(CONFIG_EXPERT))
|
||||
request_module_nowait("fbcon");
|
||||
|
||||
return drm_dp_aux_dev_init();
|
||||
}
|
||||
|
||||
|
||||
@@ -231,6 +231,9 @@ EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
|
||||
* return either the associated struct drm_panel or drm_bridge device. Either
|
||||
* @panel or @bridge must not be NULL.
|
||||
*
|
||||
* This function is deprecated and should not be used in new drivers. Use
|
||||
* devm_drm_of_get_bridge() instead.
|
||||
*
|
||||
* Returns zero if successful, or one of the standard error codes if it fails.
|
||||
*/
|
||||
int drm_of_find_panel_or_bridge(const struct device_node *np,
|
||||
|
||||
@@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
|
||||
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
|
||||
};
|
||||
|
||||
static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
|
||||
.width = 1280,
|
||||
.height = 1920,
|
||||
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
|
||||
};
|
||||
|
||||
static const struct dmi_system_id orientation_data[] = {
|
||||
{ /* Acer One 10 (S1003) */
|
||||
.matches = {
|
||||
@@ -134,6 +140,20 @@ static const struct dmi_system_id orientation_data[] = {
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
|
||||
},
|
||||
.driver_data = (void *)&lcd800x1280_rightside_up,
|
||||
}, { /* Chuwi HiBook (CWI514) */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
|
||||
/* Above matches are too generic, add bios-date match */
|
||||
DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
|
||||
},
|
||||
.driver_data = (void *)&lcd1200x1920_rightside_up,
|
||||
}, { /* Chuwi Hi10 Pro (CWI529) */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
|
||||
},
|
||||
.driver_data = (void *)&lcd1200x1920_rightside_up,
|
||||
}, { /* GPD MicroPC (generic strings, also match on bios date) */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
|
||||
@@ -193,6 +213,13 @@ static const struct dmi_system_id orientation_data[] = {
|
||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
|
||||
},
|
||||
.driver_data = (void *)&itworks_tw891,
|
||||
}, { /* KD Kurio Smart C15200 2-in-1 */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
|
||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
|
||||
},
|
||||
.driver_data = (void *)&lcd800x1280_rightside_up,
|
||||
}, { /*
|
||||
* Lenovo Ideapad Miix 310 laptop, only some production batches
|
||||
* have a portrait screen, the resolution checks makes the quirk
|
||||
@@ -211,10 +238,15 @@ static const struct dmi_system_id orientation_data[] = {
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
|
||||
},
|
||||
.driver_data = (void *)&lcd800x1280_rightside_up,
|
||||
}, { /* Lenovo Ideapad D330 */
|
||||
}, { /* Lenovo Ideapad D330-10IGM (HD) */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
|
||||
},
|
||||
.driver_data = (void *)&lcd800x1280_rightside_up,
|
||||
}, { /* Lenovo Ideapad D330-10IGM (FHD) */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
|
||||
},
|
||||
.driver_data = (void *)&lcd1200x1920_rightside_up,
|
||||
@@ -225,6 +257,19 @@ static const struct dmi_system_id orientation_data[] = {
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
|
||||
},
|
||||
.driver_data = (void *)&onegx1_pro,
|
||||
}, { /* Samsung GalaxyBook 10.6 */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
|
||||
},
|
||||
.driver_data = (void *)&lcd1280x1920_rightside_up,
|
||||
}, { /* Valve Steam Deck */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
|
||||
},
|
||||
.driver_data = (void *)&lcd800x1280_rightside_up,
|
||||
}, { /* VIOS LTH17 */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
* Copyright (c) 2003-2004 IBM Corp.
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
@@ -50,8 +51,45 @@ static struct device_type drm_sysfs_device_minor = {
|
||||
.name = "drm_minor"
|
||||
};
|
||||
|
||||
static struct device_type drm_sysfs_device_connector = {
|
||||
.name = "drm_connector",
|
||||
};
|
||||
|
||||
struct class *drm_class;
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
static bool drm_connector_acpi_bus_match(struct device *dev)
|
||||
{
|
||||
return dev->type == &drm_sysfs_device_connector;
|
||||
}
|
||||
|
||||
static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev)
|
||||
{
|
||||
struct drm_connector *connector = to_drm_connector(dev);
|
||||
|
||||
return to_acpi_device_node(connector->fwnode);
|
||||
}
|
||||
|
||||
static struct acpi_bus_type drm_connector_acpi_bus = {
|
||||
.name = "drm_connector",
|
||||
.match = drm_connector_acpi_bus_match,
|
||||
.find_companion = drm_connector_acpi_find_companion,
|
||||
};
|
||||
|
||||
static void drm_sysfs_acpi_register(void)
|
||||
{
|
||||
register_acpi_bus_type(&drm_connector_acpi_bus);
|
||||
}
|
||||
|
||||
static void drm_sysfs_acpi_unregister(void)
|
||||
{
|
||||
unregister_acpi_bus_type(&drm_connector_acpi_bus);
|
||||
}
|
||||
#else
|
||||
static void drm_sysfs_acpi_register(void) { }
|
||||
static void drm_sysfs_acpi_unregister(void) { }
|
||||
#endif
|
||||
|
||||
static char *drm_devnode(struct device *dev, umode_t *mode)
|
||||
{
|
||||
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
|
||||
@@ -85,6 +123,8 @@ int drm_sysfs_init(void)
|
||||
}
|
||||
|
||||
drm_class->devnode = drm_devnode;
|
||||
|
||||
drm_sysfs_acpi_register();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -97,11 +137,17 @@ void drm_sysfs_destroy(void)
|
||||
{
|
||||
if (IS_ERR_OR_NULL(drm_class))
|
||||
return;
|
||||
drm_sysfs_acpi_unregister();
|
||||
class_remove_file(drm_class, &class_attr_version.attr);
|
||||
class_destroy(drm_class);
|
||||
drm_class = NULL;
|
||||
}
|
||||
|
||||
static void drm_sysfs_release(struct device *dev)
|
||||
{
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Connector properties
|
||||
*/
|
||||
@@ -273,27 +319,47 @@ static const struct attribute_group *connector_dev_groups[] = {
|
||||
int drm_sysfs_connector_add(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct device *kdev;
|
||||
int r;
|
||||
|
||||
if (connector->kdev)
|
||||
return 0;
|
||||
|
||||
connector->kdev =
|
||||
device_create_with_groups(drm_class, dev->primary->kdev, 0,
|
||||
connector, connector_dev_groups,
|
||||
"card%d-%s", dev->primary->index,
|
||||
connector->name);
|
||||
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
|
||||
if (!kdev)
|
||||
return -ENOMEM;
|
||||
|
||||
device_initialize(kdev);
|
||||
kdev->class = drm_class;
|
||||
kdev->type = &drm_sysfs_device_connector;
|
||||
kdev->parent = dev->primary->kdev;
|
||||
kdev->groups = connector_dev_groups;
|
||||
kdev->release = drm_sysfs_release;
|
||||
dev_set_drvdata(kdev, connector);
|
||||
|
||||
r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name);
|
||||
if (r)
|
||||
goto err_free;
|
||||
|
||||
DRM_DEBUG("adding \"%s\" to sysfs\n",
|
||||
connector->name);
|
||||
|
||||
if (IS_ERR(connector->kdev)) {
|
||||
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
|
||||
return PTR_ERR(connector->kdev);
|
||||
r = device_add(kdev);
|
||||
if (r) {
|
||||
drm_err(dev, "failed to register connector device: %d\n", r);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
connector->kdev = kdev;
|
||||
|
||||
if (connector->ddc)
|
||||
return sysfs_create_link(&connector->kdev->kobj,
|
||||
&connector->ddc->dev.kobj, "ddc");
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
put_device(kdev);
|
||||
return r;
|
||||
}
|
||||
|
||||
void drm_sysfs_connector_remove(struct drm_connector *connector)
|
||||
@@ -374,11 +440,6 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sysfs_connector_status_event);
|
||||
|
||||
static void drm_sysfs_release(struct device *dev)
|
||||
{
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
|
||||
{
|
||||
const char *minor_str;
|
||||
|
||||
@@ -163,6 +163,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
drm_sched_job_arm(&submit->sched_job);
|
||||
|
||||
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
|
||||
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
|
||||
submit->out_fence, 0,
|
||||
@@ -176,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
|
||||
/* the scheduler holds on to the job now */
|
||||
kref_get(&submit->refcount);
|
||||
|
||||
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
|
||||
drm_sched_entity_push_job(&submit->sched_job);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&submit->gpu->fence_lock);
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
config DRM_GUD
|
||||
tristate "GUD USB Display"
|
||||
depends on DRM && USB
|
||||
depends on DRM && USB && MMU
|
||||
select LZ4_COMPRESS
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
|
||||
@@ -214,7 +214,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
|
||||
{
|
||||
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
|
||||
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
ttm_tt_fini(ttm);
|
||||
kfree(i915_tt);
|
||||
}
|
||||
|
||||
@@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
|
||||
if (explicit)
|
||||
return 0;
|
||||
|
||||
return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
|
||||
return drm_sched_job_add_implicit_dependencies(&task->base,
|
||||
&bo->base.base,
|
||||
write);
|
||||
}
|
||||
|
||||
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
|
||||
@@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = drm_gem_fence_array_add(&submit->task->deps, fence);
|
||||
err = drm_sched_job_add_dependency(&submit->task->base, fence);
|
||||
if (err) {
|
||||
dma_fence_put(fence);
|
||||
return err;
|
||||
@@ -359,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
|
||||
goto err_out2;
|
||||
}
|
||||
|
||||
fence = lima_sched_context_queue_task(
|
||||
submit->ctx->context + submit->pipe, submit->task);
|
||||
fence = lima_sched_context_queue_task(submit->task);
|
||||
|
||||
for (i = 0; i < submit->nr_bos; i++) {
|
||||
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
|
||||
|
||||
@@ -129,27 +129,20 @@ int lima_sched_task_init(struct lima_sched_task *task,
|
||||
return err;
|
||||
}
|
||||
|
||||
drm_sched_job_arm(&task->base);
|
||||
|
||||
task->num_bos = num_bos;
|
||||
task->vm = lima_vm_get(vm);
|
||||
|
||||
xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void lima_sched_task_fini(struct lima_sched_task *task)
|
||||
{
|
||||
struct dma_fence *fence;
|
||||
unsigned long index;
|
||||
int i;
|
||||
|
||||
drm_sched_job_cleanup(&task->base);
|
||||
|
||||
xa_for_each(&task->deps, index, fence) {
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
xa_destroy(&task->deps);
|
||||
|
||||
if (task->bos) {
|
||||
for (i = 0; i < task->num_bos; i++)
|
||||
drm_gem_object_put(&task->bos[i]->base.base);
|
||||
@@ -175,27 +168,15 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe,
|
||||
drm_sched_entity_fini(&context->base);
|
||||
}
|
||||
|
||||
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
|
||||
struct lima_sched_task *task)
|
||||
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
|
||||
{
|
||||
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
|
||||
|
||||
trace_lima_task_submit(task);
|
||||
drm_sched_entity_push_job(&task->base, &context->base);
|
||||
drm_sched_entity_push_job(&task->base);
|
||||
return fence;
|
||||
}
|
||||
|
||||
static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
struct lima_sched_task *task = to_lima_task(job);
|
||||
|
||||
if (!xa_empty(&task->deps))
|
||||
return xa_erase(&task->deps, task->last_dep++);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int lima_pm_busy(struct lima_device *ldev)
|
||||
{
|
||||
int ret;
|
||||
@@ -471,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job)
|
||||
}
|
||||
|
||||
static const struct drm_sched_backend_ops lima_sched_ops = {
|
||||
.dependency = lima_sched_dependency,
|
||||
.run_job = lima_sched_run_job,
|
||||
.timedout_job = lima_sched_timedout_job,
|
||||
.free_job = lima_sched_free_job,
|
||||
|
||||
@@ -23,9 +23,6 @@ struct lima_sched_task {
|
||||
struct lima_vm *vm;
|
||||
void *frame;
|
||||
|
||||
struct xarray deps;
|
||||
unsigned long last_dep;
|
||||
|
||||
struct lima_bo **bos;
|
||||
int num_bos;
|
||||
|
||||
@@ -98,8 +95,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
|
||||
atomic_t *guilty);
|
||||
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
|
||||
struct lima_sched_context *context);
|
||||
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
|
||||
struct lima_sched_task *task);
|
||||
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
|
||||
|
||||
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
|
||||
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
|
||||
|
||||
@@ -276,7 +276,6 @@ static int mcde_probe(struct platform_device *pdev)
|
||||
struct drm_device *drm;
|
||||
struct mcde *mcde;
|
||||
struct component_match *match = NULL;
|
||||
struct resource *res;
|
||||
u32 pid;
|
||||
int irq;
|
||||
int ret;
|
||||
@@ -344,8 +343,7 @@ static int mcde_probe(struct platform_device *pdev)
|
||||
goto clk_disable;
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
mcde->regs = devm_ioremap_resource(dev, res);
|
||||
mcde->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(mcde->regs)) {
|
||||
dev_err(dev, "no MCDE regs\n");
|
||||
ret = -EINVAL;
|
||||
|
||||
@@ -1169,7 +1169,6 @@ static int mcde_dsi_probe(struct platform_device *pdev)
|
||||
struct device *dev = &pdev->dev;
|
||||
struct mcde_dsi *d;
|
||||
struct mipi_dsi_host *host;
|
||||
struct resource *res;
|
||||
u32 dsi_id;
|
||||
int ret;
|
||||
|
||||
@@ -1187,8 +1186,7 @@ static int mcde_dsi_probe(struct platform_device *pdev)
|
||||
return PTR_ERR(d->prcmu);
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
d->regs = devm_ioremap_resource(dev, res);
|
||||
d->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(d->regs))
|
||||
return PTR_ERR(d->regs);
|
||||
|
||||
|
||||
@@ -206,8 +206,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
|
||||
priv->compat = match->compat;
|
||||
priv->afbcd.ops = match->afbcd_ops;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
|
||||
if (IS_ERR(regs)) {
|
||||
ret = PTR_ERR(regs);
|
||||
goto free_drm;
|
||||
|
||||
@@ -978,7 +978,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
|
||||
struct dw_hdmi_plat_data *dw_plat_data;
|
||||
struct drm_bridge *next_bridge;
|
||||
struct drm_encoder *encoder;
|
||||
struct resource *res;
|
||||
int irq;
|
||||
int ret;
|
||||
|
||||
@@ -1042,8 +1041,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
|
||||
return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
|
||||
meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(meson_dw_hdmi->hdmitx))
|
||||
return PTR_ERR(meson_dw_hdmi->hdmitx);
|
||||
|
||||
|
||||
@@ -309,11 +309,6 @@ struct msm_gem_submit {
|
||||
struct ww_acquire_ctx ticket;
|
||||
uint32_t seqno; /* Sequence number of the submit on the ring */
|
||||
|
||||
/* Array of struct dma_fence * to block on before submitting this job.
|
||||
*/
|
||||
struct xarray deps;
|
||||
unsigned long last_dep;
|
||||
|
||||
/* Hw fence, which is created when the scheduler executes the job, and
|
||||
* is signaled when the hw finishes (via seqno write from cmdstream)
|
||||
*/
|
||||
|
||||
@@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
|
||||
|
||||
kref_init(&submit->ref);
|
||||
submit->dev = dev;
|
||||
submit->aspace = queue->ctx->aspace;
|
||||
@@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
|
||||
{
|
||||
struct msm_gem_submit *submit =
|
||||
container_of(kref, struct msm_gem_submit, ref);
|
||||
unsigned long index;
|
||||
struct dma_fence *fence;
|
||||
unsigned i;
|
||||
|
||||
if (submit->fence_id) {
|
||||
@@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
|
||||
mutex_unlock(&submit->queue->lock);
|
||||
}
|
||||
|
||||
xa_for_each (&submit->deps, index, fence) {
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
|
||||
xa_destroy(&submit->deps);
|
||||
|
||||
dma_fence_put(submit->user_fence);
|
||||
dma_fence_put(submit->hw_fence);
|
||||
|
||||
@@ -340,11 +330,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (no_implicit)
|
||||
/* exclusive fences must be ordered */
|
||||
if (no_implicit && !write)
|
||||
continue;
|
||||
|
||||
ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
|
||||
write);
|
||||
ret = drm_sched_job_add_implicit_dependencies(&submit->base,
|
||||
obj,
|
||||
write);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
@@ -588,7 +580,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
ret = drm_gem_fence_array_add(&submit->deps, fence);
|
||||
ret = drm_sched_job_add_dependency(&submit->base, fence);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
@@ -798,7 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = drm_gem_fence_array_add(&submit->deps, in_fence);
|
||||
ret = drm_sched_job_add_dependency(&submit->base, in_fence);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
}
|
||||
@@ -878,6 +870,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
|
||||
submit->nr_cmds = i;
|
||||
|
||||
drm_sched_job_arm(&submit->base);
|
||||
|
||||
submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
|
||||
|
||||
/*
|
||||
@@ -889,17 +883,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
if (submit->fence_id < 0) {
|
||||
ret = submit->fence_id = 0;
|
||||
submit->fence_id = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
|
||||
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
|
||||
struct sync_file *sync_file = sync_file_create(submit->user_fence);
|
||||
if (!sync_file) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
} else {
|
||||
fd_install(out_fence_fd, sync_file->file);
|
||||
args->fence_fd = out_fence_fd;
|
||||
}
|
||||
fd_install(out_fence_fd, sync_file->file);
|
||||
args->fence_fd = out_fence_fd;
|
||||
}
|
||||
|
||||
submit_attach_object_fences(submit);
|
||||
@@ -907,7 +900,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
/* The scheduler owns a ref now: */
|
||||
msm_gem_submit_get(submit);
|
||||
|
||||
drm_sched_entity_push_job(&submit->base, &queue->entity);
|
||||
drm_sched_entity_push_job(&submit->base);
|
||||
|
||||
args->fence = submit->fence_id;
|
||||
|
||||
|
||||
@@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
|
||||
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
|
||||
module_param(num_hw_submissions, uint, 0600);
|
||||
|
||||
static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
|
||||
struct drm_sched_entity *s_entity)
|
||||
{
|
||||
struct msm_gem_submit *submit = to_msm_submit(job);
|
||||
|
||||
if (!xa_empty(&submit->deps))
|
||||
return xa_erase(&submit->deps, submit->last_dep++);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
|
||||
{
|
||||
struct msm_gem_submit *submit = to_msm_submit(job);
|
||||
@@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
|
||||
}
|
||||
|
||||
const struct drm_sched_backend_ops msm_sched_ops = {
|
||||
.dependency = msm_job_dependency,
|
||||
.run_job = msm_job_run,
|
||||
.free_job = msm_job_free
|
||||
};
|
||||
|
||||
@@ -1277,6 +1277,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
|
||||
if (slave)
|
||||
return;
|
||||
|
||||
nouveau_ttm_tt_unbind(bdev, ttm);
|
||||
|
||||
drm = nouveau_bdev(bdev);
|
||||
dev = drm->dev->dev;
|
||||
|
||||
@@ -1290,8 +1292,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev,
|
||||
#if IS_ENABLED(CONFIG_AGP)
|
||||
struct nouveau_drm *drm = nouveau_bdev(bdev);
|
||||
if (drm->agp.bridge) {
|
||||
ttm_agp_unbind(ttm);
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
ttm_agp_destroy(ttm);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
|
||||
if (ttm) {
|
||||
nouveau_sgdma_unbind(bdev, ttm);
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
ttm_tt_fini(&nvbe->ttm);
|
||||
kfree(nvbe);
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ config DRM_OMAP
|
||||
tristate "OMAP DRM"
|
||||
depends on DRM
|
||||
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
|
||||
select OMAP2_DSS
|
||||
select DRM_KMS_HELPER
|
||||
select VIDEOMODE_HELPERS
|
||||
select HDMI
|
||||
|
||||
@@ -392,6 +392,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0
|
||||
depends on DRM_MIPI_DSI
|
||||
select VIDEOMODE_HELPERS
|
||||
|
||||
config DRM_PANEL_SAMSUNG_S6D27A1
|
||||
tristate "Samsung S6D27A1 DPI panel driver"
|
||||
depends on OF && SPI && GPIOLIB
|
||||
select DRM_MIPI_DBI
|
||||
help
|
||||
Say Y here if you want to enable support for the Samsung
|
||||
S6D27A1 DPI 480x800 panel.
|
||||
|
||||
This panel can be found in Samsung Galaxy Ace 2
|
||||
GT-I8160 mobile phone.
|
||||
|
||||
config DRM_PANEL_SAMSUNG_S6E3HA2
|
||||
tristate "Samsung S6E3HA2 DSI video mode panel"
|
||||
depends on OF
|
||||
|
||||
@@ -39,6 +39,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
|
||||
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
|
||||
|
||||
@@ -60,6 +60,9 @@
|
||||
#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
|
||||
#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
|
||||
|
||||
#define OTM8009A_HDISPLAY 480
|
||||
#define OTM8009A_VDISPLAY 800
|
||||
|
||||
struct otm8009a {
|
||||
struct device *dev;
|
||||
struct drm_panel panel;
|
||||
@@ -70,19 +73,35 @@ struct otm8009a {
|
||||
bool enabled;
|
||||
};
|
||||
|
||||
static const struct drm_display_mode default_mode = {
|
||||
.clock = 29700,
|
||||
.hdisplay = 480,
|
||||
.hsync_start = 480 + 98,
|
||||
.hsync_end = 480 + 98 + 32,
|
||||
.htotal = 480 + 98 + 32 + 98,
|
||||
.vdisplay = 800,
|
||||
.vsync_start = 800 + 15,
|
||||
.vsync_end = 800 + 15 + 10,
|
||||
.vtotal = 800 + 15 + 10 + 14,
|
||||
.flags = 0,
|
||||
.width_mm = 52,
|
||||
.height_mm = 86,
|
||||
static const struct drm_display_mode modes[] = {
|
||||
{ /* 50 Hz, preferred */
|
||||
.clock = 29700,
|
||||
.hdisplay = 480,
|
||||
.hsync_start = 480 + 98,
|
||||
.hsync_end = 480 + 98 + 32,
|
||||
.htotal = 480 + 98 + 32 + 98,
|
||||
.vdisplay = 800,
|
||||
.vsync_start = 800 + 15,
|
||||
.vsync_end = 800 + 15 + 10,
|
||||
.vtotal = 800 + 15 + 10 + 14,
|
||||
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
|
||||
.width_mm = 52,
|
||||
.height_mm = 86,
|
||||
},
|
||||
{ /* 60 Hz */
|
||||
.clock = 33000,
|
||||
.hdisplay = 480,
|
||||
.hsync_start = 480 + 70,
|
||||
.hsync_end = 480 + 70 + 32,
|
||||
.htotal = 480 + 70 + 32 + 72,
|
||||
.vdisplay = 800,
|
||||
.vsync_start = 800 + 15,
|
||||
.vsync_end = 800 + 15 + 10,
|
||||
.vtotal = 800 + 15 + 10 + 16,
|
||||
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
|
||||
.width_mm = 52,
|
||||
.height_mm = 86,
|
||||
},
|
||||
};
|
||||
|
||||
static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
|
||||
@@ -208,12 +227,11 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
|
||||
/* Default portrait 480x800 rgb24 */
|
||||
dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
|
||||
|
||||
ret = mipi_dsi_dcs_set_column_address(dsi, 0,
|
||||
default_mode.hdisplay - 1);
|
||||
ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
|
||||
ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -337,24 +355,33 @@ static int otm8009a_get_modes(struct drm_panel *panel,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct drm_display_mode *mode;
|
||||
unsigned int num_modes = ARRAY_SIZE(modes);
|
||||
unsigned int i;
|
||||
|
||||
mode = drm_mode_duplicate(connector->dev, &default_mode);
|
||||
if (!mode) {
|
||||
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
|
||||
default_mode.hdisplay, default_mode.vdisplay,
|
||||
drm_mode_vrefresh(&default_mode));
|
||||
return -ENOMEM;
|
||||
for (i = 0; i < num_modes; i++) {
|
||||
mode = drm_mode_duplicate(connector->dev, &modes[i]);
|
||||
if (!mode) {
|
||||
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
|
||||
modes[i].hdisplay,
|
||||
modes[i].vdisplay,
|
||||
drm_mode_vrefresh(&modes[i]));
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
mode->type = DRM_MODE_TYPE_DRIVER;
|
||||
|
||||
/* Setting first mode as preferred */
|
||||
if (!i)
|
||||
mode->type |= DRM_MODE_TYPE_PREFERRED;
|
||||
|
||||
drm_mode_set_name(mode);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
}
|
||||
|
||||
drm_mode_set_name(mode);
|
||||
|
||||
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
connector->display_info.width_mm = mode->width_mm;
|
||||
connector->display_info.height_mm = mode->height_mm;
|
||||
|
||||
return 1;
|
||||
return num_modes;
|
||||
}
|
||||
|
||||
static const struct drm_panel_funcs otm8009a_drm_funcs = {
|
||||
|
||||
320
drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
Normal file
320
drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
Normal file
@@ -0,0 +1,320 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
|
||||
* Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
|
||||
*/
|
||||
|
||||
#include <drm/drm_mipi_dbi.h>
|
||||
#include <drm/drm_modes.h>
|
||||
#include <drm/drm_panel.h>
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/media-bus-format.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/spi/spi.h>
|
||||
|
||||
#include <video/mipi_display.h>
|
||||
|
||||
#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */
|
||||
#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */
|
||||
#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */
|
||||
#define S6D27A1_READID1 0xDA /* Read panel ID 1 */
|
||||
#define S6D27A1_READID2 0xDB /* Read panel ID 2 */
|
||||
#define S6D27A1_READID3 0xDC /* Read panel ID 3 */
|
||||
#define S6D27A1_DISPCTL 0xF2 /* Display Control */
|
||||
#define S6D27A1_MANPWR 0xF3 /* Manual Control */
|
||||
#define S6D27A1_PWRCTL1 0xF4 /* Power Control */
|
||||
#define S6D27A1_SRCCTL 0xF6 /* Source Control */
|
||||
#define S6D27A1_PANELCTL 0xF7 /* Panel Control*/
|
||||
|
||||
static const u8 s6d27a1_dbi_read_commands[] = {
|
||||
S6D27A1_READID1,
|
||||
S6D27A1_READID2,
|
||||
S6D27A1_READID3,
|
||||
0, /* sentinel */
|
||||
};
|
||||
|
||||
struct s6d27a1 {
|
||||
struct device *dev;
|
||||
struct mipi_dbi dbi;
|
||||
struct drm_panel panel;
|
||||
struct gpio_desc *reset;
|
||||
struct regulator_bulk_data regulators[2];
|
||||
};
|
||||
|
||||
static const struct drm_display_mode s6d27a1_480_800_mode = {
|
||||
/*
|
||||
* The vendor driver states that the S6D27A1 panel
|
||||
* has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
|
||||
*/
|
||||
.clock = 24960,
|
||||
.hdisplay = 480,
|
||||
.hsync_start = 480 + 63,
|
||||
.hsync_end = 480 + 63 + 2,
|
||||
.htotal = 480 + 63 + 2 + 63,
|
||||
.vdisplay = 800,
|
||||
.vsync_start = 800 + 11,
|
||||
.vsync_end = 800 + 11 + 2,
|
||||
.vtotal = 800 + 11 + 2 + 10,
|
||||
.width_mm = 50,
|
||||
.height_mm = 84,
|
||||
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
|
||||
};
|
||||
|
||||
static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
|
||||
{
|
||||
return container_of(panel, struct s6d27a1, panel);
|
||||
}
|
||||
|
||||
static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
|
||||
{
|
||||
struct mipi_dbi *dbi = &ctx->dbi;
|
||||
u8 id1, id2, id3;
|
||||
int ret;
|
||||
|
||||
ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
|
||||
if (ret) {
|
||||
dev_err(ctx->dev, "unable to read MTP ID 1\n");
|
||||
return;
|
||||
}
|
||||
ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
|
||||
if (ret) {
|
||||
dev_err(ctx->dev, "unable to read MTP ID 2\n");
|
||||
return;
|
||||
}
|
||||
ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
|
||||
if (ret) {
|
||||
dev_err(ctx->dev, "unable to read MTP ID 3\n");
|
||||
return;
|
||||
}
|
||||
dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
|
||||
}
|
||||
|
||||
static int s6d27a1_power_on(struct s6d27a1 *ctx)
|
||||
{
|
||||
struct mipi_dbi *dbi = &ctx->dbi;
|
||||
int ret;
|
||||
|
||||
/* Power up */
|
||||
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
|
||||
ctx->regulators);
|
||||
if (ret) {
|
||||
dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
msleep(20);
|
||||
|
||||
/* Assert reset >=1 ms */
|
||||
gpiod_set_value_cansleep(ctx->reset, 1);
|
||||
usleep_range(1000, 5000);
|
||||
/* De-assert reset */
|
||||
gpiod_set_value_cansleep(ctx->reset, 0);
|
||||
/* Wait >= 10 ms */
|
||||
msleep(20);
|
||||
|
||||
/*
|
||||
* Exit sleep mode and initialize display - some hammering is
|
||||
* necessary.
|
||||
*/
|
||||
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
|
||||
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
|
||||
msleep(120);
|
||||
|
||||
/* Magic to unlock level 2 control of the display */
|
||||
mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);
|
||||
|
||||
/* Configure resolution to 480RGBx800 */
|
||||
mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);
|
||||
|
||||
mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c);
|
||||
|
||||
mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00);
|
||||
|
||||
mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F);
|
||||
|
||||
mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
|
||||
0x44, 0x05, 0x88, 0x4B, 0x50);
|
||||
|
||||
mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16);
|
||||
|
||||
mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08,
|
||||
0x01, 0x09, 0x0D, 0x0A, 0x0E,
|
||||
0x0B, 0x0F, 0x0C, 0x10, 0x01,
|
||||
0x11, 0x12, 0x13, 0x14, 0x05,
|
||||
0x06, 0x07, 0x08, 0x01, 0x09,
|
||||
0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
|
||||
0x0C, 0x10, 0x01, 0x11, 0x12,
|
||||
0x13, 0x14);
|
||||
|
||||
/* lock the level 2 control */
|
||||
mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);
|
||||
|
||||
s6d27a1_read_mtp_id(ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int s6d27a1_power_off(struct s6d27a1 *ctx)
|
||||
{
|
||||
/* Go into RESET and disable regulators */
|
||||
gpiod_set_value_cansleep(ctx->reset, 1);
|
||||
return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
|
||||
ctx->regulators);
|
||||
}
|
||||
|
||||
static int s6d27a1_unprepare(struct drm_panel *panel)
|
||||
{
|
||||
struct s6d27a1 *ctx = to_s6d27a1(panel);
|
||||
struct mipi_dbi *dbi = &ctx->dbi;
|
||||
|
||||
mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
|
||||
msleep(120);
|
||||
return s6d27a1_power_off(to_s6d27a1(panel));
|
||||
}
|
||||
|
||||
static int s6d27a1_disable(struct drm_panel *panel)
|
||||
{
|
||||
struct s6d27a1 *ctx = to_s6d27a1(panel);
|
||||
struct mipi_dbi *dbi = &ctx->dbi;
|
||||
|
||||
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
|
||||
msleep(25);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int s6d27a1_prepare(struct drm_panel *panel)
|
||||
{
|
||||
return s6d27a1_power_on(to_s6d27a1(panel));
|
||||
}
|
||||
|
||||
static int s6d27a1_enable(struct drm_panel *panel)
|
||||
{
|
||||
struct s6d27a1 *ctx = to_s6d27a1(panel);
|
||||
struct mipi_dbi *dbi = &ctx->dbi;
|
||||
|
||||
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int s6d27a1_get_modes(struct drm_panel *panel,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct s6d27a1 *ctx = to_s6d27a1(panel);
|
||||
struct drm_display_mode *mode;
|
||||
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
|
||||
|
||||
mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
|
||||
if (!mode) {
|
||||
dev_err(ctx->dev, "failed to add mode\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
connector->display_info.bpc = 8;
|
||||
connector->display_info.width_mm = mode->width_mm;
|
||||
connector->display_info.height_mm = mode->height_mm;
|
||||
connector->display_info.bus_flags =
|
||||
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
|
||||
drm_display_info_set_bus_formats(&connector->display_info,
|
||||
&bus_format, 1);
|
||||
|
||||
drm_mode_set_name(mode);
|
||||
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
|
||||
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static const struct drm_panel_funcs s6d27a1_drm_funcs = {
|
||||
.disable = s6d27a1_disable,
|
||||
.unprepare = s6d27a1_unprepare,
|
||||
.prepare = s6d27a1_prepare,
|
||||
.enable = s6d27a1_enable,
|
||||
.get_modes = s6d27a1_get_modes,
|
||||
};
|
||||
|
||||
static int s6d27a1_probe(struct spi_device *spi)
|
||||
{
|
||||
struct device *dev = &spi->dev;
|
||||
struct s6d27a1 *ctx;
|
||||
int ret;
|
||||
|
||||
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx->dev = dev;
|
||||
|
||||
/*
|
||||
* VCI is the analog voltage supply
|
||||
* VCCIO is the digital I/O voltage supply
|
||||
*/
|
||||
ctx->regulators[0].supply = "vci";
|
||||
ctx->regulators[1].supply = "vccio";
|
||||
ret = devm_regulator_bulk_get(dev,
|
||||
ARRAY_SIZE(ctx->regulators),
|
||||
ctx->regulators);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "failed to get regulators\n");
|
||||
|
||||
ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(ctx->reset)) {
|
||||
ret = PTR_ERR(ctx->reset);
|
||||
return dev_err_probe(dev, ret, "no RESET GPIO\n");
|
||||
}
|
||||
|
||||
ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
|
||||
|
||||
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
|
||||
|
||||
drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
|
||||
DRM_MODE_CONNECTOR_DPI);
|
||||
|
||||
ret = drm_panel_of_backlight(&ctx->panel);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "failed to add backlight\n");
|
||||
|
||||
spi_set_drvdata(spi, ctx);
|
||||
|
||||
drm_panel_add(&ctx->panel);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int s6d27a1_remove(struct spi_device *spi)
|
||||
{
|
||||
struct s6d27a1 *ctx = spi_get_drvdata(spi);
|
||||
|
||||
drm_panel_remove(&ctx->panel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id s6d27a1_match[] = {
|
||||
{ .compatible = "samsung,s6d27a1", },
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, s6d27a1_match);
|
||||
|
||||
static struct spi_driver s6d27a1_driver = {
|
||||
.probe = s6d27a1_probe,
|
||||
.remove = s6d27a1_remove,
|
||||
.driver = {
|
||||
.name = "s6d27a1-panel",
|
||||
.of_match_table = s6d27a1_match,
|
||||
},
|
||||
};
|
||||
module_spi_driver(s6d27a1_driver);
|
||||
|
||||
MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
|
||||
MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
@@ -3158,19 +3158,6 @@ static const struct panel_desc logictechno_lttd800480070_l6wh_rt = {
|
||||
.connector_type = DRM_MODE_CONNECTOR_DPI,
|
||||
};
|
||||
|
||||
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
|
||||
.clock = 30400,
|
||||
.hdisplay = 800,
|
||||
.hsync_start = 800 + 0,
|
||||
.hsync_end = 800 + 1,
|
||||
.htotal = 800 + 0 + 1 + 160,
|
||||
.vdisplay = 480,
|
||||
.vsync_start = 480 + 0,
|
||||
.vsync_end = 480 + 48 + 1,
|
||||
.vtotal = 480 + 48 + 1 + 0,
|
||||
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
|
||||
};
|
||||
|
||||
static const struct drm_display_mode logicpd_type_28_mode = {
|
||||
.clock = 9107,
|
||||
.hdisplay = 480,
|
||||
@@ -3205,6 +3192,19 @@ static const struct panel_desc logicpd_type_28 = {
|
||||
.connector_type = DRM_MODE_CONNECTOR_DPI,
|
||||
};
|
||||
|
||||
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
|
||||
.clock = 30400,
|
||||
.hdisplay = 800,
|
||||
.hsync_start = 800 + 0,
|
||||
.hsync_end = 800 + 1,
|
||||
.htotal = 800 + 0 + 1 + 160,
|
||||
.vdisplay = 480,
|
||||
.vsync_start = 480 + 0,
|
||||
.vsync_end = 480 + 48 + 1,
|
||||
.vtotal = 480 + 48 + 1 + 0,
|
||||
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
|
||||
};
|
||||
|
||||
static const struct panel_desc mitsubishi_aa070mc01 = {
|
||||
.modes = &mitsubishi_aa070mc01_mode,
|
||||
.num_modes = 1,
|
||||
|
||||
@@ -198,7 +198,6 @@ err:
|
||||
int panfrost_device_init(struct panfrost_device *pfdev)
|
||||
{
|
||||
int err;
|
||||
struct resource *res;
|
||||
|
||||
mutex_init(&pfdev->sched_lock);
|
||||
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
|
||||
@@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
|
||||
if (err)
|
||||
goto out_reset;
|
||||
|
||||
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
|
||||
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
|
||||
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
|
||||
if (IS_ERR(pfdev->iomem)) {
|
||||
err = PTR_ERR(pfdev->iomem);
|
||||
goto out_pm_domain;
|
||||
|
||||
@@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
ret = drm_gem_fence_array_add(&job->deps, fence);
|
||||
ret = drm_sched_job_add_dependency(&job->base, fence);
|
||||
|
||||
if (ret)
|
||||
goto fail;
|
||||
@@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
|
||||
struct drm_panfrost_submit *args = data;
|
||||
struct drm_syncobj *sync_out = NULL;
|
||||
struct panfrost_job *job;
|
||||
int ret = 0;
|
||||
int ret = 0, slot;
|
||||
|
||||
if (!args->jc)
|
||||
return -EINVAL;
|
||||
@@ -253,38 +253,47 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
|
||||
job = kzalloc(sizeof(*job), GFP_KERNEL);
|
||||
if (!job) {
|
||||
ret = -ENOMEM;
|
||||
goto fail_out_sync;
|
||||
goto out_put_syncout;
|
||||
}
|
||||
|
||||
kref_init(&job->refcount);
|
||||
|
||||
xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
|
||||
|
||||
job->pfdev = pfdev;
|
||||
job->jc = args->jc;
|
||||
job->requirements = args->requirements;
|
||||
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
|
||||
job->file_priv = file->driver_priv;
|
||||
|
||||
slot = panfrost_job_get_slot(job);
|
||||
|
||||
ret = drm_sched_job_init(&job->base,
|
||||
&job->file_priv->sched_entity[slot],
|
||||
NULL);
|
||||
if (ret)
|
||||
goto out_put_job;
|
||||
|
||||
ret = panfrost_copy_in_sync(dev, file, args, job);
|
||||
if (ret)
|
||||
goto fail_job;
|
||||
goto out_cleanup_job;
|
||||
|
||||
ret = panfrost_lookup_bos(dev, file, args, job);
|
||||
if (ret)
|
||||
goto fail_job;
|
||||
goto out_cleanup_job;
|
||||
|
||||
ret = panfrost_job_push(job);
|
||||
if (ret)
|
||||
goto fail_job;
|
||||
goto out_cleanup_job;
|
||||
|
||||
/* Update the return sync object for the job */
|
||||
if (sync_out)
|
||||
drm_syncobj_replace_fence(sync_out, job->render_done_fence);
|
||||
|
||||
fail_job:
|
||||
out_cleanup_job:
|
||||
if (ret)
|
||||
drm_sched_job_cleanup(&job->base);
|
||||
out_put_job:
|
||||
panfrost_job_put(job);
|
||||
fail_out_sync:
|
||||
out_put_syncout:
|
||||
if (sync_out)
|
||||
drm_syncobj_put(sync_out);
|
||||
|
||||
|
||||
@@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
|
||||
return &fence->base;
|
||||
}
|
||||
|
||||
static int panfrost_job_get_slot(struct panfrost_job *job)
|
||||
int panfrost_job_get_slot(struct panfrost_job *job)
|
||||
{
|
||||
/* JS0: fragment jobs.
|
||||
* JS1: vertex/tiler jobs
|
||||
@@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
|
||||
*/
|
||||
affinity = pfdev->features.shader_present;
|
||||
|
||||
job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
|
||||
job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
|
||||
job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
|
||||
job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
|
||||
}
|
||||
|
||||
static u32
|
||||
@@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
|
||||
|
||||
cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
|
||||
|
||||
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
|
||||
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
|
||||
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
|
||||
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
|
||||
|
||||
panfrost_job_write_affinity(pfdev, job->requirements, js);
|
||||
|
||||
@@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
|
||||
|
||||
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
|
||||
int bo_count,
|
||||
struct xarray *deps)
|
||||
struct drm_sched_job *job)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < bo_count; i++) {
|
||||
/* panfrost always uses write mode in its current uapi */
|
||||
ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
|
||||
ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
|
||||
true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -269,29 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
|
||||
int panfrost_job_push(struct panfrost_job *job)
|
||||
{
|
||||
struct panfrost_device *pfdev = job->pfdev;
|
||||
int slot = panfrost_job_get_slot(job);
|
||||
struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
|
||||
struct ww_acquire_ctx acquire_ctx;
|
||||
int ret = 0;
|
||||
|
||||
|
||||
ret = drm_gem_lock_reservations(job->bos, job->bo_count,
|
||||
&acquire_ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&pfdev->sched_lock);
|
||||
|
||||
ret = drm_sched_job_init(&job->base, entity, NULL);
|
||||
if (ret) {
|
||||
mutex_unlock(&pfdev->sched_lock);
|
||||
goto unlock;
|
||||
}
|
||||
drm_sched_job_arm(&job->base);
|
||||
|
||||
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
|
||||
|
||||
ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
|
||||
&job->deps);
|
||||
&job->base);
|
||||
if (ret) {
|
||||
mutex_unlock(&pfdev->sched_lock);
|
||||
goto unlock;
|
||||
@@ -299,7 +292,7 @@ int panfrost_job_push(struct panfrost_job *job)
|
||||
|
||||
kref_get(&job->refcount); /* put by scheduler job completion */
|
||||
|
||||
drm_sched_entity_push_job(&job->base, entity);
|
||||
drm_sched_entity_push_job(&job->base);
|
||||
|
||||
mutex_unlock(&pfdev->sched_lock);
|
||||
|
||||
@@ -316,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref)
|
||||
{
|
||||
struct panfrost_job *job = container_of(ref, struct panfrost_job,
|
||||
refcount);
|
||||
struct dma_fence *fence;
|
||||
unsigned long index;
|
||||
unsigned int i;
|
||||
|
||||
xa_for_each(&job->deps, index, fence) {
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
xa_destroy(&job->deps);
|
||||
|
||||
dma_fence_put(job->done_fence);
|
||||
dma_fence_put(job->render_done_fence);
|
||||
|
||||
@@ -363,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job)
|
||||
panfrost_job_put(job);
|
||||
}
|
||||
|
||||
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
|
||||
struct drm_sched_entity *s_entity)
|
||||
{
|
||||
struct panfrost_job *job = to_panfrost_job(sched_job);
|
||||
|
||||
if (!xa_empty(&job->deps))
|
||||
return xa_erase(&job->deps, job->last_dep++);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
|
||||
{
|
||||
struct panfrost_job *job = to_panfrost_job(sched_job);
|
||||
@@ -763,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work)
|
||||
}
|
||||
|
||||
static const struct drm_sched_backend_ops panfrost_sched_ops = {
|
||||
.dependency = panfrost_job_dependency,
|
||||
.run_job = panfrost_job_run,
|
||||
.timedout_job = panfrost_job_timedout,
|
||||
.free_job = panfrost_job_free
|
||||
|
||||
@@ -19,10 +19,6 @@ struct panfrost_job {
|
||||
struct panfrost_device *pfdev;
|
||||
struct panfrost_file_priv *file_priv;
|
||||
|
||||
/* Contains both explicit and implicit fences */
|
||||
struct xarray deps;
|
||||
unsigned long last_dep;
|
||||
|
||||
/* Fence to be signaled by IRQ handler when the job is complete. */
|
||||
struct dma_fence *done_fence;
|
||||
|
||||
@@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev);
|
||||
void panfrost_job_fini(struct panfrost_device *pfdev);
|
||||
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
|
||||
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
|
||||
int panfrost_job_get_slot(struct panfrost_job *job);
|
||||
int panfrost_job_push(struct panfrost_job *job);
|
||||
void panfrost_job_put(struct panfrost_job *job);
|
||||
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
|
||||
|
||||
@@ -71,8 +71,8 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
|
||||
region |= region_width;
|
||||
|
||||
/* Lock the region that needs to be updated */
|
||||
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
|
||||
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
|
||||
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
|
||||
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
|
||||
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
|
||||
}
|
||||
|
||||
@@ -114,14 +114,14 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
|
||||
|
||||
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
|
||||
|
||||
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
|
||||
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
|
||||
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
|
||||
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
|
||||
|
||||
/* Need to revisit mem attrs.
|
||||
* NC is the default, Mali driver is inner WT.
|
||||
*/
|
||||
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
|
||||
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
|
||||
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
|
||||
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
|
||||
|
||||
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
|
||||
}
|
||||
|
||||
@@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
|
||||
|
||||
reinit_completion(&pfdev->perfcnt->dump_comp);
|
||||
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
|
||||
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
|
||||
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
|
||||
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
|
||||
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
|
||||
gpu_write(pfdev, GPU_INT_CLEAR,
|
||||
GPU_IRQ_CLEAN_CACHES_COMPLETED |
|
||||
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
|
||||
|
||||
@@ -36,10 +36,10 @@
|
||||
/* manage releaseables */
|
||||
/* stack them 16 high for now -drawable object is 191 */
|
||||
#define RELEASE_SIZE 256
|
||||
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
|
||||
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
|
||||
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
|
||||
#define SURFACE_RELEASE_SIZE 128
|
||||
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
|
||||
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
|
||||
|
||||
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
|
||||
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
|
||||
|
||||
@@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
|
||||
*/
|
||||
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
|
||||
{
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
ttm_tt_fini(ttm);
|
||||
kfree(ttm);
|
||||
}
|
||||
|
||||
@@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
|
||||
for (i = 0; i < pages; i++) {
|
||||
if (!entry->busaddr[i])
|
||||
break;
|
||||
pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
dma_unmap_page(&pdev->dev, entry->busaddr[i],
|
||||
PAGE_SIZE, DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
|
||||
@@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
|
||||
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
|
||||
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
|
||||
|
||||
if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
|
||||
if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
|
||||
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
|
||||
(unsigned long long)gart_info->table_mask);
|
||||
ret = -EFAULT;
|
||||
@@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
|
||||
gart_idx = 0;
|
||||
for (i = 0; i < pages; i++) {
|
||||
/* we need to support large memory configurations */
|
||||
entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
|
||||
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
|
||||
entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
|
||||
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
|
||||
DRM_ERROR("unable to map PCIGART pages!\n");
|
||||
drm_ati_pcigart_cleanup(dev, gart_info);
|
||||
address = NULL;
|
||||
|
||||
@@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
|
||||
*/
|
||||
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
|
||||
if (seq >= fence->seq) {
|
||||
int ret = dma_fence_signal_locked(&fence->base);
|
||||
|
||||
if (!ret)
|
||||
DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
|
||||
else
|
||||
DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
|
||||
|
||||
dma_fence_signal_locked(&fence->base);
|
||||
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
|
||||
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
|
||||
dma_fence_put(&fence->base);
|
||||
} else
|
||||
DMA_FENCE_TRACE(&fence->base, "pending\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
|
||||
fence->fence_wake.func = radeon_fence_check_signaled;
|
||||
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
|
||||
dma_fence_get(f);
|
||||
|
||||
DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
|
||||
return true;
|
||||
|
||||
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
|
||||
int ret;
|
||||
|
||||
ret = dma_fence_signal(&fence->base);
|
||||
if (!ret)
|
||||
DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
|
||||
dma_fence_signal(&fence->base);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
|
||||
{
|
||||
uint64_t seq[RADEON_NUM_RINGS] = {};
|
||||
long r;
|
||||
int r_sig;
|
||||
|
||||
/*
|
||||
* This function should not be called on !radeon fences.
|
||||
@@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
|
||||
return r;
|
||||
}
|
||||
|
||||
r_sig = dma_fence_signal(&fence->base);
|
||||
if (!r_sig)
|
||||
DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
|
||||
dma_fence_signal(&fence->base);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
@@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t
|
||||
{
|
||||
struct radeon_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
radeon_ttm_backend_unbind(bdev, ttm);
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
|
||||
ttm_tt_fini(>t->ttm);
|
||||
kfree(gtt);
|
||||
}
|
||||
@@ -574,6 +571,8 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
|
||||
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
|
||||
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
|
||||
|
||||
radeon_ttm_tt_unbind(bdev, ttm);
|
||||
|
||||
if (gtt && gtt->userptr) {
|
||||
kfree(ttm->sg);
|
||||
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
|
||||
@@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
|
||||
struct radeon_device *rdev = radeon_get_rdev(bdev);
|
||||
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
ttm_agp_unbind(ttm);
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
ttm_agp_destroy(ttm);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ config DRM_ROCKCHIP
|
||||
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
|
||||
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
|
||||
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
|
||||
select DRM_RGB if ROCKCHIP_RGB
|
||||
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
|
||||
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
|
||||
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
|
||||
|
||||
@@ -45,8 +45,14 @@
|
||||
* @guilty: atomic_t set to 1 when a job on this queue
|
||||
* is found to be guilty causing a timeout
|
||||
*
|
||||
* Note: the sched_list should have at least one element to schedule
|
||||
* the entity
|
||||
* Note that the &sched_list must have at least one element to schedule the entity.
|
||||
*
|
||||
* For changing @priority later on at runtime see
|
||||
* drm_sched_entity_set_priority(). For changing the set of schedulers
|
||||
* @sched_list at runtime see drm_sched_entity_modify_sched().
|
||||
*
|
||||
* An entity is cleaned up by callind drm_sched_entity_fini(). See also
|
||||
* drm_sched_entity_destroy().
|
||||
*
|
||||
* Returns 0 on success or a negative error code on failure.
|
||||
*/
|
||||
@@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init);
|
||||
* @sched_list: the list of new drm scheds which will replace
|
||||
* existing entity->sched_list
|
||||
* @num_sched_list: number of drm sched in sched_list
|
||||
*
|
||||
* Note that this must be called under the same common lock for @entity as
|
||||
* drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
|
||||
* guarantee through some other means that this is never called while new jobs
|
||||
* can be pushed to @entity.
|
||||
*/
|
||||
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
|
||||
struct drm_gpu_scheduler **sched_list,
|
||||
@@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
|
||||
|
||||
/**
|
||||
* drm_sched_entity_is_idle - Check if entity is idle
|
||||
*
|
||||
* @entity: scheduler entity
|
||||
*
|
||||
* Returns true if the entity does not have any unscheduled jobs.
|
||||
*/
|
||||
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
|
||||
{
|
||||
rmb(); /* for list_empty to work without lock */
|
||||
@@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sched_entity_is_ready - Check if entity is ready
|
||||
*
|
||||
* @entity: scheduler entity
|
||||
*
|
||||
* Return true if entity could provide a job.
|
||||
*/
|
||||
/* Return true if entity could provide a job. */
|
||||
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
|
||||
{
|
||||
if (spsc_queue_peek(&entity->job_queue) == NULL)
|
||||
@@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_flush);
|
||||
|
||||
/**
|
||||
* drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
|
||||
*
|
||||
* @f: signaled fence
|
||||
* @cb: our callback structure
|
||||
*
|
||||
* Signal the scheduler finished fence when the entity in question is killed.
|
||||
*/
|
||||
/* Signal the scheduler finished fence when the entity in question is killed. */
|
||||
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
|
||||
struct dma_fence_cb *cb)
|
||||
{
|
||||
@@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
|
||||
job->sched->ops->free_job(job);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
|
||||
*
|
||||
* @entity: entity which is cleaned up
|
||||
*
|
||||
* Makes sure that all remaining jobs in an entity are killed before it is
|
||||
* destroyed.
|
||||
*/
|
||||
static struct dma_fence *
|
||||
drm_sched_job_dependency(struct drm_sched_job *job,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
if (!xa_empty(&job->dependencies))
|
||||
return xa_erase(&job->dependencies, job->last_dependency++);
|
||||
|
||||
if (job->sched->ops->dependency)
|
||||
return job->sched->ops->dependency(job, entity);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
|
||||
{
|
||||
struct drm_sched_job *job;
|
||||
@@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
|
||||
struct drm_sched_fence *s_fence = job->s_fence;
|
||||
|
||||
/* Wait for all dependencies to avoid data corruptions */
|
||||
while ((f = job->sched->ops->dependency(job, entity)))
|
||||
while ((f = drm_sched_job_dependency(job, entity)))
|
||||
dma_fence_wait(f, false);
|
||||
|
||||
drm_sched_fence_scheduled(s_fence);
|
||||
@@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
|
||||
*
|
||||
* @entity: scheduler entity
|
||||
*
|
||||
* This should be called after @drm_sched_entity_do_release. It goes over the
|
||||
* entity and signals all jobs with an error code if the process was killed.
|
||||
* Cleanups up @entity which has been initialized by drm_sched_entity_init().
|
||||
*
|
||||
* If there are potentially job still in flight or getting newly queued
|
||||
* drm_sched_entity_flush() must be called first. This function then goes over
|
||||
* the entity and signals all jobs with an error code if the process was killed.
|
||||
*/
|
||||
void drm_sched_entity_fini(struct drm_sched_entity *entity)
|
||||
{
|
||||
@@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
|
||||
|
||||
/**
|
||||
* drm_sched_entity_destroy - Destroy a context entity
|
||||
*
|
||||
* @entity: scheduler entity
|
||||
*
|
||||
* Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
|
||||
* Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
|
||||
* convenience wrapper.
|
||||
*/
|
||||
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
|
||||
{
|
||||
@@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_destroy);
|
||||
|
||||
/*
|
||||
* drm_sched_entity_clear_dep - callback to clear the entities dependency
|
||||
*/
|
||||
/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
|
||||
static void drm_sched_entity_clear_dep(struct dma_fence *f,
|
||||
struct dma_fence_cb *cb)
|
||||
{
|
||||
@@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_set_priority);
|
||||
|
||||
/**
|
||||
* drm_sched_entity_add_dependency_cb - add callback for the entities dependency
|
||||
*
|
||||
* @entity: entity with dependency
|
||||
*
|
||||
/*
|
||||
* Add a callback to the current dependency of the entity to wake up the
|
||||
* scheduler when the entity becomes available.
|
||||
*/
|
||||
@@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
|
||||
*
|
||||
* @entity: entity to get the job from
|
||||
*
|
||||
* Process all dependencies and try to get one job from the entities queue.
|
||||
*/
|
||||
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
||||
{
|
||||
struct drm_gpu_scheduler *sched = entity->rq->sched;
|
||||
struct drm_sched_job *sched_job;
|
||||
|
||||
sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
|
||||
@@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
||||
return NULL;
|
||||
|
||||
while ((entity->dependency =
|
||||
sched->ops->dependency(sched_job, entity))) {
|
||||
drm_sched_job_dependency(sched_job, entity))) {
|
||||
trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
|
||||
|
||||
if (drm_sched_entity_add_dependency_cb(entity))
|
||||
@@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
||||
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
|
||||
|
||||
dma_fence_put(entity->last_scheduled);
|
||||
|
||||
entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
|
||||
|
||||
/*
|
||||
* If the queue is empty we allow drm_sched_entity_select_rq() to
|
||||
* locklessly access ->last_scheduled. This only works if we set the
|
||||
* pointer before we dequeue and if we a write barrier here.
|
||||
*/
|
||||
smp_wmb();
|
||||
|
||||
spsc_queue_pop(&entity->job_queue);
|
||||
return sched_job;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sched_entity_select_rq - select a new rq for the entity
|
||||
*
|
||||
* @entity: scheduler entity
|
||||
*
|
||||
* Check all prerequisites and select a new rq for the entity for load
|
||||
* balancing.
|
||||
*/
|
||||
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
|
||||
{
|
||||
struct dma_fence *fence;
|
||||
struct drm_gpu_scheduler *sched;
|
||||
struct drm_sched_rq *rq;
|
||||
|
||||
if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
|
||||
/* single possible engine and already selected */
|
||||
if (!entity->sched_list)
|
||||
return;
|
||||
|
||||
fence = READ_ONCE(entity->last_scheduled);
|
||||
/* queue non-empty, stay on the same engine */
|
||||
if (spsc_queue_count(&entity->job_queue))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Only when the queue is empty are we guaranteed that the scheduler
|
||||
* thread cannot change ->last_scheduled. To enforce ordering we need
|
||||
* a read barrier here. See drm_sched_entity_pop_job() for the other
|
||||
* side.
|
||||
*/
|
||||
smp_rmb();
|
||||
|
||||
fence = entity->last_scheduled;
|
||||
|
||||
/* stay on the same engine if the previous job hasn't finished */
|
||||
if (fence && !dma_fence_is_signaled(fence))
|
||||
return;
|
||||
|
||||
@@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
|
||||
|
||||
/**
|
||||
* drm_sched_entity_push_job - Submit a job to the entity's job queue
|
||||
*
|
||||
* @sched_job: job to submit
|
||||
* @entity: scheduler entity
|
||||
*
|
||||
* Note: To guarantee that the order of insertion to queue matches
|
||||
* the job's fence sequence number this function should be
|
||||
* called with drm_sched_job_init under common lock.
|
||||
* Note: To guarantee that the order of insertion to queue matches the job's
|
||||
* fence sequence number this function should be called with drm_sched_job_arm()
|
||||
* under common lock for the struct drm_sched_entity that was set up for
|
||||
* @sched_job in drm_sched_job_init().
|
||||
*
|
||||
* Returns 0 for success, negative error code otherwise.
|
||||
*/
|
||||
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
||||
struct drm_sched_entity *entity)
|
||||
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
|
||||
{
|
||||
struct drm_sched_entity *entity = sched_job->entity;
|
||||
bool first;
|
||||
|
||||
trace_drm_sched_job(sched_job, entity);
|
||||
|
||||
@@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
|
||||
|
||||
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
|
||||
{
|
||||
int ret = dma_fence_signal(&fence->scheduled);
|
||||
|
||||
if (!ret)
|
||||
DMA_FENCE_TRACE(&fence->scheduled,
|
||||
"signaled from irq context\n");
|
||||
else
|
||||
DMA_FENCE_TRACE(&fence->scheduled,
|
||||
"was already signaled\n");
|
||||
dma_fence_signal(&fence->scheduled);
|
||||
}
|
||||
|
||||
void drm_sched_fence_finished(struct drm_sched_fence *fence)
|
||||
{
|
||||
int ret = dma_fence_signal(&fence->finished);
|
||||
|
||||
if (!ret)
|
||||
DMA_FENCE_TRACE(&fence->finished,
|
||||
"signaled from irq context\n");
|
||||
else
|
||||
DMA_FENCE_TRACE(&fence->finished,
|
||||
"was already signaled\n");
|
||||
dma_fence_signal(&fence->finished);
|
||||
}
|
||||
|
||||
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
|
||||
@@ -83,19 +69,28 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
|
||||
return (const char *)fence->sched->name;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sched_fence_free - free up the fence memory
|
||||
*
|
||||
* @rcu: RCU callback head
|
||||
*
|
||||
* Free up the fence memory after the RCU grace period.
|
||||
*/
|
||||
static void drm_sched_fence_free(struct rcu_head *rcu)
|
||||
static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
|
||||
struct drm_sched_fence *fence = to_drm_sched_fence(f);
|
||||
|
||||
kmem_cache_free(sched_fence_slab, fence);
|
||||
if (!WARN_ON_ONCE(!fence))
|
||||
kmem_cache_free(sched_fence_slab, fence);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sched_fence_free - free up an uninitialized fence
|
||||
*
|
||||
* @fence: fence to free
|
||||
*
|
||||
* Free up the fence memory. Should only be used if drm_sched_fence_init()
|
||||
* has not been called yet.
|
||||
*/
|
||||
void drm_sched_fence_free(struct drm_sched_fence *fence)
|
||||
{
|
||||
/* This function should not be called if the fence has been initialized. */
|
||||
if (!WARN_ON_ONCE(fence->sched))
|
||||
kmem_cache_free(sched_fence_slab, fence);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -111,7 +106,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
|
||||
struct drm_sched_fence *fence = to_drm_sched_fence(f);
|
||||
|
||||
dma_fence_put(fence->parent);
|
||||
call_rcu(&fence->finished.rcu, drm_sched_fence_free);
|
||||
call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -152,27 +147,32 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
|
||||
}
|
||||
EXPORT_SYMBOL(to_drm_sched_fence);
|
||||
|
||||
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
|
||||
void *owner)
|
||||
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
|
||||
void *owner)
|
||||
{
|
||||
struct drm_sched_fence *fence = NULL;
|
||||
unsigned seq;
|
||||
|
||||
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
|
||||
if (fence == NULL)
|
||||
return NULL;
|
||||
|
||||
fence->owner = owner;
|
||||
fence->sched = entity->rq->sched;
|
||||
spin_lock_init(&fence->lock);
|
||||
|
||||
return fence;
|
||||
}
|
||||
|
||||
void drm_sched_fence_init(struct drm_sched_fence *fence,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
unsigned seq;
|
||||
|
||||
fence->sched = entity->rq->sched;
|
||||
seq = atomic_inc_return(&entity->fence_seq);
|
||||
dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
|
||||
&fence->lock, entity->fence_context, seq);
|
||||
dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
|
||||
&fence->lock, entity->fence_context + 1, seq);
|
||||
|
||||
return fence;
|
||||
}
|
||||
|
||||
module_init(drm_sched_fence_slab_init);
|
||||
|
||||
@@ -48,9 +48,11 @@
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/dma-resv.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
|
||||
#include <drm/drm_print.h>
|
||||
#include <drm/drm_gem.h>
|
||||
#include <drm/gpu_scheduler.h>
|
||||
#include <drm/spsc_queue.h>
|
||||
|
||||
@@ -564,7 +566,6 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
|
||||
|
||||
/**
|
||||
* drm_sched_job_init - init a scheduler job
|
||||
*
|
||||
* @job: scheduler job to init
|
||||
* @entity: scheduler entity to use
|
||||
* @owner: job owner for debugging
|
||||
@@ -572,43 +573,193 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
|
||||
* Refer to drm_sched_entity_push_job() documentation
|
||||
* for locking considerations.
|
||||
*
|
||||
* Drivers must make sure drm_sched_job_cleanup() if this function returns
|
||||
* successfully, even when @job is aborted before drm_sched_job_arm() is called.
|
||||
*
|
||||
* WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
|
||||
* has died, which can mean that there's no valid runqueue for a @entity.
|
||||
* This function returns -ENOENT in this case (which probably should be -EIO as
|
||||
* a more meanigful return value).
|
||||
*
|
||||
* Returns 0 for success, negative error code otherwise.
|
||||
*/
|
||||
int drm_sched_job_init(struct drm_sched_job *job,
|
||||
struct drm_sched_entity *entity,
|
||||
void *owner)
|
||||
{
|
||||
struct drm_gpu_scheduler *sched;
|
||||
|
||||
drm_sched_entity_select_rq(entity);
|
||||
if (!entity->rq)
|
||||
return -ENOENT;
|
||||
|
||||
sched = entity->rq->sched;
|
||||
|
||||
job->sched = sched;
|
||||
job->entity = entity;
|
||||
job->s_priority = entity->rq - sched->sched_rq;
|
||||
job->s_fence = drm_sched_fence_create(entity, owner);
|
||||
job->s_fence = drm_sched_fence_alloc(entity, owner);
|
||||
if (!job->s_fence)
|
||||
return -ENOMEM;
|
||||
job->id = atomic64_inc_return(&sched->job_id_count);
|
||||
|
||||
INIT_LIST_HEAD(&job->list);
|
||||
|
||||
xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_job_init);
|
||||
|
||||
/**
|
||||
* drm_sched_job_cleanup - clean up scheduler job resources
|
||||
* drm_sched_job_arm - arm a scheduler job for execution
|
||||
* @job: scheduler job to arm
|
||||
*
|
||||
* This arms a scheduler job for execution. Specifically it initializes the
|
||||
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
|
||||
* or other places that need to track the completion of this job.
|
||||
*
|
||||
* Refer to drm_sched_entity_push_job() documentation for locking
|
||||
* considerations.
|
||||
*
|
||||
* This can only be called if drm_sched_job_init() succeeded.
|
||||
*/
|
||||
void drm_sched_job_arm(struct drm_sched_job *job)
|
||||
{
|
||||
struct drm_gpu_scheduler *sched;
|
||||
struct drm_sched_entity *entity = job->entity;
|
||||
|
||||
BUG_ON(!entity);
|
||||
|
||||
sched = entity->rq->sched;
|
||||
|
||||
job->sched = sched;
|
||||
job->s_priority = entity->rq - sched->sched_rq;
|
||||
job->id = atomic64_inc_return(&sched->job_id_count);
|
||||
|
||||
drm_sched_fence_init(job->s_fence, job->entity);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_job_arm);
|
||||
|
||||
/**
|
||||
* drm_sched_job_add_dependency - adds the fence as a job dependency
|
||||
* @job: scheduler job to add the dependencies to
|
||||
* @fence: the dma_fence to add to the list of dependencies.
|
||||
*
|
||||
* Note that @fence is consumed in both the success and error cases.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success, or an error on failing to expand the array.
|
||||
*/
|
||||
int drm_sched_job_add_dependency(struct drm_sched_job *job,
|
||||
struct dma_fence *fence)
|
||||
{
|
||||
struct dma_fence *entry;
|
||||
unsigned long index;
|
||||
u32 id = 0;
|
||||
int ret;
|
||||
|
||||
if (!fence)
|
||||
return 0;
|
||||
|
||||
/* Deduplicate if we already depend on a fence from the same context.
|
||||
* This lets the size of the array of deps scale with the number of
|
||||
* engines involved, rather than the number of BOs.
|
||||
*/
|
||||
xa_for_each(&job->dependencies, index, entry) {
|
||||
if (entry->context != fence->context)
|
||||
continue;
|
||||
|
||||
if (dma_fence_is_later(fence, entry)) {
|
||||
dma_fence_put(entry);
|
||||
xa_store(&job->dependencies, index, fence, GFP_KERNEL);
|
||||
} else {
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
|
||||
if (ret != 0)
|
||||
dma_fence_put(fence);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_job_add_dependency);
|
||||
|
||||
/**
|
||||
* drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
|
||||
* dependencies
|
||||
* @job: scheduler job to add the dependencies to
|
||||
* @obj: the gem object to add new dependencies from.
|
||||
* @write: whether the job might write the object (so we need to depend on
|
||||
* shared fences in the reservation object).
|
||||
*
|
||||
* This should be called after drm_gem_lock_reservations() on your array of
|
||||
* GEM objects used in the job but before updating the reservations with your
|
||||
* own fences.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success, or an error on failing to expand the array.
|
||||
*/
|
||||
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
|
||||
struct drm_gem_object *obj,
|
||||
bool write)
|
||||
{
|
||||
int ret;
|
||||
struct dma_fence **fences;
|
||||
unsigned int i, fence_count;
|
||||
|
||||
if (!write) {
|
||||
struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
|
||||
|
||||
return drm_sched_job_add_dependency(job, fence);
|
||||
}
|
||||
|
||||
ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
|
||||
if (ret || !fence_count)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < fence_count; i++) {
|
||||
ret = drm_sched_job_add_dependency(job, fences[i]);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
for (; i < fence_count; i++)
|
||||
dma_fence_put(fences[i]);
|
||||
kfree(fences);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
|
||||
|
||||
|
||||
/**
|
||||
* drm_sched_job_cleanup - clean up scheduler job resources
|
||||
* @job: scheduler job to clean up
|
||||
*
|
||||
* Cleans up the resources allocated with drm_sched_job_init().
|
||||
*
|
||||
* Drivers should call this from their error unwind code if @job is aborted
|
||||
* before drm_sched_job_arm() is called.
|
||||
*
|
||||
* After that point of no return @job is committed to be executed by the
|
||||
* scheduler, and this function should be called from the
|
||||
* &drm_sched_backend_ops.free_job callback.
|
||||
*/
|
||||
void drm_sched_job_cleanup(struct drm_sched_job *job)
|
||||
{
|
||||
dma_fence_put(&job->s_fence->finished);
|
||||
struct dma_fence *fence;
|
||||
unsigned long index;
|
||||
|
||||
if (kref_read(&job->s_fence->finished.refcount)) {
|
||||
/* drm_sched_job_arm() has been called */
|
||||
dma_fence_put(&job->s_fence->finished);
|
||||
} else {
|
||||
/* aborted job before committing to run it */
|
||||
drm_sched_fence_free(job->s_fence);
|
||||
}
|
||||
|
||||
job->s_fence = NULL;
|
||||
|
||||
xa_for_each(&job->dependencies, index, fence) {
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
xa_destroy(&job->dependencies);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_job_cleanup);
|
||||
|
||||
@@ -676,15 +827,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
|
||||
{
|
||||
struct drm_sched_job *job, *next;
|
||||
|
||||
/*
|
||||
* Don't destroy jobs while the timeout worker is running OR thread
|
||||
* is being parked and hence assumed to not touch pending_list
|
||||
*/
|
||||
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
|
||||
!cancel_delayed_work(&sched->work_tdr)) ||
|
||||
kthread_should_park())
|
||||
return NULL;
|
||||
|
||||
spin_lock(&sched->job_list_lock);
|
||||
|
||||
job = list_first_entry_or_null(&sched->pending_list,
|
||||
@@ -693,17 +835,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
|
||||
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
|
||||
/* remove job from pending_list */
|
||||
list_del_init(&job->list);
|
||||
|
||||
/* cancel this job's TO timer */
|
||||
cancel_delayed_work(&sched->work_tdr);
|
||||
/* make the scheduled timestamp more accurate */
|
||||
next = list_first_entry_or_null(&sched->pending_list,
|
||||
typeof(*next), list);
|
||||
if (next)
|
||||
|
||||
if (next) {
|
||||
next->s_fence->scheduled.timestamp =
|
||||
job->s_fence->finished.timestamp;
|
||||
|
||||
/* start TO timer for next job */
|
||||
drm_sched_start_timeout(sched);
|
||||
}
|
||||
} else {
|
||||
job = NULL;
|
||||
/* queue timeout for next job */
|
||||
drm_sched_start_timeout(sched);
|
||||
}
|
||||
|
||||
spin_unlock(&sched->job_list_lock);
|
||||
@@ -791,11 +937,8 @@ static int drm_sched_main(void *param)
|
||||
(entity = drm_sched_select_entity(sched))) ||
|
||||
kthread_should_stop());
|
||||
|
||||
if (cleanup_job) {
|
||||
if (cleanup_job)
|
||||
sched->ops->free_job(cleanup_job);
|
||||
/* queue timeout for next job */
|
||||
drm_sched_start_timeout(sched);
|
||||
}
|
||||
|
||||
if (!entity)
|
||||
continue;
|
||||
|
||||
@@ -845,7 +845,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
|
||||
LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
|
||||
|
||||
/* Specifies the constant alpha value */
|
||||
val = CONSTA_MAX;
|
||||
val = newstate->alpha >> 8;
|
||||
reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
|
||||
|
||||
/* Specifies the blending factors */
|
||||
@@ -997,6 +997,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
|
||||
|
||||
drm_plane_helper_add(plane, <dc_plane_helper_funcs);
|
||||
|
||||
drm_plane_create_alpha_property(plane);
|
||||
|
||||
DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
|
||||
|
||||
return plane;
|
||||
@@ -1024,6 +1026,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_plane_create_zpos_immutable_property(primary, 0);
|
||||
|
||||
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
|
||||
<dc_crtc_funcs, NULL);
|
||||
if (ret) {
|
||||
@@ -1046,6 +1050,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
|
||||
DRM_ERROR("Can not create overlay plane %d\n", i);
|
||||
goto cleanup;
|
||||
}
|
||||
drm_plane_create_zpos_immutable_property(overlay, i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -782,7 +782,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
|
||||
struct sun4i_drv *drv = drm->dev_private;
|
||||
struct sun4i_backend *backend;
|
||||
const struct sun4i_backend_quirks *quirks;
|
||||
struct resource *res;
|
||||
void __iomem *regs;
|
||||
int i, ret;
|
||||
|
||||
@@ -815,8 +814,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
|
||||
if (IS_ERR(backend->frontend))
|
||||
dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(regs))
|
||||
return PTR_ERR(regs);
|
||||
|
||||
|
||||
@@ -561,7 +561,6 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
|
||||
struct sun4i_frontend *frontend;
|
||||
struct drm_device *drm = data;
|
||||
struct sun4i_drv *drv = drm->dev_private;
|
||||
struct resource *res;
|
||||
void __iomem *regs;
|
||||
|
||||
frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
|
||||
@@ -576,8 +575,7 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
|
||||
if (!frontend->data)
|
||||
return -ENODEV;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(regs))
|
||||
return PTR_ERR(regs);
|
||||
|
||||
|
||||
@@ -489,7 +489,6 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
|
||||
struct cec_connector_info conn_info;
|
||||
struct sun4i_drv *drv = drm->dev_private;
|
||||
struct sun4i_hdmi *hdmi;
|
||||
struct resource *res;
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
@@ -504,8 +503,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
|
||||
if (!hdmi->variant)
|
||||
return -EINVAL;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
hdmi->base = devm_ioremap_resource(dev, res);
|
||||
hdmi->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(hdmi->base)) {
|
||||
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
|
||||
return PTR_ERR(hdmi->base);
|
||||
|
||||
@@ -841,11 +841,9 @@ static int sun4i_tcon_init_regmap(struct device *dev,
|
||||
struct sun4i_tcon *tcon)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct resource *res;
|
||||
void __iomem *regs;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(regs))
|
||||
return PTR_ERR(regs);
|
||||
|
||||
|
||||
@@ -538,7 +538,6 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
|
||||
struct drm_device *drm = data;
|
||||
struct sun4i_drv *drv = drm->dev_private;
|
||||
struct sun4i_tv *tv;
|
||||
struct resource *res;
|
||||
void __iomem *regs;
|
||||
int ret;
|
||||
|
||||
@@ -548,8 +547,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
|
||||
tv->drv = drv;
|
||||
dev_set_drvdata(dev, tv);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(regs)) {
|
||||
dev_err(dev, "Couldn't map the TV encoder registers\n");
|
||||
return PTR_ERR(regs);
|
||||
|
||||
@@ -1104,7 +1104,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
|
||||
struct device *dev = &pdev->dev;
|
||||
const char *bus_clk_name = NULL;
|
||||
struct sun6i_dsi *dsi;
|
||||
struct resource *res;
|
||||
void __iomem *base;
|
||||
int ret;
|
||||
|
||||
@@ -1120,8 +1119,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
|
||||
"allwinner,sun6i-a31-mipi-dsi"))
|
||||
bus_clk_name = "bus";
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
base = devm_ioremap_resource(dev, res);
|
||||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base)) {
|
||||
dev_err(dev, "Couldn't map the DSI encoder registers\n");
|
||||
return PTR_ERR(base);
|
||||
|
||||
@@ -16,8 +16,8 @@ struct sun8i_mixer;
|
||||
#define CCSC10_OFFSET 0xA0000
|
||||
#define CCSC11_OFFSET 0xF0000
|
||||
|
||||
#define SUN8I_CSC_CTRL(base) (base + 0x0)
|
||||
#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i)
|
||||
#define SUN8I_CSC_CTRL(base) ((base) + 0x0)
|
||||
#define SUN8I_CSC_COEFF(base, i) ((base) + 0x10 + 4 * (i))
|
||||
|
||||
#define SUN8I_CSC_CTRL_EN BIT(0)
|
||||
|
||||
|
||||
@@ -337,7 +337,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
|
||||
struct drm_device *drm = data;
|
||||
struct sun4i_drv *drv = drm->dev_private;
|
||||
struct sun8i_mixer *mixer;
|
||||
struct resource *res;
|
||||
void __iomem *regs;
|
||||
unsigned int base;
|
||||
int plane_cnt;
|
||||
@@ -390,8 +389,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
|
||||
if (!mixer->cfg)
|
||||
return -EINVAL;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(regs))
|
||||
return PTR_ERR(regs);
|
||||
|
||||
|
||||
@@ -128,7 +128,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
|
||||
struct clk_hw_onecell_data *clk_data;
|
||||
struct sun8i_tcon_top *tcon_top;
|
||||
const struct sun8i_tcon_top_quirks *quirks;
|
||||
struct resource *res;
|
||||
void __iomem *regs;
|
||||
int ret, i;
|
||||
|
||||
@@ -158,8 +157,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
|
||||
return PTR_ERR(tcon_top->bus);
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
regs = devm_ioremap_resource(dev, res);
|
||||
regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
tcon_top->regs = regs;
|
||||
if (IS_ERR(regs))
|
||||
return PTR_ERR(regs);
|
||||
|
||||
@@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
|
||||
{
|
||||
uint64_t modifier = framebuffer->modifier;
|
||||
|
||||
if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
|
||||
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
|
||||
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
|
||||
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
|
||||
else
|
||||
|
||||
@@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
|
||||
return true;
|
||||
|
||||
/* check for the sector layout bit */
|
||||
if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
|
||||
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
|
||||
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
|
||||
if (!tegra_plane_supports_sector_layout(plane))
|
||||
return false;
|
||||
|
||||
@@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU
|
||||
|
||||
config DRM_GM12U320
|
||||
tristate "GM12U320 driver for USB projectors"
|
||||
depends on DRM && USB
|
||||
depends on DRM && USB && MMU
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
help
|
||||
@@ -53,7 +53,7 @@ config DRM_GM12U320
|
||||
|
||||
config DRM_SIMPLEDRM
|
||||
tristate "Simple framebuffer driver"
|
||||
depends on DRM
|
||||
depends on DRM && MMU
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
select DRM_KMS_HELPER
|
||||
help
|
||||
|
||||
@@ -63,6 +63,7 @@ MODULE_PARM_DESC(defy, "default y resolution");
|
||||
|
||||
enum bochs_types {
|
||||
BOCHS_QEMU_STDVGA,
|
||||
BOCHS_SIMICS,
|
||||
BOCHS_UNKNOWN,
|
||||
};
|
||||
|
||||
@@ -695,6 +696,13 @@ static const struct pci_device_id bochs_pci_tbl[] = {
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = BOCHS_UNKNOWN,
|
||||
},
|
||||
{
|
||||
.vendor = 0x4321,
|
||||
.device = 0x1111,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = BOCHS_SIMICS,
|
||||
},
|
||||
{ /* end of list */ }
|
||||
};
|
||||
|
||||
|
||||
@@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
|
||||
}
|
||||
}
|
||||
|
||||
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
|
||||
static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_device *bdev = bo->bdev;
|
||||
|
||||
list_move_tail(&bo->lru, &bdev->pinned);
|
||||
|
||||
if (bdev->funcs->del_from_lru_notify)
|
||||
bdev->funcs->del_from_lru_notify(bo);
|
||||
}
|
||||
|
||||
static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_device *bdev = bo->bdev;
|
||||
|
||||
@@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
|
||||
dma_resv_assert_held(bo->base.resv);
|
||||
|
||||
if (bo->pin_count) {
|
||||
ttm_bo_del_from_lru(bo);
|
||||
ttm_bo_move_to_pinned(bo);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ttm_bo_del_from_lru(bo);
|
||||
ttm_bo_move_to_pinned(bo);
|
||||
list_del_init(&bo->ddestroy);
|
||||
spin_unlock(&bo->bdev->lru_lock);
|
||||
ttm_bo_cleanup_memtype_use(bo);
|
||||
@@ -914,57 +924,11 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool ttm_bo_places_compat(const struct ttm_place *places,
|
||||
unsigned num_placement,
|
||||
struct ttm_resource *mem,
|
||||
uint32_t *new_flags)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
if (mem->placement & TTM_PL_FLAG_TEMPORARY)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < num_placement; i++) {
|
||||
const struct ttm_place *heap = &places[i];
|
||||
|
||||
if ((mem->start < heap->fpfn ||
|
||||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
|
||||
continue;
|
||||
|
||||
*new_flags = heap->flags;
|
||||
if ((mem->mem_type == heap->mem_type) &&
|
||||
(!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
|
||||
(mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ttm_bo_mem_compat(struct ttm_placement *placement,
|
||||
struct ttm_resource *mem,
|
||||
uint32_t *new_flags)
|
||||
{
|
||||
if (ttm_bo_places_compat(placement->placement, placement->num_placement,
|
||||
mem, new_flags))
|
||||
return true;
|
||||
|
||||
if ((placement->busy_placement != placement->placement ||
|
||||
placement->num_busy_placement > placement->num_placement) &&
|
||||
ttm_bo_places_compat(placement->busy_placement,
|
||||
placement->num_busy_placement,
|
||||
mem, new_flags))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_mem_compat);
|
||||
|
||||
int ttm_bo_validate(struct ttm_buffer_object *bo,
|
||||
struct ttm_placement *placement,
|
||||
struct ttm_operation_ctx *ctx)
|
||||
{
|
||||
int ret;
|
||||
uint32_t new_flags;
|
||||
|
||||
dma_resv_assert_held(bo->base.resv);
|
||||
|
||||
@@ -977,7 +941,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
|
||||
/*
|
||||
* Check whether we need to move buffer.
|
||||
*/
|
||||
if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
|
||||
if (!ttm_resource_compat(bo->resource, placement)) {
|
||||
ret = ttm_bo_move_buffer(bo, placement, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
@@ -1165,7 +1129,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
|
||||
return ret == -EBUSY ? -ENOSPC : ret;
|
||||
}
|
||||
|
||||
ttm_bo_del_from_lru(bo);
|
||||
ttm_bo_move_to_pinned(bo);
|
||||
/* TODO: Cleanup the locking */
|
||||
spin_unlock(&bo->bdev->lru_lock);
|
||||
|
||||
@@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
|
||||
if (bo->ttm == NULL)
|
||||
return;
|
||||
|
||||
ttm_tt_unpopulate(bo->bdev, bo->ttm);
|
||||
ttm_tt_destroy(bo->bdev, bo->ttm);
|
||||
bo->ttm = NULL;
|
||||
}
|
||||
|
||||
@@ -220,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
|
||||
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
|
||||
spin_lock_init(&bdev->lru_lock);
|
||||
INIT_LIST_HEAD(&bdev->ddestroy);
|
||||
INIT_LIST_HEAD(&bdev->pinned);
|
||||
bdev->dev_mapping = mapping;
|
||||
mutex_lock(&ttm_global_mutex);
|
||||
list_add_tail(&bdev->device_list, &glob->device_list);
|
||||
@@ -257,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev)
|
||||
ttm_global_release();
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_device_fini);
|
||||
|
||||
void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
|
||||
{
|
||||
struct ttm_resource_manager *man;
|
||||
struct ttm_buffer_object *bo;
|
||||
unsigned int i, j;
|
||||
|
||||
spin_lock(&bdev->lru_lock);
|
||||
while (!list_empty(&bdev->pinned)) {
|
||||
bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
|
||||
/* Take ref against racing releases once lru_lock is unlocked */
|
||||
if (ttm_bo_get_unless_zero(bo)) {
|
||||
list_del_init(&bo->lru);
|
||||
spin_unlock(&bdev->lru_lock);
|
||||
|
||||
if (bo->ttm)
|
||||
ttm_tt_unpopulate(bo->bdev, bo->ttm);
|
||||
|
||||
ttm_bo_put(bo);
|
||||
spin_lock(&bdev->lru_lock);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
|
||||
man = ttm_manager_type(bdev, i);
|
||||
if (!man || !man->use_tt)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
|
||||
while (!list_empty(&man->lru[j])) {
|
||||
bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
|
||||
if (ttm_bo_get_unless_zero(bo)) {
|
||||
list_del_init(&bo->lru);
|
||||
spin_unlock(&bdev->lru_lock);
|
||||
|
||||
if (bo->ttm)
|
||||
ttm_tt_unpopulate(bo->bdev, bo->ttm);
|
||||
|
||||
ttm_bo_put(bo);
|
||||
spin_lock(&bdev->lru_lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock(&bdev->lru_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
|
||||
|
||||
@@ -39,6 +39,18 @@
|
||||
|
||||
#include "ttm_module.h"
|
||||
|
||||
/**
|
||||
* DOC: TTM
|
||||
*
|
||||
* TTM is a memory manager for accelerator devices with dedicated memory.
|
||||
*
|
||||
* The basic idea is that resources are grouped together in buffer objects of
|
||||
* certain size and TTM handles lifetime, movement and CPU mappings of those
|
||||
* objects.
|
||||
*
|
||||
* TODO: Add more design background and information here.
|
||||
*/
|
||||
|
||||
/**
|
||||
* ttm_prot_from_caching - Modify the page protection according to the
|
||||
* ttm cacing mode
|
||||
|
||||
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
|
||||
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
|
||||
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
|
||||
|
||||
static struct mutex shrinker_lock;
|
||||
static spinlock_t shrinker_lock;
|
||||
static struct list_head shrinker_list;
|
||||
static struct shrinker mm_shrinker;
|
||||
|
||||
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
|
||||
spin_lock_init(&pt->lock);
|
||||
INIT_LIST_HEAD(&pt->pages);
|
||||
|
||||
mutex_lock(&shrinker_lock);
|
||||
spin_lock(&shrinker_lock);
|
||||
list_add_tail(&pt->shrinker_list, &shrinker_list);
|
||||
mutex_unlock(&shrinker_lock);
|
||||
spin_unlock(&shrinker_lock);
|
||||
}
|
||||
|
||||
/* Remove a pool_type from the global shrinker list and free all pages */
|
||||
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
|
||||
{
|
||||
struct page *p;
|
||||
|
||||
mutex_lock(&shrinker_lock);
|
||||
spin_lock(&shrinker_lock);
|
||||
list_del(&pt->shrinker_list);
|
||||
mutex_unlock(&shrinker_lock);
|
||||
spin_unlock(&shrinker_lock);
|
||||
|
||||
while ((p = ttm_pool_type_take(pt)))
|
||||
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
|
||||
@@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
|
||||
static unsigned int ttm_pool_shrink(void)
|
||||
{
|
||||
struct ttm_pool_type *pt;
|
||||
unsigned int num_freed;
|
||||
unsigned int num_pages;
|
||||
struct page *p;
|
||||
|
||||
mutex_lock(&shrinker_lock);
|
||||
spin_lock(&shrinker_lock);
|
||||
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
|
||||
list_move_tail(&pt->shrinker_list, &shrinker_list);
|
||||
spin_unlock(&shrinker_lock);
|
||||
|
||||
p = ttm_pool_type_take(pt);
|
||||
if (p) {
|
||||
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
|
||||
num_freed = 1 << pt->order;
|
||||
num_pages = 1 << pt->order;
|
||||
} else {
|
||||
num_freed = 0;
|
||||
num_pages = 0;
|
||||
}
|
||||
|
||||
list_move_tail(&pt->shrinker_list, &shrinker_list);
|
||||
mutex_unlock(&shrinker_lock);
|
||||
|
||||
return num_freed;
|
||||
return num_pages;
|
||||
}
|
||||
|
||||
/* Return the allocation order based for a page */
|
||||
@@ -531,6 +530,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
|
||||
for (j = 0; j < MAX_ORDER; ++j)
|
||||
ttm_pool_type_fini(&pool->caching[i].orders[j]);
|
||||
}
|
||||
|
||||
/* We removed the pool types from the LRU, but we need to also make sure
|
||||
* that no shrinker is concurrently freeing pages from the pool.
|
||||
*/
|
||||
synchronize_shrinkers();
|
||||
}
|
||||
|
||||
/* As long as pages are available make sure to release at least one */
|
||||
@@ -605,7 +609,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
|
||||
{
|
||||
ttm_pool_debugfs_header(m);
|
||||
|
||||
mutex_lock(&shrinker_lock);
|
||||
spin_lock(&shrinker_lock);
|
||||
seq_puts(m, "wc\t:");
|
||||
ttm_pool_debugfs_orders(global_write_combined, m);
|
||||
seq_puts(m, "uc\t:");
|
||||
@@ -614,7 +618,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
|
||||
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
|
||||
seq_puts(m, "uc 32\t:");
|
||||
ttm_pool_debugfs_orders(global_dma32_uncached, m);
|
||||
mutex_unlock(&shrinker_lock);
|
||||
spin_unlock(&shrinker_lock);
|
||||
|
||||
ttm_pool_debugfs_footer(m);
|
||||
|
||||
@@ -641,7 +645,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
|
||||
|
||||
ttm_pool_debugfs_header(m);
|
||||
|
||||
mutex_lock(&shrinker_lock);
|
||||
spin_lock(&shrinker_lock);
|
||||
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
|
||||
seq_puts(m, "DMA ");
|
||||
switch (i) {
|
||||
@@ -657,7 +661,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
|
||||
}
|
||||
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
|
||||
}
|
||||
mutex_unlock(&shrinker_lock);
|
||||
spin_unlock(&shrinker_lock);
|
||||
|
||||
ttm_pool_debugfs_footer(m);
|
||||
return 0;
|
||||
@@ -694,7 +698,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
|
||||
if (!page_pool_size)
|
||||
page_pool_size = num_pages;
|
||||
|
||||
mutex_init(&shrinker_lock);
|
||||
spin_lock_init(&shrinker_lock);
|
||||
INIT_LIST_HEAD(&shrinker_list);
|
||||
|
||||
for (i = 0; i < MAX_ORDER; ++i) {
|
||||
|
||||
@@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
|
||||
* Initialise a generic range manager for the selected memory type.
|
||||
* The range manager is installed for this device in the type slot.
|
||||
*/
|
||||
int ttm_range_man_init(struct ttm_device *bdev,
|
||||
int ttm_range_man_init_nocheck(struct ttm_device *bdev,
|
||||
unsigned type, bool use_tt,
|
||||
unsigned long p_size)
|
||||
{
|
||||
@@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev,
|
||||
ttm_resource_manager_set_used(man, true);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_range_man_init);
|
||||
EXPORT_SYMBOL(ttm_range_man_init_nocheck);
|
||||
|
||||
/**
|
||||
* ttm_range_man_fini
|
||||
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init);
|
||||
*
|
||||
* Remove the generic range manager from a slot and tear it down.
|
||||
*/
|
||||
int ttm_range_man_fini(struct ttm_device *bdev,
|
||||
int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
|
||||
unsigned type)
|
||||
{
|
||||
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
|
||||
@@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev,
|
||||
kfree(rman);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_range_man_fini);
|
||||
EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
|
||||
|
||||
@@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_resource_free);
|
||||
|
||||
static bool ttm_resource_places_compat(struct ttm_resource *res,
|
||||
const struct ttm_place *places,
|
||||
unsigned num_placement)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
if (res->placement & TTM_PL_FLAG_TEMPORARY)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < num_placement; i++) {
|
||||
const struct ttm_place *heap = &places[i];
|
||||
|
||||
if (res->start < heap->fpfn || (heap->lpfn &&
|
||||
(res->start + res->num_pages) > heap->lpfn))
|
||||
continue;
|
||||
|
||||
if ((res->mem_type == heap->mem_type) &&
|
||||
(!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
|
||||
(res->placement & TTM_PL_FLAG_CONTIGUOUS)))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_resource_compat - check if resource is compatible with placement
|
||||
*
|
||||
* @res: the resource to check
|
||||
* @placement: the placement to check against
|
||||
*
|
||||
* Returns true if the placement is compatible.
|
||||
*/
|
||||
bool ttm_resource_compat(struct ttm_resource *res,
|
||||
struct ttm_placement *placement)
|
||||
{
|
||||
if (ttm_resource_places_compat(res, placement->placement,
|
||||
placement->num_placement))
|
||||
return true;
|
||||
|
||||
if ((placement->busy_placement != placement->placement ||
|
||||
placement->num_busy_placement > placement->num_placement) &&
|
||||
ttm_resource_places_compat(res, placement->busy_placement,
|
||||
placement->num_busy_placement))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_resource_compat);
|
||||
|
||||
/**
|
||||
* ttm_resource_manager_init
|
||||
*
|
||||
|
||||
@@ -122,17 +122,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
|
||||
{
|
||||
ttm_tt_unpopulate(bdev, ttm);
|
||||
|
||||
if (ttm->swap_storage)
|
||||
fput(ttm->swap_storage);
|
||||
|
||||
ttm->swap_storage = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_tt_destroy_common);
|
||||
|
||||
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
|
||||
{
|
||||
bdev->funcs->ttm_tt_destroy(bdev, ttm);
|
||||
@@ -167,6 +156,12 @@ EXPORT_SYMBOL(ttm_tt_init);
|
||||
|
||||
void ttm_tt_fini(struct ttm_tt *ttm)
|
||||
{
|
||||
WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);
|
||||
|
||||
if (ttm->swap_storage)
|
||||
fput(ttm->swap_storage);
|
||||
ttm->swap_storage = NULL;
|
||||
|
||||
if (ttm->pages)
|
||||
kvfree(ttm->pages);
|
||||
else
|
||||
|
||||
@@ -4,6 +4,7 @@ config DRM_UDL
|
||||
depends on DRM
|
||||
depends on USB
|
||||
depends on USB_ARCH_HAS_HCD
|
||||
depends on MMU
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
select DRM_KMS_HELPER
|
||||
help
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
config DRM_V3D
|
||||
tristate "Broadcom V3D 3.x and newer"
|
||||
depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
|
||||
depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST
|
||||
depends on DRM
|
||||
depends on COMMON_CLK
|
||||
depends on MMU
|
||||
|
||||
@@ -234,11 +234,6 @@ struct v3d_job {
|
||||
struct drm_gem_object **bo;
|
||||
u32 bo_count;
|
||||
|
||||
/* Array of struct dma_fence * to block on before submitting this job.
|
||||
*/
|
||||
struct xarray deps;
|
||||
unsigned long last_dep;
|
||||
|
||||
/* v3d fence to be signaled by IRQ handler when the job is complete. */
|
||||
struct dma_fence *irq_fence;
|
||||
|
||||
@@ -379,6 +374,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void v3d_job_cleanup(struct v3d_job *job);
|
||||
void v3d_job_put(struct v3d_job *job);
|
||||
void v3d_reset(struct v3d_dev *v3d);
|
||||
void v3d_invalidate_caches(struct v3d_dev *v3d);
|
||||
|
||||
@@ -197,8 +197,8 @@ v3d_clean_caches(struct v3d_dev *v3d)
|
||||
|
||||
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
|
||||
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
|
||||
V3D_L2TCACTL_L2TFLS), 100)) {
|
||||
DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
|
||||
V3D_L2TCACTL_TMUWCF), 100)) {
|
||||
DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
|
||||
}
|
||||
|
||||
mutex_lock(&v3d->cache_clean_lock);
|
||||
@@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < job->bo_count; i++) {
|
||||
ret = drm_gem_fence_array_add_implicit(&job->deps,
|
||||
job->bo[i], true);
|
||||
ret = drm_sched_job_add_implicit_dependencies(&job->base,
|
||||
job->bo[i], true);
|
||||
if (ret) {
|
||||
drm_gem_unlock_reservations(job->bo, job->bo_count,
|
||||
acquire_ctx);
|
||||
@@ -356,8 +356,6 @@ static void
|
||||
v3d_job_free(struct kref *ref)
|
||||
{
|
||||
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
|
||||
unsigned long index;
|
||||
struct dma_fence *fence;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < job->bo_count; i++) {
|
||||
@@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref)
|
||||
}
|
||||
kvfree(job->bo);
|
||||
|
||||
xa_for_each(&job->deps, index, fence) {
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
xa_destroy(&job->deps);
|
||||
|
||||
dma_fence_put(job->irq_fence);
|
||||
dma_fence_put(job->done_fence);
|
||||
|
||||
@@ -397,6 +390,12 @@ v3d_render_job_free(struct kref *ref)
|
||||
v3d_job_free(ref);
|
||||
}
|
||||
|
||||
void v3d_job_cleanup(struct v3d_job *job)
|
||||
{
|
||||
drm_sched_job_cleanup(&job->base);
|
||||
v3d_job_put(job);
|
||||
}
|
||||
|
||||
void v3d_job_put(struct v3d_job *job)
|
||||
{
|
||||
kref_put(&job->refcount, job->free);
|
||||
@@ -438,9 +437,10 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
|
||||
static int
|
||||
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
|
||||
struct v3d_job *job, void (*free)(struct kref *ref),
|
||||
u32 in_sync)
|
||||
u32 in_sync, enum v3d_queue queue)
|
||||
{
|
||||
struct dma_fence *in_fence = NULL;
|
||||
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
|
||||
int ret;
|
||||
|
||||
job->v3d = v3d;
|
||||
@@ -450,44 +450,40 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
|
||||
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
|
||||
v3d_priv);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
|
||||
if (ret == -EINVAL)
|
||||
goto fail;
|
||||
goto fail_job;
|
||||
|
||||
ret = drm_gem_fence_array_add(&job->deps, in_fence);
|
||||
ret = drm_sched_job_add_dependency(&job->base, in_fence);
|
||||
if (ret)
|
||||
goto fail;
|
||||
goto fail_job;
|
||||
|
||||
kref_init(&job->refcount);
|
||||
|
||||
return 0;
|
||||
fail_job:
|
||||
drm_sched_job_cleanup(&job->base);
|
||||
fail:
|
||||
xa_destroy(&job->deps);
|
||||
pm_runtime_put_autosuspend(v3d->drm.dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
v3d_push_job(struct v3d_file_priv *v3d_priv,
|
||||
struct v3d_job *job, enum v3d_queue queue)
|
||||
static void
|
||||
v3d_push_job(struct v3d_job *job)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
|
||||
v3d_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
drm_sched_job_arm(&job->base);
|
||||
|
||||
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
|
||||
|
||||
/* put by scheduler job completion */
|
||||
kref_get(&job->refcount);
|
||||
|
||||
drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
|
||||
|
||||
return 0;
|
||||
drm_sched_entity_push_job(&job->base);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -562,7 +558,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
|
||||
INIT_LIST_HEAD(&render->unref_list);
|
||||
|
||||
ret = v3d_job_init(v3d, file_priv, &render->base,
|
||||
v3d_render_job_free, args->in_sync_rcl);
|
||||
v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
|
||||
if (ret) {
|
||||
kfree(render);
|
||||
return ret;
|
||||
@@ -576,7 +572,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
|
||||
}
|
||||
|
||||
ret = v3d_job_init(v3d, file_priv, &bin->base,
|
||||
v3d_job_free, args->in_sync_bcl);
|
||||
v3d_job_free, args->in_sync_bcl, V3D_BIN);
|
||||
if (ret) {
|
||||
v3d_job_put(&render->base);
|
||||
kfree(bin);
|
||||
@@ -598,7 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
|
||||
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
|
||||
if (ret) {
|
||||
kfree(clean_job);
|
||||
clean_job = NULL;
|
||||
@@ -633,31 +629,26 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
|
||||
if (bin) {
|
||||
bin->base.perfmon = render->base.perfmon;
|
||||
v3d_perfmon_get(bin->base.perfmon);
|
||||
ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
v3d_push_job(&bin->base);
|
||||
|
||||
ret = drm_gem_fence_array_add(&render->base.deps,
|
||||
dma_fence_get(bin->base.done_fence));
|
||||
ret = drm_sched_job_add_dependency(&render->base.base,
|
||||
dma_fence_get(bin->base.done_fence));
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
}
|
||||
|
||||
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
v3d_push_job(&render->base);
|
||||
|
||||
if (clean_job) {
|
||||
struct dma_fence *render_fence =
|
||||
dma_fence_get(render->base.done_fence);
|
||||
ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
|
||||
ret = drm_sched_job_add_dependency(&clean_job->base,
|
||||
render_fence);
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
clean_job->perfmon = render->base.perfmon;
|
||||
v3d_perfmon_get(clean_job->perfmon);
|
||||
ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
v3d_push_job(clean_job);
|
||||
}
|
||||
|
||||
mutex_unlock(&v3d->sched_lock);
|
||||
@@ -682,10 +673,10 @@ fail_unreserve:
|
||||
last_job->bo_count, &acquire_ctx);
|
||||
fail:
|
||||
if (bin)
|
||||
v3d_job_put(&bin->base);
|
||||
v3d_job_put(&render->base);
|
||||
v3d_job_cleanup(&bin->base);
|
||||
v3d_job_cleanup(&render->base);
|
||||
if (clean_job)
|
||||
v3d_job_put(clean_job);
|
||||
v3d_job_cleanup(clean_job);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -704,7 +695,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct v3d_dev *v3d = to_v3d_dev(dev);
|
||||
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
|
||||
struct drm_v3d_submit_tfu *args = data;
|
||||
struct v3d_tfu_job *job;
|
||||
struct ww_acquire_ctx acquire_ctx;
|
||||
@@ -717,7 +707,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
|
||||
return -ENOMEM;
|
||||
|
||||
ret = v3d_job_init(v3d, file_priv, &job->base,
|
||||
v3d_job_free, args->in_sync);
|
||||
v3d_job_free, args->in_sync, V3D_TFU);
|
||||
if (ret) {
|
||||
kfree(job);
|
||||
return ret;
|
||||
@@ -761,9 +751,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
|
||||
goto fail;
|
||||
|
||||
mutex_lock(&v3d->sched_lock);
|
||||
ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
v3d_push_job(&job->base);
|
||||
mutex_unlock(&v3d->sched_lock);
|
||||
|
||||
v3d_attach_fences_and_unlock_reservation(file_priv,
|
||||
@@ -775,12 +763,8 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
|
||||
|
||||
return 0;
|
||||
|
||||
fail_unreserve:
|
||||
mutex_unlock(&v3d->sched_lock);
|
||||
drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
|
||||
&acquire_ctx);
|
||||
fail:
|
||||
v3d_job_put(&job->base);
|
||||
v3d_job_cleanup(&job->base);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -818,7 +802,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
|
||||
return -ENOMEM;
|
||||
|
||||
ret = v3d_job_init(v3d, file_priv, &job->base,
|
||||
v3d_job_free, args->in_sync);
|
||||
v3d_job_free, args->in_sync, V3D_CSD);
|
||||
if (ret) {
|
||||
kfree(job);
|
||||
return ret;
|
||||
@@ -831,7 +815,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
|
||||
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
|
||||
if (ret) {
|
||||
v3d_job_put(&job->base);
|
||||
kfree(clean_job);
|
||||
@@ -859,18 +843,14 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
|
||||
}
|
||||
|
||||
mutex_lock(&v3d->sched_lock);
|
||||
ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
|
||||
v3d_push_job(&job->base);
|
||||
|
||||
ret = drm_sched_job_add_dependency(&clean_job->base,
|
||||
dma_fence_get(job->base.done_fence));
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
|
||||
ret = drm_gem_fence_array_add(&clean_job->deps,
|
||||
dma_fence_get(job->base.done_fence));
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
|
||||
ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
|
||||
if (ret)
|
||||
goto fail_unreserve;
|
||||
v3d_push_job(clean_job);
|
||||
mutex_unlock(&v3d->sched_lock);
|
||||
|
||||
v3d_attach_fences_and_unlock_reservation(file_priv,
|
||||
@@ -889,8 +869,8 @@ fail_unreserve:
|
||||
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
|
||||
&acquire_ctx);
|
||||
fail:
|
||||
v3d_job_put(&job->base);
|
||||
v3d_job_put(clean_job);
|
||||
v3d_job_cleanup(&job->base);
|
||||
v3d_job_cleanup(clean_job);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
* jobs when bulk background jobs are queued up, we submit a new job
|
||||
* to the HW only when it has completed the last one, instead of
|
||||
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
|
||||
* v3d_job_dependency() to manage the dependency between bin and
|
||||
* drm_sched_job_add_dependency() to manage the dependency between bin and
|
||||
* render, instead of having the clients submit jobs using the HW's
|
||||
* semaphores to interlock between them.
|
||||
*/
|
||||
@@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job)
|
||||
}
|
||||
|
||||
static void
|
||||
v3d_job_free(struct drm_sched_job *sched_job)
|
||||
v3d_sched_job_free(struct drm_sched_job *sched_job)
|
||||
{
|
||||
struct v3d_job *job = to_v3d_job(sched_job);
|
||||
|
||||
drm_sched_job_cleanup(sched_job);
|
||||
v3d_job_put(job);
|
||||
v3d_job_cleanup(job);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -73,28 +72,6 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
|
||||
v3d_perfmon_start(v3d, job->perfmon);
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the fences that the job depends on, one by one.
|
||||
*
|
||||
* If placed in the scheduler's .dependency method, the corresponding
|
||||
* .run_job won't be called until all of them have been signaled.
|
||||
*/
|
||||
static struct dma_fence *
|
||||
v3d_job_dependency(struct drm_sched_job *sched_job,
|
||||
struct drm_sched_entity *s_entity)
|
||||
{
|
||||
struct v3d_job *job = to_v3d_job(sched_job);
|
||||
|
||||
/* XXX: Wait on a fence for switching the GMP if necessary,
|
||||
* and then do so.
|
||||
*/
|
||||
|
||||
if (!xa_empty(&job->deps))
|
||||
return xa_erase(&job->deps, job->last_dep++);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
|
||||
{
|
||||
struct v3d_bin_job *job = to_bin_job(sched_job);
|
||||
@@ -373,38 +350,33 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
|
||||
}
|
||||
|
||||
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
|
||||
.dependency = v3d_job_dependency,
|
||||
.run_job = v3d_bin_job_run,
|
||||
.timedout_job = v3d_bin_job_timedout,
|
||||
.free_job = v3d_job_free,
|
||||
.free_job = v3d_sched_job_free,
|
||||
};
|
||||
|
||||
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
|
||||
.dependency = v3d_job_dependency,
|
||||
.run_job = v3d_render_job_run,
|
||||
.timedout_job = v3d_render_job_timedout,
|
||||
.free_job = v3d_job_free,
|
||||
.free_job = v3d_sched_job_free,
|
||||
};
|
||||
|
||||
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
|
||||
.dependency = v3d_job_dependency,
|
||||
.run_job = v3d_tfu_job_run,
|
||||
.timedout_job = v3d_generic_job_timedout,
|
||||
.free_job = v3d_job_free,
|
||||
.free_job = v3d_sched_job_free,
|
||||
};
|
||||
|
||||
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
|
||||
.dependency = v3d_job_dependency,
|
||||
.run_job = v3d_csd_job_run,
|
||||
.timedout_job = v3d_csd_job_timedout,
|
||||
.free_job = v3d_job_free
|
||||
.free_job = v3d_sched_job_free
|
||||
};
|
||||
|
||||
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
|
||||
.dependency = v3d_job_dependency,
|
||||
.run_job = v3d_cache_clean_job_run,
|
||||
.timedout_job = v3d_generic_job_timedout,
|
||||
.free_job = v3d_job_free
|
||||
.free_job = v3d_sched_job_free
|
||||
};
|
||||
|
||||
int
|
||||
|
||||
@@ -229,26 +229,19 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
|
||||
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
|
||||
{
|
||||
struct device *dev = &dpi->pdev->dev;
|
||||
struct drm_panel *panel;
|
||||
struct drm_bridge *bridge;
|
||||
int ret;
|
||||
|
||||
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
|
||||
&panel, &bridge);
|
||||
if (ret) {
|
||||
bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
|
||||
if (IS_ERR(bridge)) {
|
||||
/* If nothing was connected in the DT, that's not an
|
||||
* error.
|
||||
*/
|
||||
if (ret == -ENODEV)
|
||||
if (PTR_ERR(bridge) == -ENODEV)
|
||||
return 0;
|
||||
else
|
||||
return ret;
|
||||
return PTR_ERR(bridge);
|
||||
}
|
||||
|
||||
if (panel)
|
||||
bridge = drm_panel_bridge_add_typed(panel,
|
||||
DRM_MODE_CONNECTOR_DPI);
|
||||
|
||||
return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
|
||||
}
|
||||
|
||||
|
||||
@@ -50,13 +50,11 @@
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
/* Helper function for mapping the regs on a platform device. */
|
||||
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
|
||||
void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
|
||||
{
|
||||
struct resource *res;
|
||||
void __iomem *map;
|
||||
|
||||
res = platform_get_resource(dev, IORESOURCE_MEM, index);
|
||||
map = devm_ioremap_resource(&dev->dev, res);
|
||||
map = devm_platform_ioremap_resource(pdev, index);
|
||||
if (IS_ERR(map))
|
||||
return map;
|
||||
|
||||
|
||||
@@ -1497,7 +1497,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
|
||||
struct drm_device *drm = dev_get_drvdata(master);
|
||||
struct vc4_dsi *dsi = dev_get_drvdata(dev);
|
||||
struct vc4_dsi_encoder *vc4_dsi_encoder;
|
||||
struct drm_panel *panel;
|
||||
const struct of_device_id *match;
|
||||
dma_cap_mask_t dma_mask;
|
||||
int ret;
|
||||
@@ -1609,27 +1608,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
|
||||
&panel, &dsi->bridge);
|
||||
if (ret) {
|
||||
/* If the bridge or panel pointed by dev->of_node is not
|
||||
* enabled, just return 0 here so that we don't prevent the DRM
|
||||
* dev from being registered. Of course that means the DSI
|
||||
* encoder won't be exposed, but that's not a problem since
|
||||
* nothing is connected to it.
|
||||
*/
|
||||
if (ret == -ENODEV)
|
||||
return 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (panel) {
|
||||
dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
|
||||
DRM_MODE_CONNECTOR_DSI);
|
||||
if (IS_ERR(dsi->bridge))
|
||||
return PTR_ERR(dsi->bridge);
|
||||
}
|
||||
dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
|
||||
if (IS_ERR(dsi->bridge))
|
||||
return PTR_ERR(dsi->bridge);
|
||||
|
||||
/* The esc clock rate is supposed to always be 100Mhz. */
|
||||
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
|
||||
@@ -1667,8 +1648,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
|
||||
{
|
||||
struct vc4_dsi *dsi = dev_get_drvdata(dev);
|
||||
|
||||
if (dsi->bridge)
|
||||
pm_runtime_disable(dev);
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
/*
|
||||
* Restore the bridge_chain so the bridge detach procedure can happen
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/drm_gem_shmem_helper.h>
|
||||
#include <drm/drm_ioctl.h>
|
||||
#include <drm/drm_managed.h>
|
||||
#include <drm/drm_prime.h>
|
||||
@@ -50,87 +51,11 @@
|
||||
#define DRIVER_MAJOR 1
|
||||
#define DRIVER_MINOR 0
|
||||
|
||||
static const struct drm_gem_object_funcs vgem_gem_object_funcs;
|
||||
|
||||
static struct vgem_device {
|
||||
struct drm_device drm;
|
||||
struct platform_device *platform;
|
||||
} *vgem_device;
|
||||
|
||||
static void vgem_gem_free_object(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
|
||||
|
||||
kvfree(vgem_obj->pages);
|
||||
mutex_destroy(&vgem_obj->pages_lock);
|
||||
|
||||
if (obj->import_attach)
|
||||
drm_prime_gem_destroy(obj, vgem_obj->table);
|
||||
|
||||
drm_gem_object_release(obj);
|
||||
kfree(vgem_obj);
|
||||
}
|
||||
|
||||
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct drm_vgem_gem_object *obj = vma->vm_private_data;
|
||||
/* We don't use vmf->pgoff since that has the fake offset */
|
||||
unsigned long vaddr = vmf->address;
|
||||
vm_fault_t ret = VM_FAULT_SIGBUS;
|
||||
loff_t num_pages;
|
||||
pgoff_t page_offset;
|
||||
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
|
||||
|
||||
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
|
||||
|
||||
if (page_offset >= num_pages)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
mutex_lock(&obj->pages_lock);
|
||||
if (obj->pages) {
|
||||
get_page(obj->pages[page_offset]);
|
||||
vmf->page = obj->pages[page_offset];
|
||||
ret = 0;
|
||||
}
|
||||
mutex_unlock(&obj->pages_lock);
|
||||
if (ret) {
|
||||
struct page *page;
|
||||
|
||||
page = shmem_read_mapping_page(
|
||||
file_inode(obj->base.filp)->i_mapping,
|
||||
page_offset);
|
||||
if (!IS_ERR(page)) {
|
||||
vmf->page = page;
|
||||
ret = 0;
|
||||
} else switch (PTR_ERR(page)) {
|
||||
case -ENOSPC:
|
||||
case -ENOMEM:
|
||||
ret = VM_FAULT_OOM;
|
||||
break;
|
||||
case -EBUSY:
|
||||
ret = VM_FAULT_RETRY;
|
||||
break;
|
||||
case -EFAULT:
|
||||
case -EINVAL:
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(PTR_ERR(page));
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct vgem_gem_vm_ops = {
|
||||
.fault = vgem_gem_fault,
|
||||
.open = drm_gem_vm_open,
|
||||
.close = drm_gem_vm_close,
|
||||
};
|
||||
|
||||
static int vgem_open(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
struct vgem_file *vfile;
|
||||
@@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
|
||||
kfree(vfile);
|
||||
}
|
||||
|
||||
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
|
||||
unsigned long size)
|
||||
{
|
||||
struct drm_vgem_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
if (!obj)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
obj->base.funcs = &vgem_gem_object_funcs;
|
||||
|
||||
ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
|
||||
if (ret) {
|
||||
kfree(obj);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
mutex_init(&obj->pages_lock);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
|
||||
{
|
||||
drm_gem_object_release(&obj->base);
|
||||
kfree(obj);
|
||||
}
|
||||
|
||||
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
unsigned int *handle,
|
||||
unsigned long size)
|
||||
{
|
||||
struct drm_vgem_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
obj = __vgem_gem_create(dev, size);
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
||||
ret = drm_gem_handle_create(file, &obj->base, handle);
|
||||
if (ret) {
|
||||
drm_gem_object_put(&obj->base);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return &obj->base;
|
||||
}
|
||||
|
||||
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args)
|
||||
{
|
||||
struct drm_gem_object *gem_object;
|
||||
u64 pitch, size;
|
||||
|
||||
pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
|
||||
size = args->height * pitch;
|
||||
if (size == 0)
|
||||
return -EINVAL;
|
||||
|
||||
gem_object = vgem_gem_create(dev, file, &args->handle, size);
|
||||
if (IS_ERR(gem_object))
|
||||
return PTR_ERR(gem_object);
|
||||
|
||||
args->size = gem_object->size;
|
||||
args->pitch = pitch;
|
||||
|
||||
drm_gem_object_put(gem_object);
|
||||
|
||||
DRM_DEBUG("Created object of size %llu\n", args->size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct drm_ioctl_desc vgem_ioctls[] = {
|
||||
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
|
||||
};
|
||||
|
||||
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
|
||||
|
||||
static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
|
||||
{
|
||||
unsigned long flags = vma->vm_flags;
|
||||
int ret;
|
||||
struct drm_gem_shmem_object *obj;
|
||||
|
||||
ret = drm_gem_mmap(filp, vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
if (!obj)
|
||||
return NULL;
|
||||
|
||||
/* Keep the WC mmaping set by drm_gem_mmap() but our pages
|
||||
* are ordinary and not special.
|
||||
/*
|
||||
* vgem doesn't have any begin/end cpu access ioctls, therefore must use
|
||||
* coherent memory or dma-buf sharing just wont work.
|
||||
*/
|
||||
vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
|
||||
return 0;
|
||||
}
|
||||
obj->map_wc = true;
|
||||
|
||||
static const struct file_operations vgem_driver_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.mmap = vgem_mmap,
|
||||
.poll = drm_poll,
|
||||
.read = drm_read,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.compat_ioctl = drm_compat_ioctl,
|
||||
.release = drm_release,
|
||||
};
|
||||
|
||||
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
|
||||
{
|
||||
mutex_lock(&bo->pages_lock);
|
||||
if (bo->pages_pin_count++ == 0) {
|
||||
struct page **pages;
|
||||
|
||||
pages = drm_gem_get_pages(&bo->base);
|
||||
if (IS_ERR(pages)) {
|
||||
bo->pages_pin_count--;
|
||||
mutex_unlock(&bo->pages_lock);
|
||||
return pages;
|
||||
}
|
||||
|
||||
bo->pages = pages;
|
||||
}
|
||||
mutex_unlock(&bo->pages_lock);
|
||||
|
||||
return bo->pages;
|
||||
}
|
||||
|
||||
static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
|
||||
{
|
||||
mutex_lock(&bo->pages_lock);
|
||||
if (--bo->pages_pin_count == 0) {
|
||||
drm_gem_put_pages(&bo->base, bo->pages, true, true);
|
||||
bo->pages = NULL;
|
||||
}
|
||||
mutex_unlock(&bo->pages_lock);
|
||||
}
|
||||
|
||||
static int vgem_prime_pin(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
|
||||
long n_pages = obj->size >> PAGE_SHIFT;
|
||||
struct page **pages;
|
||||
|
||||
pages = vgem_pin_pages(bo);
|
||||
if (IS_ERR(pages))
|
||||
return PTR_ERR(pages);
|
||||
|
||||
/* Flush the object from the CPU cache so that importers can rely
|
||||
* on coherent indirect access via the exported dma-address.
|
||||
*/
|
||||
drm_clflush_pages(pages, n_pages);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vgem_prime_unpin(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
|
||||
|
||||
vgem_unpin_pages(bo);
|
||||
}
|
||||
|
||||
static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
|
||||
|
||||
return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
|
||||
struct dma_buf *dma_buf)
|
||||
{
|
||||
struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
|
||||
|
||||
return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
|
||||
}
|
||||
|
||||
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
|
||||
struct dma_buf_attachment *attach, struct sg_table *sg)
|
||||
{
|
||||
struct drm_vgem_gem_object *obj;
|
||||
int npages;
|
||||
|
||||
obj = __vgem_gem_create(dev, attach->dmabuf->size);
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
||||
npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
|
||||
|
||||
obj->table = sg;
|
||||
obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
|
||||
if (!obj->pages) {
|
||||
__vgem_gem_destroy(obj);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
obj->pages_pin_count++; /* perma-pinned */
|
||||
drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
|
||||
return &obj->base;
|
||||
}
|
||||
|
||||
static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
|
||||
{
|
||||
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
|
||||
long n_pages = obj->size >> PAGE_SHIFT;
|
||||
struct page **pages;
|
||||
void *vaddr;
|
||||
|
||||
pages = vgem_pin_pages(bo);
|
||||
if (IS_ERR(pages))
|
||||
return PTR_ERR(pages);
|
||||
|
||||
vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
|
||||
if (!vaddr)
|
||||
return -ENOMEM;
|
||||
dma_buf_map_set_vaddr(map, vaddr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
|
||||
{
|
||||
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
|
||||
|
||||
vunmap(map->vaddr);
|
||||
vgem_unpin_pages(bo);
|
||||
}
|
||||
|
||||
static int vgem_prime_mmap(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (obj->size < vma->vm_end - vma->vm_start)
|
||||
return -EINVAL;
|
||||
|
||||
if (!obj->filp)
|
||||
return -ENODEV;
|
||||
|
||||
ret = call_mmap(obj->filp, vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
vma_set_file(vma, obj->filp);
|
||||
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
|
||||
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
|
||||
.free = vgem_gem_free_object,
|
||||
.pin = vgem_prime_pin,
|
||||
.unpin = vgem_prime_unpin,
|
||||
.get_sg_table = vgem_prime_get_sg_table,
|
||||
.vmap = vgem_prime_vmap,
|
||||
.vunmap = vgem_prime_vunmap,
|
||||
.vm_ops = &vgem_gem_vm_ops,
|
||||
};
|
||||
|
||||
static const struct drm_driver vgem_driver = {
|
||||
.driver_features = DRIVER_GEM | DRIVER_RENDER,
|
||||
.open = vgem_open,
|
||||
@@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = {
|
||||
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
|
||||
.fops = &vgem_driver_fops,
|
||||
|
||||
.dumb_create = vgem_gem_dumb_create,
|
||||
|
||||
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
|
||||
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
||||
.gem_prime_import = vgem_prime_import,
|
||||
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
|
||||
.gem_prime_mmap = vgem_prime_mmap,
|
||||
DRM_GEM_SHMEM_DRIVER_OPS,
|
||||
.gem_create_object = vgem_gem_create_object,
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#ifndef VIRTIO_DRV_H
|
||||
#define VIRTIO_DRV_H
|
||||
|
||||
#include <linux/dma-direction.h>
|
||||
#include <linux/virtio.h>
|
||||
#include <linux/virtio_ids.h>
|
||||
#include <linux/virtio_config.h>
|
||||
@@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
|
||||
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_object **bo_ptr);
|
||||
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
|
||||
struct device *dev,
|
||||
enum dma_data_direction dir);
|
||||
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct sg_table *
|
||||
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = attach->dmabuf->priv;
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
|
||||
if (virtio_gpu_is_vram(bo))
|
||||
return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
|
||||
|
||||
return drm_gem_map_dma_buf(attach, dir);
|
||||
}
|
||||
|
||||
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = attach->dmabuf->priv;
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
|
||||
if (virtio_gpu_is_vram(bo)) {
|
||||
virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
|
||||
return;
|
||||
}
|
||||
|
||||
drm_gem_unmap_dma_buf(attach, sgt, dir);
|
||||
}
|
||||
|
||||
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
|
||||
.ops = {
|
||||
.cache_sgt_mapping = true,
|
||||
.attach = virtio_dma_buf_attach,
|
||||
.detach = drm_gem_map_detach,
|
||||
.map_dma_buf = drm_gem_map_dma_buf,
|
||||
.unmap_dma_buf = drm_gem_unmap_dma_buf,
|
||||
.map_dma_buf = virtgpu_gem_map_dma_buf,
|
||||
.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
|
||||
.release = drm_gem_dmabuf_release,
|
||||
.mmap = drm_gem_dmabuf_mmap,
|
||||
.vmap = drm_gem_dmabuf_vmap,
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
|
||||
{
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
|
||||
struct device *dev,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
|
||||
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
|
||||
struct sg_table *sgt;
|
||||
dma_addr_t addr;
|
||||
int ret;
|
||||
|
||||
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
|
||||
if (!sgt)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
|
||||
// Virtio devices can access the dma-buf via its UUID. Return a stub
|
||||
// sg_table so the dma-buf API still works.
|
||||
if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
return sgt;
|
||||
}
|
||||
|
||||
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
addr = dma_map_resource(dev, vram->vram_node.start,
|
||||
vram->vram_node.size, dir,
|
||||
DMA_ATTR_SKIP_CPU_SYNC);
|
||||
ret = dma_mapping_error(dev, addr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
|
||||
sg_dma_address(sgt->sgl) = addr;
|
||||
sg_dma_len(sgt->sgl) = vram->vram_node.size;
|
||||
|
||||
return sgt;
|
||||
out:
|
||||
sg_free_table(sgt);
|
||||
kfree(sgt);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
if (sgt->nents) {
|
||||
dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
|
||||
sg_dma_len(sgt->sgl), dir,
|
||||
DMA_ATTR_SKIP_CPU_SYNC);
|
||||
}
|
||||
sg_free_table(sgt);
|
||||
kfree(sgt);
|
||||
}
|
||||
|
||||
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
|
||||
.open = virtio_gpu_gem_object_open,
|
||||
.close = virtio_gpu_gem_object_close,
|
||||
|
||||
@@ -94,7 +94,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
|
||||
struct ttm_operation_ctx ctx = {interruptible, false };
|
||||
struct ttm_buffer_object *bo = &buf->base;
|
||||
int ret;
|
||||
uint32_t new_flags;
|
||||
|
||||
vmw_execbuf_release_pinned_bo(dev_priv);
|
||||
|
||||
@@ -103,8 +102,8 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
|
||||
goto err;
|
||||
|
||||
if (buf->base.pin_count > 0)
|
||||
ret = ttm_bo_mem_compat(placement, bo->resource,
|
||||
&new_flags) == true ? 0 : -EINVAL;
|
||||
ret = ttm_resource_compat(bo->resource, placement)
|
||||
? 0 : -EINVAL;
|
||||
else
|
||||
ret = ttm_bo_validate(bo, placement, &ctx);
|
||||
|
||||
@@ -136,7 +135,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
|
||||
struct ttm_operation_ctx ctx = {interruptible, false };
|
||||
struct ttm_buffer_object *bo = &buf->base;
|
||||
int ret;
|
||||
uint32_t new_flags;
|
||||
|
||||
vmw_execbuf_release_pinned_bo(dev_priv);
|
||||
|
||||
@@ -145,8 +143,8 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
|
||||
goto err;
|
||||
|
||||
if (buf->base.pin_count > 0) {
|
||||
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
|
||||
&new_flags) == true ? 0 : -EINVAL;
|
||||
ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
|
||||
? 0 : -EINVAL;
|
||||
goto out_unreserve;
|
||||
}
|
||||
|
||||
@@ -208,7 +206,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
|
||||
struct ttm_placement placement;
|
||||
struct ttm_place place;
|
||||
int ret = 0;
|
||||
uint32_t new_flags;
|
||||
|
||||
place = vmw_vram_placement.placement[0];
|
||||
place.lpfn = bo->resource->num_pages;
|
||||
@@ -236,8 +233,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
|
||||
}
|
||||
|
||||
if (buf->base.pin_count > 0)
|
||||
ret = ttm_bo_mem_compat(&placement, bo->resource,
|
||||
&new_flags) == true ? 0 : -EINVAL;
|
||||
ret = ttm_resource_compat(bo->resource, &placement)
|
||||
? 0 : -EINVAL;
|
||||
else
|
||||
ret = ttm_bo_validate(bo, &placement, &ctx);
|
||||
|
||||
|
||||
@@ -522,14 +522,8 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
|
||||
struct vmw_ttm_tt *vmw_be =
|
||||
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
|
||||
|
||||
vmw_ttm_unbind(bdev, ttm);
|
||||
ttm_tt_destroy_common(bdev, ttm);
|
||||
vmw_ttm_unmap_dma(vmw_be);
|
||||
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
|
||||
ttm_tt_fini(&vmw_be->dma_ttm);
|
||||
else
|
||||
ttm_tt_fini(ttm);
|
||||
|
||||
ttm_tt_fini(ttm);
|
||||
if (vmw_be->mob)
|
||||
vmw_mob_destroy(vmw_be->mob);
|
||||
|
||||
@@ -574,6 +568,8 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
|
||||
dma_ttm);
|
||||
unsigned int i;
|
||||
|
||||
vmw_ttm_unbind(bdev, ttm);
|
||||
|
||||
if (vmw_tt->mob) {
|
||||
vmw_mob_destroy(vmw_tt->mob);
|
||||
vmw_tt->mob = NULL;
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
config DRM_ZTE
|
||||
tristate "DRM Support for ZTE SoCs"
|
||||
depends on DRM && ARCH_ZX
|
||||
select DRM_KMS_CMA_HELPER
|
||||
select DRM_KMS_HELPER
|
||||
select SND_SOC_HDMI_CODEC if SND_SOC
|
||||
select VIDEOMODE_HELPERS
|
||||
help
|
||||
Choose this option to enable DRM on ZTE ZX SoCs.
|
||||
@@ -1,10 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
zxdrm-y := \
|
||||
zx_drm_drv.o \
|
||||
zx_hdmi.o \
|
||||
zx_plane.o \
|
||||
zx_tvenc.o \
|
||||
zx_vga.o \
|
||||
zx_vou.o
|
||||
|
||||
obj-$(CONFIG_DRM_ZTE) += zxdrm.o
|
||||
@@ -1,28 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2017 Sanechips Technology Co., Ltd.
|
||||
* Copyright 2017 Linaro Ltd.
|
||||
*/
|
||||
|
||||
#ifndef __ZX_COMMON_REGS_H__
|
||||
#define __ZX_COMMON_REGS_H__
|
||||
|
||||
/* CSC registers */
|
||||
#define CSC_CTRL0 0x30
|
||||
#define CSC_COV_MODE_SHIFT 16
|
||||
#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
|
||||
#define CSC_BT601_IMAGE_RGB2YCBCR 0
|
||||
#define CSC_BT601_IMAGE_YCBCR2RGB 1
|
||||
#define CSC_BT601_VIDEO_RGB2YCBCR 2
|
||||
#define CSC_BT601_VIDEO_YCBCR2RGB 3
|
||||
#define CSC_BT709_IMAGE_RGB2YCBCR 4
|
||||
#define CSC_BT709_IMAGE_YCBCR2RGB 5
|
||||
#define CSC_BT709_VIDEO_RGB2YCBCR 6
|
||||
#define CSC_BT709_VIDEO_YCBCR2RGB 7
|
||||
#define CSC_BT2020_IMAGE_RGB2YCBCR 8
|
||||
#define CSC_BT2020_IMAGE_YCBCR2RGB 9
|
||||
#define CSC_BT2020_VIDEO_RGB2YCBCR 10
|
||||
#define CSC_BT2020_VIDEO_YCBCR2RGB 11
|
||||
#define CSC_WORK_ENABLE BIT(0)
|
||||
|
||||
#endif /* __ZX_COMMON_REGS_H__ */
|
||||
@@ -1,184 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright 2016 Linaro Ltd.
|
||||
* Copyright 2016 ZTE Corporation.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/component.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_fb_cma_helper.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_gem_cma_helper.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_of.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "zx_drm_drv.h"
|
||||
#include "zx_vou.h"
|
||||
|
||||
static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
|
||||
.fb_create = drm_gem_fb_create,
|
||||
.atomic_check = drm_atomic_helper_check,
|
||||
.atomic_commit = drm_atomic_helper_commit,
|
||||
};
|
||||
|
||||
DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
|
||||
|
||||
static const struct drm_driver zx_drm_driver = {
|
||||
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
|
||||
DRM_GEM_CMA_DRIVER_OPS,
|
||||
.fops = &zx_drm_fops,
|
||||
.name = "zx-vou",
|
||||
.desc = "ZTE VOU Controller DRM",
|
||||
.date = "20160811",
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
};
|
||||
|
||||
static int zx_drm_bind(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm;
|
||||
int ret;
|
||||
|
||||
drm = drm_dev_alloc(&zx_drm_driver, dev);
|
||||
if (IS_ERR(drm))
|
||||
return PTR_ERR(drm);
|
||||
|
||||
dev_set_drvdata(dev, drm);
|
||||
|
||||
drm_mode_config_init(drm);
|
||||
drm->mode_config.min_width = 16;
|
||||
drm->mode_config.min_height = 16;
|
||||
drm->mode_config.max_width = 4096;
|
||||
drm->mode_config.max_height = 4096;
|
||||
drm->mode_config.funcs = &zx_drm_mode_config_funcs;
|
||||
|
||||
ret = component_bind_all(dev, drm);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
|
||||
goto out_unregister;
|
||||
}
|
||||
|
||||
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
|
||||
if (ret < 0) {
|
||||
DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
|
||||
goto out_unbind;
|
||||
}
|
||||
|
||||
drm_mode_config_reset(drm);
|
||||
drm_kms_helper_poll_init(drm);
|
||||
|
||||
ret = drm_dev_register(drm, 0);
|
||||
if (ret)
|
||||
goto out_poll_fini;
|
||||
|
||||
drm_fbdev_generic_setup(drm, 32);
|
||||
|
||||
return 0;
|
||||
|
||||
out_poll_fini:
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
drm_mode_config_cleanup(drm);
|
||||
out_unbind:
|
||||
component_unbind_all(dev, drm);
|
||||
out_unregister:
|
||||
dev_set_drvdata(dev, NULL);
|
||||
drm_dev_put(drm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void zx_drm_unbind(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm = dev_get_drvdata(dev);
|
||||
|
||||
drm_dev_unregister(drm);
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
drm_mode_config_cleanup(drm);
|
||||
component_unbind_all(dev, drm);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
drm_dev_put(drm);
|
||||
}
|
||||
|
||||
static const struct component_master_ops zx_drm_master_ops = {
|
||||
.bind = zx_drm_bind,
|
||||
.unbind = zx_drm_unbind,
|
||||
};
|
||||
|
||||
static int compare_of(struct device *dev, void *data)
|
||||
{
|
||||
return dev->of_node == data;
|
||||
}
|
||||
|
||||
static int zx_drm_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *parent = dev->of_node;
|
||||
struct device_node *child;
|
||||
struct component_match *match = NULL;
|
||||
int ret;
|
||||
|
||||
ret = devm_of_platform_populate(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for_each_available_child_of_node(parent, child)
|
||||
component_match_add(dev, &match, compare_of, child);
|
||||
|
||||
return component_master_add_with_match(dev, &zx_drm_master_ops, match);
|
||||
}
|
||||
|
||||
static int zx_drm_remove(struct platform_device *pdev)
|
||||
{
|
||||
component_master_del(&pdev->dev, &zx_drm_master_ops);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id zx_drm_of_match[] = {
|
||||
{ .compatible = "zte,zx296718-vou", },
|
||||
{ /* end */ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, zx_drm_of_match);
|
||||
|
||||
static struct platform_driver zx_drm_platform_driver = {
|
||||
.probe = zx_drm_probe,
|
||||
.remove = zx_drm_remove,
|
||||
.driver = {
|
||||
.name = "zx-drm",
|
||||
.of_match_table = zx_drm_of_match,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_driver *drivers[] = {
|
||||
&zx_crtc_driver,
|
||||
&zx_hdmi_driver,
|
||||
&zx_tvenc_driver,
|
||||
&zx_vga_driver,
|
||||
&zx_drm_platform_driver,
|
||||
};
|
||||
|
||||
static int zx_drm_init(void)
|
||||
{
|
||||
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
|
||||
}
|
||||
module_init(zx_drm_init);
|
||||
|
||||
static void zx_drm_exit(void)
|
||||
{
|
||||
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
|
||||
}
|
||||
module_exit(zx_drm_exit);
|
||||
|
||||
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
|
||||
MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
@@ -1,34 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright 2016 Linaro Ltd.
|
||||
* Copyright 2016 ZTE Corporation.
|
||||
*/
|
||||
|
||||
#ifndef __ZX_DRM_DRV_H__
|
||||
#define __ZX_DRM_DRV_H__
|
||||
|
||||
extern struct platform_driver zx_crtc_driver;
|
||||
extern struct platform_driver zx_hdmi_driver;
|
||||
extern struct platform_driver zx_tvenc_driver;
|
||||
extern struct platform_driver zx_vga_driver;
|
||||
|
||||
static inline u32 zx_readl(void __iomem *reg)
|
||||
{
|
||||
return readl_relaxed(reg);
|
||||
}
|
||||
|
||||
static inline void zx_writel(void __iomem *reg, u32 val)
|
||||
{
|
||||
writel_relaxed(val, reg);
|
||||
}
|
||||
|
||||
static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = zx_readl(reg);
|
||||
tmp = (tmp & ~mask) | (val & mask);
|
||||
zx_writel(reg, tmp);
|
||||
}
|
||||
|
||||
#endif /* __ZX_DRM_DRV_H__ */
|
||||
@@ -1,760 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright 2016 Linaro Ltd.
|
||||
* Copyright 2016 ZTE Corporation.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/component.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hdmi.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_of.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_print.h>
|
||||
#include <drm/drm_simple_kms_helper.h>
|
||||
|
||||
#include <sound/hdmi-codec.h>
|
||||
|
||||
#include "zx_hdmi_regs.h"
|
||||
#include "zx_vou.h"
|
||||
|
||||
#define ZX_HDMI_INFOFRAME_SIZE 31
|
||||
#define DDC_SEGMENT_ADDR 0x30
|
||||
|
||||
struct zx_hdmi_i2c {
|
||||
struct i2c_adapter adap;
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
struct zx_hdmi {
|
||||
struct drm_connector connector;
|
||||
struct drm_encoder encoder;
|
||||
struct zx_hdmi_i2c *ddc;
|
||||
struct device *dev;
|
||||
struct drm_device *drm;
|
||||
void __iomem *mmio;
|
||||
struct clk *cec_clk;
|
||||
struct clk *osc_clk;
|
||||
struct clk *xclk;
|
||||
bool sink_is_hdmi;
|
||||
bool sink_has_audio;
|
||||
struct platform_device *audio_pdev;
|
||||
};
|
||||
|
||||
#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
|
||||
|
||||
static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
|
||||
{
|
||||
return readl_relaxed(hdmi->mmio + offset * 4);
|
||||
}
|
||||
|
||||
static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
|
||||
{
|
||||
writel_relaxed(val, hdmi->mmio + offset * 4);
|
||||
}
|
||||
|
||||
static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
|
||||
u8 mask, u8 val)
|
||||
{
|
||||
u8 tmp;
|
||||
|
||||
tmp = hdmi_readb(hdmi, offset);
|
||||
tmp = (tmp & ~mask) | (val & mask);
|
||||
hdmi_writeb(hdmi, offset, tmp);
|
||||
}
|
||||
|
||||
static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
|
||||
union hdmi_infoframe *frame, u8 fsel)
|
||||
{
|
||||
u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
|
||||
int num;
|
||||
int i;
|
||||
|
||||
hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
|
||||
|
||||
num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
|
||||
if (num < 0) {
|
||||
DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
|
||||
return num;
|
||||
}
|
||||
|
||||
for (i = 0; i < num; i++)
|
||||
hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
|
||||
|
||||
hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
|
||||
TPI_INFO_TRANS_RPT);
|
||||
hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
|
||||
TPI_INFO_TRANS_EN);
|
||||
|
||||
return num;
|
||||
}
|
||||
|
||||
static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
union hdmi_infoframe frame;
|
||||
int ret;
|
||||
|
||||
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
|
||||
&hdmi->connector,
|
||||
mode);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
|
||||
}
|
||||
|
||||
static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
union hdmi_infoframe frame;
|
||||
int ret;
|
||||
|
||||
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
|
||||
&hdmi->connector,
|
||||
mode);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* We always use YUV444 for HDMI output. */
|
||||
frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
|
||||
|
||||
return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
|
||||
}
|
||||
|
||||
static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adj_mode)
|
||||
{
|
||||
struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
|
||||
|
||||
if (hdmi->sink_is_hdmi) {
|
||||
zx_hdmi_config_video_avi(hdmi, mode);
|
||||
zx_hdmi_config_video_vsi(hdmi, mode);
|
||||
}
|
||||
}
|
||||
|
||||
static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
|
||||
{
|
||||
/* Copy from ZTE BSP code */
|
||||
hdmi_writeb(hdmi, 0x222, 0x0);
|
||||
hdmi_writeb(hdmi, 0x224, 0x4);
|
||||
hdmi_writeb(hdmi, 0x909, 0x0);
|
||||
hdmi_writeb(hdmi, 0x7b0, 0x90);
|
||||
hdmi_writeb(hdmi, 0x7b1, 0x00);
|
||||
hdmi_writeb(hdmi, 0x7b2, 0xa7);
|
||||
hdmi_writeb(hdmi, 0x7b8, 0xaa);
|
||||
hdmi_writeb(hdmi, 0x7b2, 0xa7);
|
||||
hdmi_writeb(hdmi, 0x7b3, 0x0f);
|
||||
hdmi_writeb(hdmi, 0x7b4, 0x0f);
|
||||
hdmi_writeb(hdmi, 0x7b5, 0x55);
|
||||
hdmi_writeb(hdmi, 0x7b7, 0x03);
|
||||
hdmi_writeb(hdmi, 0x7b9, 0x12);
|
||||
hdmi_writeb(hdmi, 0x7ba, 0x32);
|
||||
hdmi_writeb(hdmi, 0x7bc, 0x68);
|
||||
hdmi_writeb(hdmi, 0x7be, 0x40);
|
||||
hdmi_writeb(hdmi, 0x7bf, 0x84);
|
||||
hdmi_writeb(hdmi, 0x7c1, 0x0f);
|
||||
hdmi_writeb(hdmi, 0x7c8, 0x02);
|
||||
hdmi_writeb(hdmi, 0x7c9, 0x03);
|
||||
hdmi_writeb(hdmi, 0x7ca, 0x40);
|
||||
hdmi_writeb(hdmi, 0x7dc, 0x31);
|
||||
hdmi_writeb(hdmi, 0x7e2, 0x04);
|
||||
hdmi_writeb(hdmi, 0x7e0, 0x06);
|
||||
hdmi_writeb(hdmi, 0x7cb, 0x68);
|
||||
hdmi_writeb(hdmi, 0x7f9, 0x02);
|
||||
hdmi_writeb(hdmi, 0x7b6, 0x02);
|
||||
hdmi_writeb(hdmi, 0x7f3, 0x0);
|
||||
}
|
||||
|
||||
static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
|
||||
{
|
||||
/* Enable pclk */
|
||||
hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
|
||||
|
||||
/* Enable HDMI for TX */
|
||||
hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
|
||||
|
||||
/* Enable deep color packet */
|
||||
hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
|
||||
|
||||
/* Enable HDMI/MHL mode for output */
|
||||
hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
|
||||
TEST_TXCTRL_HDMI_MODE);
|
||||
|
||||
/* Configure reg_qc_sel */
|
||||
hdmi_writeb(hdmi, HDMICTL4, 0x3);
|
||||
|
||||
/* Enable interrupt */
|
||||
hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
|
||||
INTR1_MONITOR_DETECT);
|
||||
|
||||
/* Start up phy */
|
||||
zx_hdmi_phy_start(hdmi);
|
||||
}
|
||||
|
||||
static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
|
||||
{
|
||||
/* Disable interrupt */
|
||||
hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
|
||||
|
||||
/* Disable deep color packet */
|
||||
hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
|
||||
|
||||
/* Disable HDMI for TX */
|
||||
hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
|
||||
|
||||
/* Disable pclk */
|
||||
hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
|
||||
}
|
||||
|
||||
static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
|
||||
{
|
||||
struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
|
||||
|
||||
clk_prepare_enable(hdmi->cec_clk);
|
||||
clk_prepare_enable(hdmi->osc_clk);
|
||||
clk_prepare_enable(hdmi->xclk);
|
||||
|
||||
zx_hdmi_hw_enable(hdmi);
|
||||
|
||||
vou_inf_enable(VOU_HDMI, encoder->crtc);
|
||||
}
|
||||
|
||||
static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
|
||||
{
|
||||
struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
|
||||
|
||||
vou_inf_disable(VOU_HDMI, encoder->crtc);
|
||||
|
||||
zx_hdmi_hw_disable(hdmi);
|
||||
|
||||
clk_disable_unprepare(hdmi->xclk);
|
||||
clk_disable_unprepare(hdmi->osc_clk);
|
||||
clk_disable_unprepare(hdmi->cec_clk);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
|
||||
.enable = zx_hdmi_encoder_enable,
|
||||
.disable = zx_hdmi_encoder_disable,
|
||||
.mode_set = zx_hdmi_encoder_mode_set,
|
||||
};
|
||||
|
||||
static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct zx_hdmi *hdmi = to_zx_hdmi(connector);
|
||||
struct edid *edid;
|
||||
int ret;
|
||||
|
||||
edid = drm_get_edid(connector, &hdmi->ddc->adap);
|
||||
if (!edid)
|
||||
return 0;
|
||||
|
||||
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
|
||||
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
|
||||
drm_connector_update_edid_property(connector, edid);
|
||||
ret = drm_add_edid_modes(connector, edid);
|
||||
kfree(edid);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
zx_hdmi_connector_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
|
||||
.get_modes = zx_hdmi_connector_get_modes,
|
||||
.mode_valid = zx_hdmi_connector_mode_valid,
|
||||
};
|
||||
|
||||
static enum drm_connector_status
|
||||
zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
|
||||
{
|
||||
struct zx_hdmi *hdmi = to_zx_hdmi(connector);
|
||||
|
||||
return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
|
||||
connector_status_connected : connector_status_disconnected;
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.detect = zx_hdmi_connector_detect,
|
||||
.destroy = drm_connector_cleanup,
|
||||
.reset = drm_atomic_helper_connector_reset,
|
||||
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
};
|
||||
|
||||
static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
|
||||
{
|
||||
struct drm_encoder *encoder = &hdmi->encoder;
|
||||
|
||||
encoder->possible_crtcs = VOU_CRTC_MASK;
|
||||
|
||||
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
|
||||
drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
|
||||
|
||||
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
drm_connector_init_with_ddc(drm, &hdmi->connector,
|
||||
&zx_hdmi_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_HDMIA,
|
||||
&hdmi->ddc->adap);
|
||||
drm_connector_helper_add(&hdmi->connector,
|
||||
&zx_hdmi_connector_helper_funcs);
|
||||
|
||||
drm_connector_attach_encoder(&hdmi->connector, encoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_id;
|
||||
|
||||
drm_helper_hpd_irq_event(hdmi->connector.dev);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_id;
|
||||
u8 lstat;
|
||||
|
||||
lstat = hdmi_readb(hdmi, L1_INTR_STAT);
|
||||
|
||||
/* Monitor detect/HPD interrupt */
|
||||
if (lstat & L1_INTR_STAT_INTR1) {
|
||||
u8 stat;
|
||||
|
||||
stat = hdmi_readb(hdmi, INTR1_STAT);
|
||||
hdmi_writeb(hdmi, INTR1_STAT, stat);
|
||||
|
||||
if (stat & INTR1_MONITOR_DETECT)
|
||||
return IRQ_WAKE_THREAD;
|
||||
}
|
||||
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
static int zx_hdmi_audio_startup(struct device *dev, void *data)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_get_drvdata(dev);
|
||||
struct drm_encoder *encoder = &hdmi->encoder;
|
||||
|
||||
vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void zx_hdmi_audio_shutdown(struct device *dev, void *data)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_get_drvdata(dev);
|
||||
|
||||
/* Disable audio input */
|
||||
hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0);
|
||||
}
|
||||
|
||||
static inline int zx_hdmi_audio_get_n(unsigned int fs)
|
||||
{
|
||||
unsigned int n;
|
||||
|
||||
if (fs && (fs % 44100) == 0)
|
||||
n = 6272 * (fs / 44100);
|
||||
else
|
||||
n = fs * 128 / 1000;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static int zx_hdmi_audio_hw_params(struct device *dev,
|
||||
void *data,
|
||||
struct hdmi_codec_daifmt *daifmt,
|
||||
struct hdmi_codec_params *params)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_get_drvdata(dev);
|
||||
struct hdmi_audio_infoframe *cea = ¶ms->cea;
|
||||
union hdmi_infoframe frame;
|
||||
int n;
|
||||
|
||||
/* We only support spdif for now */
|
||||
if (daifmt->fmt != HDMI_SPDIF) {
|
||||
DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (params->sample_width) {
|
||||
case 16:
|
||||
hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
|
||||
SPDIF_SAMPLE_SIZE_16BIT);
|
||||
break;
|
||||
case 20:
|
||||
hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
|
||||
SPDIF_SAMPLE_SIZE_20BIT);
|
||||
break;
|
||||
case 24:
|
||||
hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
|
||||
SPDIF_SAMPLE_SIZE_24BIT);
|
||||
break;
|
||||
default:
|
||||
DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n",
|
||||
params->sample_width);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* CTS is calculated by hardware, and we only need to take care of N */
|
||||
n = zx_hdmi_audio_get_n(params->sample_rate);
|
||||
hdmi_writeb(hdmi, N_SVAL1, n & 0xff);
|
||||
hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff);
|
||||
hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf);
|
||||
|
||||
/* Enable spdif mode */
|
||||
hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN);
|
||||
|
||||
/* Enable audio input */
|
||||
hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN);
|
||||
|
||||
memcpy(&frame.audio, cea, sizeof(*cea));
|
||||
|
||||
return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO);
|
||||
}
|
||||
|
||||
static int zx_hdmi_audio_mute(struct device *dev, void *data,
|
||||
bool enable, int direction)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_get_drvdata(dev);
|
||||
|
||||
if (enable)
|
||||
hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE,
|
||||
TPI_AUD_MUTE);
|
||||
else
|
||||
hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int zx_hdmi_audio_get_eld(struct device *dev, void *data,
|
||||
uint8_t *buf, size_t len)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_get_drvdata(dev);
|
||||
struct drm_connector *connector = &hdmi->connector;
|
||||
|
||||
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct hdmi_codec_ops zx_hdmi_codec_ops = {
|
||||
.audio_startup = zx_hdmi_audio_startup,
|
||||
.hw_params = zx_hdmi_audio_hw_params,
|
||||
.audio_shutdown = zx_hdmi_audio_shutdown,
|
||||
.mute_stream = zx_hdmi_audio_mute,
|
||||
.get_eld = zx_hdmi_audio_get_eld,
|
||||
.no_capture_mute = 1,
|
||||
};
|
||||
|
||||
static struct hdmi_codec_pdata zx_hdmi_codec_pdata = {
|
||||
.ops = &zx_hdmi_codec_ops,
|
||||
.spdif = 1,
|
||||
};
|
||||
|
||||
static int zx_hdmi_audio_register(struct zx_hdmi *hdmi)
|
||||
{
|
||||
struct platform_device *pdev;
|
||||
|
||||
pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME,
|
||||
PLATFORM_DEVID_AUTO,
|
||||
&zx_hdmi_codec_pdata,
|
||||
sizeof(zx_hdmi_codec_pdata));
|
||||
if (IS_ERR(pdev))
|
||||
return PTR_ERR(pdev);
|
||||
|
||||
hdmi->audio_pdev = pdev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
|
||||
{
|
||||
int len = msg->len;
|
||||
u8 *buf = msg->buf;
|
||||
int retry = 0;
|
||||
int ret = 0;
|
||||
|
||||
/* Bits [9:8] of bytes */
|
||||
hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
|
||||
/* Bits [7:0] of bytes */
|
||||
hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
|
||||
|
||||
/* Clear FIFO */
|
||||
hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
|
||||
|
||||
/* Kick off the read */
|
||||
hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
|
||||
DDC_CMD_SEQUENTIAL_READ);
|
||||
|
||||
while (len > 0) {
|
||||
int cnt, i;
|
||||
|
||||
/* FIFO needs some time to get ready */
|
||||
usleep_range(500, 1000);
|
||||
|
||||
cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
|
||||
if (cnt == 0) {
|
||||
if (++retry > 5) {
|
||||
DRM_DEV_ERROR(hdmi->dev,
|
||||
"DDC FIFO read timed out!");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++)
|
||||
*buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
|
||||
len -= cnt;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
|
||||
{
|
||||
/*
|
||||
* The DDC I2C adapter is only for reading EDID data, so we assume
|
||||
* that the write to this adapter must be the EDID data offset.
|
||||
*/
|
||||
if ((msg->len != 1) ||
|
||||
((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
|
||||
return -EINVAL;
|
||||
|
||||
if (msg->addr == DDC_SEGMENT_ADDR)
|
||||
hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
|
||||
else if (msg->addr == DDC_ADDR)
|
||||
hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
|
||||
|
||||
hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
|
||||
int num)
|
||||
{
|
||||
struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
|
||||
struct zx_hdmi_i2c *ddc = hdmi->ddc;
|
||||
int i, ret = 0;
|
||||
|
||||
mutex_lock(&ddc->lock);
|
||||
|
||||
/* Enable DDC master access */
|
||||
hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
DRM_DEV_DEBUG(hdmi->dev,
|
||||
"xfer: num: %d/%d, len: %d, flags: %#x\n",
|
||||
i + 1, num, msgs[i].len, msgs[i].flags);
|
||||
|
||||
if (msgs[i].flags & I2C_M_RD)
|
||||
ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
|
||||
else
|
||||
ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
|
||||
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
ret = num;
|
||||
|
||||
/* Disable DDC master access */
|
||||
hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
|
||||
|
||||
mutex_unlock(&ddc->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
static const struct i2c_algorithm zx_hdmi_algorithm = {
|
||||
.master_xfer = zx_hdmi_i2c_xfer,
|
||||
.functionality = zx_hdmi_i2c_func,
|
||||
};
|
||||
|
||||
static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
|
||||
{
|
||||
struct i2c_adapter *adap;
|
||||
struct zx_hdmi_i2c *ddc;
|
||||
int ret;
|
||||
|
||||
ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
|
||||
if (!ddc)
|
||||
return -ENOMEM;
|
||||
|
||||
hdmi->ddc = ddc;
|
||||
mutex_init(&ddc->lock);
|
||||
|
||||
adap = &ddc->adap;
|
||||
adap->owner = THIS_MODULE;
|
||||
adap->class = I2C_CLASS_DDC;
|
||||
adap->dev.parent = hdmi->dev;
|
||||
adap->algo = &zx_hdmi_algorithm;
|
||||
snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
|
||||
|
||||
ret = i2c_add_adapter(adap);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
i2c_set_adapdata(adap, hdmi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct drm_device *drm = data;
|
||||
struct resource *res;
|
||||
struct zx_hdmi *hdmi;
|
||||
int irq;
|
||||
int ret;
|
||||
|
||||
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
|
||||
if (!hdmi)
|
||||
return -ENOMEM;
|
||||
|
||||
hdmi->dev = dev;
|
||||
hdmi->drm = drm;
|
||||
|
||||
dev_set_drvdata(dev, hdmi);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
hdmi->mmio = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(hdmi->mmio)) {
|
||||
ret = PTR_ERR(hdmi->mmio);
|
||||
DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
|
||||
if (IS_ERR(hdmi->cec_clk)) {
|
||||
ret = PTR_ERR(hdmi->cec_clk);
|
||||
DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
|
||||
if (IS_ERR(hdmi->osc_clk)) {
|
||||
ret = PTR_ERR(hdmi->osc_clk);
|
||||
DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
|
||||
if (IS_ERR(hdmi->xclk)) {
|
||||
ret = PTR_ERR(hdmi->xclk);
|
||||
DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = zx_hdmi_ddc_register(hdmi);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = zx_hdmi_audio_register(hdmi);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = zx_hdmi_register(drm, hdmi);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
|
||||
zx_hdmi_irq_thread, IRQF_SHARED,
|
||||
dev_name(dev), hdmi);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void zx_hdmi_unbind(struct device *dev, struct device *master,
|
||||
void *data)
|
||||
{
|
||||
struct zx_hdmi *hdmi = dev_get_drvdata(dev);
|
||||
|
||||
hdmi->connector.funcs->destroy(&hdmi->connector);
|
||||
hdmi->encoder.funcs->destroy(&hdmi->encoder);
|
||||
|
||||
if (hdmi->audio_pdev)
|
||||
platform_device_unregister(hdmi->audio_pdev);
|
||||
}
|
||||
|
||||
static const struct component_ops zx_hdmi_component_ops = {
|
||||
.bind = zx_hdmi_bind,
|
||||
.unbind = zx_hdmi_unbind,
|
||||
};
|
||||
|
||||
static int zx_hdmi_probe(struct platform_device *pdev)
|
||||
{
|
||||
return component_add(&pdev->dev, &zx_hdmi_component_ops);
|
||||
}
|
||||
|
||||
static int zx_hdmi_remove(struct platform_device *pdev)
|
||||
{
|
||||
component_del(&pdev->dev, &zx_hdmi_component_ops);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id zx_hdmi_of_match[] = {
	{ .compatible = "zte,zx296718-hdmi", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
|
||||
|
||||
struct platform_driver zx_hdmi_driver = {
|
||||
.probe = zx_hdmi_probe,
|
||||
.remove = zx_hdmi_remove,
|
||||
.driver = {
|
||||
.name = "zx-hdmi",
|
||||
.of_match_table = zx_hdmi_of_match,
|
||||
},
|
||||
};
|
||||
@@ -1,66 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2016 Linaro Ltd.
 * Copyright 2016 ZTE Corporation.
 */

#ifndef __ZX_HDMI_REGS_H__
#define __ZX_HDMI_REGS_H__

/* Register map of the ZX296718 HDMI transmitter block. */

/* Function select and clock power-down */
#define FUNC_SEL			0x000b
#define FUNC_HDMI_EN			BIT(0)
#define CLKPWD				0x000d
#define CLKPWD_PDIDCK			BIT(2)

/* Packet control */
#define P2T_CTRL			0x0066
#define P2T_DC_PKT_EN			BIT(7)

/* Interrupt status/mask */
#define L1_INTR_STAT			0x007e
#define L1_INTR_STAT_INTR1		BIT(0)
#define INTR1_STAT			0x008f
#define INTR1_MASK			0x0095
#define INTR1_MONITOR_DETECT		(BIT(5) | BIT(6))

/* Hardware DDC (EDID) engine */
#define ZX_DDC_ADDR			0x00ed
#define ZX_DDC_SEGM			0x00ee
#define ZX_DDC_OFFSET			0x00ef
#define ZX_DDC_DIN_CNT1			0x00f0
#define ZX_DDC_DIN_CNT2			0x00f1
#define ZX_DDC_CMD			0x00f3
#define DDC_CMD_MASK			0xf
#define DDC_CMD_CLEAR_FIFO		0x9
#define DDC_CMD_SEQUENTIAL_READ		0x2
#define ZX_DDC_DATA			0x00f4
#define ZX_DDC_DOUT_CNT			0x00f5
#define DDC_DOUT_CNT_MASK		0x1f

/* TX control */
#define TEST_TXCTRL			0x00f7
#define TEST_TXCTRL_HDMI_MODE		BIT(1)
#define HDMICTL4			0x0235

/* Hot-plug detect */
#define TPI_HPD_RSEN			0x063b
#define TPI_HPD_CONNECTION		(BIT(1) | BIT(2))

/* Infoframe transmission */
#define TPI_INFO_FSEL			0x06bf
#define FSEL_AVI			0
#define FSEL_GBD			1
#define FSEL_AUDIO			2
#define FSEL_SPD			3
#define FSEL_MPEG			4
#define FSEL_VSIF			5
#define TPI_INFO_B0			0x06c0
#define TPI_INFO_EN			0x06df
#define TPI_INFO_TRANS_EN		BIT(7)
#define TPI_INFO_TRANS_RPT		BIT(6)

/* DDC master enable */
#define TPI_DDC_MASTER_EN		0x06f8
#define HW_DDC_MASTER			BIT(7)

/* Audio (S/PDIF) configuration */
#define N_SVAL1				0xa03
#define N_SVAL2				0xa04
#define N_SVAL3				0xa05
#define AUD_EN				0xa13
#define AUD_IN_EN			BIT(0)
#define AUD_MODE			0xa14
#define SPDIF_EN			BIT(1)
#define TPI_AUD_CONFIG			0xa62
#define SPDIF_SAMPLE_SIZE_SHIFT		6
#define SPDIF_SAMPLE_SIZE_MASK		(0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
#define SPDIF_SAMPLE_SIZE_16BIT		(0x1 << SPDIF_SAMPLE_SIZE_SHIFT)
#define SPDIF_SAMPLE_SIZE_20BIT		(0x2 << SPDIF_SAMPLE_SIZE_SHIFT)
#define SPDIF_SAMPLE_SIZE_24BIT		(0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
#define TPI_AUD_MUTE			BIT(4)

#endif /* __ZX_HDMI_REGS_H__ */
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user