mirror of
https://github.com/torvalds/linux.git
synced 2026-04-25 18:12:26 -04:00
drm/amd/dc: Add dc display driver (v2)
Supported DCE versions: 8.0, 10.0, 11.0, 11.2. v2: rebase against 4.11. Signed-off-by: Harry Wentland <harry.wentland@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
committed by
Alex Deucher
parent
9c5b2b0d40
commit
4562236b3b
17
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
Normal file
17
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
Normal file
#
# Makefile for the 'dm' sub-component of DAL.
# It provides the control and status of dm blocks.

# Core display-manager objects, always built.
AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o

# Service/helper glue is only needed when the new DC display core is enabled.
ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
endif

subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc

# Prefix each object with its directory so kbuild finds the sources.
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))

AMD_DISPLAY_FILES += $(AMDGPU_DM)
1564
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
Normal file
1564
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
Normal file
File diff suppressed because it is too large
Load Diff
171
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
Normal file
171
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
Normal file
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

/*
#include "linux/switch.h"
*/

/*
 * This file contains the definition for amdgpu_display_manager
 * and its API for amdgpu driver's use.
 * This component provides all the display related functionality
 * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
 * The API that is called directly from the KMS framework is located
 * in the amdgpu_dm_kms.h file.
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"

/* Forward declarations */
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;

/* Snapshot of the last framebuffer/position/mode programmed on a CRTC. */
struct amdgpu_dm_prev_state {
	struct drm_framebuffer *fb;
	int32_t x;
	int32_t y;
	struct drm_display_mode mode;
};

/* Per-IRQ-source context passed to the common interrupt callbacks. */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
};

/* Handler list for one IRQ source handled at low (deferred) context. */
struct irq_list_head {
	struct list_head head;
	/* In case this interrupt needs post-processing, 'work' will be queued */
	struct work_struct work;
};

/*
 * Central state of the display manager: the bridge between the amdgpu
 * base driver / DRM and the DAL/DC display core.
 */
struct amdgpu_display_manager {
	struct dal *dal;
	struct dc *dc;
	struct cgs_device *cgs_device;
	/* lock to be used when DAL is called from SYNC IRQ context */
	spinlock_t dal_lock;

	struct amdgpu_device *adev;	/* AMD base driver */
	struct drm_device *ddev;	/* DRM base driver */
	u16 display_indexes_num;

	struct amdgpu_dm_prev_state prev_state;

	/*
	 * 'irq_source_handler_table' holds a list of handlers
	 * per (DAL) IRQ source.
	 *
	 * Each IRQ source may need to be handled at different contexts.
	 * By 'context' we mean, for example:
	 * - The ISR context, which is the direct interrupt handler.
	 * - The 'deferred' context - this is the post-processing of the
	 *   interrupt, but at a lower priority.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/* One slot per page-flip IRQ source. */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/* One slot per vertical-update IRQ source. */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/* this spin lock synchronizes access to 'irq_handler_list_table' */
	spinlock_t irq_handler_list_table_lock;

	/* Timer-related data. */
	struct list_head timer_handler_list;
	struct workqueue_struct *timer_workqueue;

	/* Use dal_mutex for any activity which is NOT synchronized by
	 * DRM mode setting locks.
	 * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
	 * DRM mode setting locks being acquired. This is where dal_mutex
	 * is acquired before calling into DAL. */
	struct mutex dal_mutex;

	struct backlight_device *backlight_dev;

	const struct dc_link *backlight_link;

	struct work_struct mst_hotplug_work;

	struct mod_freesync *freesync_module;
};

/* basic init/fini API */
int amdgpu_dm_init(struct amdgpu_device *adev);

void amdgpu_dm_fini(struct amdgpu_device *adev);

void amdgpu_dm_destroy(void);

/* initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(
	struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
void amdgpu_dm_destroy_drm_device(
	struct amdgpu_display_manager *dm);

/* Locking/Mutex */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);

/* Register "Backlight device" accessible by user-mode. */
void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);

extern const struct amdgpu_ip_block_version dm_ip_block;

void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector);

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var);

#endif /* __AMDGPU_DM_H__ */
484
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
Normal file
484
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
Normal file
@@ -0,0 +1,484 @@
|
||||
/*
|
||||
* Copyright 2015 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/i2c.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include <drm/drm_edid.h>
|
||||
|
||||
#include "dm_services.h"
|
||||
#include "amdgpu.h"
|
||||
#include "dc.h"
|
||||
#include "amdgpu_dm.h"
|
||||
#include "amdgpu_dm_irq.h"
|
||||
#include "amdgpu_dm_types.h"
|
||||
|
||||
#include "dm_helpers.h"
|
||||
|
||||
/* dm_helpers_parse_edid_caps
|
||||
*
|
||||
* Parse edid caps
|
||||
*
|
||||
* @edid: [in] pointer to edid
|
||||
* edid_caps: [in] pointer to edid caps
|
||||
* @return
|
||||
* void
|
||||
* */
|
||||
enum dc_edid_status dm_helpers_parse_edid_caps(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_edid *edid,
|
||||
struct dc_edid_caps *edid_caps)
|
||||
{
|
||||
struct edid *edid_buf = (struct edid *) edid->raw_edid;
|
||||
struct cea_sad *sads;
|
||||
int sad_count = -1;
|
||||
int sadb_count = -1;
|
||||
int i = 0;
|
||||
int j = 0;
|
||||
uint8_t *sadb = NULL;
|
||||
|
||||
enum dc_edid_status result = EDID_OK;
|
||||
|
||||
if (!edid_caps || !edid)
|
||||
return EDID_BAD_INPUT;
|
||||
|
||||
if (!drm_edid_is_valid(edid_buf))
|
||||
result = EDID_BAD_CHECKSUM;
|
||||
|
||||
edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
|
||||
((uint16_t) edid_buf->mfg_id[1])<<8;
|
||||
edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
|
||||
((uint16_t) edid_buf->prod_code[1])<<8;
|
||||
edid_caps->serial_number = edid_buf->serial;
|
||||
edid_caps->manufacture_week = edid_buf->mfg_week;
|
||||
edid_caps->manufacture_year = edid_buf->mfg_year;
|
||||
|
||||
/* One of the four detailed_timings stores the monitor name. It's
|
||||
* stored in an array of length 13. */
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
|
||||
while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
|
||||
if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
|
||||
break;
|
||||
|
||||
edid_caps->display_name[j] =
|
||||
edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
|
||||
j++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
|
||||
(struct edid *) edid->raw_edid);
|
||||
|
||||
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
|
||||
if (sad_count <= 0) {
|
||||
DRM_INFO("SADs count is: %d, don't need to read it\n",
|
||||
sad_count);
|
||||
return result;
|
||||
}
|
||||
|
||||
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
|
||||
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
|
||||
struct cea_sad *sad = &sads[i];
|
||||
|
||||
edid_caps->audio_modes[i].format_code = sad->format;
|
||||
edid_caps->audio_modes[i].channel_count = sad->channels;
|
||||
edid_caps->audio_modes[i].sample_rate = sad->freq;
|
||||
edid_caps->audio_modes[i].sample_size = sad->byte2;
|
||||
}
|
||||
|
||||
sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
|
||||
|
||||
if (sadb_count < 0) {
|
||||
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
|
||||
sadb_count = 0;
|
||||
}
|
||||
|
||||
if (sadb_count)
|
||||
edid_caps->speaker_flags = sadb[0];
|
||||
else
|
||||
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
|
||||
|
||||
kfree(sads);
|
||||
kfree(sadb);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static struct amdgpu_connector *get_connector_for_sink(
|
||||
struct drm_device *dev,
|
||||
const struct dc_sink *sink)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
|
||||
if (aconnector->dc_sink == sink)
|
||||
return aconnector;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct amdgpu_connector *get_connector_for_link(
|
||||
struct drm_device *dev,
|
||||
const struct dc_link *link)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
|
||||
if (aconnector->dc_link == link)
|
||||
return aconnector;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Copy the active VC payload allocations from the DRM MST manager into
 * dc's @proposed_table, preserving the slot 1-63 allocation order.
 * Holds the manager's payload_lock for the duration of the walk. */
static void get_payload_table(
		struct amdgpu_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		/* DELETE_LOCAL entries should already have been purged. */
		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}
|
||||
|
||||
/*
 * Writes payload allocation table in immediate downstream device.
 *
 * On @enable: converts the stream's colour depth to bits-per-pixel,
 * computes the PBN, allocates a VCPI for the stream's MST port, then
 * pushes payload step 1 (DPCD payload table) to the branch device and
 * mirrors DRM's resulting allocation into dc's @proposed_table.
 * On disable: resets the port's VCPI slots before the same step-1 update.
 *
 * Returns false if the stream's connector/MST state cannot be resolved,
 * VCPI allocation fails, or the part-1 payload update errors out.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct drm_device *dev = adev->ddev;
	struct amdgpu_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	int slots = 0;
	bool ret;
	int clock;
	int bpp = 0;
	int pbn = 0;

	aconnector = get_connector_for_sink(dev, stream->sink);

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

	if (enable) {
		clock = stream->timing.pix_clk_khz;

		/* Bits per colour component. */
		switch (stream->timing.display_color_depth) {

		case COLOR_DEPTH_666:
			bpp = 6;
			break;
		case COLOR_DEPTH_888:
			bpp = 8;
			break;
		case COLOR_DEPTH_101010:
			bpp = 10;
			break;
		case COLOR_DEPTH_121212:
			bpp = 12;
			break;
		case COLOR_DEPTH_141414:
			bpp = 14;
			break;
		case COLOR_DEPTH_161616:
			bpp = 16;
			break;
		default:
			/* Unknown depth leaves bpp == 0; assert-only handling,
			 * pbn will be computed from 0 - NOTE(review): confirm
			 * all reachable depths are covered above. */
			ASSERT(bpp != 0);
			break;
		}

		/* Three components (RGB/YCbCr) per pixel. */
		bpp = bpp * 3;

		/* TODO need to know link rate */

		pbn = drm_dp_calc_pbn_mode(clock, bpp);

		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);

		if (!ret)
			return false;

	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	ret = drm_dp_update_payload_part1(mst_mgr);

	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
	 * AUX message. The sequence is slot 1-63 allocated sequence for each
	 * stream. AMD ASIC stream slot allocation should follow the same
	 * sequence. copy DRM MST allocation to dc */

	get_payload_table(aconnector, proposed_table);

	/* drm_dp_update_payload_part1() returns 0 on success. */
	if (ret)
		return false;

	return true;
}
|
||||
|
||||
/*
|
||||
* Polls for ACT (allocation change trigger) handled and sends
|
||||
* ALLOCATE_PAYLOAD message.
|
||||
*/
|
||||
bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_stream *stream)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct amdgpu_connector *aconnector;
|
||||
struct drm_dp_mst_topology_mgr *mst_mgr;
|
||||
int ret;
|
||||
|
||||
aconnector = get_connector_for_sink(dev, stream->sink);
|
||||
|
||||
if (!aconnector || !aconnector->mst_port)
|
||||
return false;
|
||||
|
||||
mst_mgr = &aconnector->mst_port->mst_mgr;
|
||||
|
||||
if (!mst_mgr->mst_state)
|
||||
return false;
|
||||
|
||||
ret = drm_dp_check_act_status(mst_mgr);
|
||||
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 * Perform payload-table update step 2 (ALLOCATE_PAYLOAD sideband message)
 * for @stream's MST port; on @enable == false also release its VCPI.
 * Returns false if the connector/MST state is unavailable or step 2 fails.
 */
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream *stream,
		bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amdgpu_connector *aconnector =
			get_connector_for_sink(adev->ddev, stream->sink);
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	if (drm_dp_update_payload_part2(mst_mgr))
		return false;

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}
|
||||
|
||||
/*
 * Start (or, during boot, defer starting) the MST topology manager on the
 * connector that owns @link.
 *
 * @boot: true while the driver is booting - the actual manager start is
 *        deferred; only the presence of MST is logged.
 *
 * Returns true on success or deferral, false when no connector owns @link
 * or enabling the topology manager fails.
 */
bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct drm_device *dev = adev->ddev;
	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);

	if (!aconnector) {
		/* BUGFIX: message read "Failed to found". */
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		/* BUGFIX: message read "Differing MST start". */
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
			aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}
|
||||
|
||||
void dm_helpers_dp_mst_stop_top_mgr(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_link *link)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
|
||||
|
||||
if (!aconnector) {
|
||||
DRM_ERROR("Failed to found connector for link!");
|
||||
return;
|
||||
}
|
||||
|
||||
DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
|
||||
aconnector, aconnector->base.base.id);
|
||||
|
||||
if (aconnector->mst_mgr.mst_state == true)
|
||||
drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
|
||||
}
|
||||
|
||||
bool dm_helpers_dp_read_dpcd(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_link *link,
|
||||
uint32_t address,
|
||||
uint8_t *data,
|
||||
uint32_t size)
|
||||
{
|
||||
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
|
||||
|
||||
if (!aconnector) {
|
||||
DRM_ERROR("Failed to found connector for link!");
|
||||
return false;
|
||||
}
|
||||
|
||||
return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
|
||||
data, size) > 0;
|
||||
}
|
||||
|
||||
bool dm_helpers_dp_write_dpcd(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_link *link,
|
||||
uint32_t address,
|
||||
const uint8_t *data,
|
||||
uint32_t size)
|
||||
{
|
||||
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
|
||||
|
||||
if (!aconnector) {
|
||||
DRM_ERROR("Failed to found connector for link!");
|
||||
return false;
|
||||
}
|
||||
|
||||
return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
|
||||
address, (uint8_t *)data, size) > 0;
|
||||
}
|
||||
|
||||
bool dm_helpers_submit_i2c(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_link *link,
|
||||
struct i2c_command *cmd)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
|
||||
struct i2c_msg *msgs;
|
||||
int i = 0;
|
||||
int num = cmd->number_of_payloads;
|
||||
bool result;
|
||||
|
||||
if (!aconnector) {
|
||||
DRM_ERROR("Failed to found connector for link!");
|
||||
return false;
|
||||
}
|
||||
|
||||
msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
|
||||
|
||||
if (!msgs)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
msgs[i].flags = cmd->payloads[i].write ? I2C_M_RD : 0;
|
||||
msgs[i].addr = cmd->payloads[i].address;
|
||||
msgs[i].len = cmd->payloads[i].length;
|
||||
msgs[i].buf = cmd->payloads[i].data;
|
||||
}
|
||||
|
||||
result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
|
||||
|
||||
kfree(msgs);
|
||||
|
||||
return result;
|
||||
}
|
||||
829
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
Normal file
829
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
Normal file
@@ -0,0 +1,829 @@
|
||||
/*
|
||||
* Copyright 2015 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
||||
#include "dm_services_types.h"
|
||||
#include "dc.h"
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_dm.h"
|
||||
#include "amdgpu_dm_irq.h"
|
||||
|
||||
/******************************************************************************
|
||||
* Private declarations.
|
||||
*****************************************************************************/
|
||||
|
||||
/* State common to IRQ and timer handler registrations. */
struct handler_common_data {
	struct list_head list;
	interrupt_handler handler;	/* callback into a DAL subcomponent */
	void *handler_arg;		/* opaque argument passed to 'handler' */

	/* DM which this handler belongs to */
	struct amdgpu_display_manager *dm;
};

/* One registered handler for a DAL IRQ source. */
struct amdgpu_dm_irq_handler_data {
	struct handler_common_data hcd;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

/* One registered single-shot timer handler, backed by delayed work. */
struct amdgpu_dm_timer_handler_data {
	struct handler_common_data hcd;
	struct delayed_work d_work;
};

/* Guard the handler tables; IRQ-safe since handlers run at ISR context. */
#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
|
||||
|
||||
/******************************************************************************
|
||||
* Private functions.
|
||||
*****************************************************************************/
|
||||
|
||||
static void init_handler_common_data(
|
||||
struct handler_common_data *hcd,
|
||||
void (*ih)(void *),
|
||||
void *args,
|
||||
struct amdgpu_display_manager *dm)
|
||||
{
|
||||
hcd->handler = ih;
|
||||
hcd->handler_arg = args;
|
||||
hcd->dm = dm;
|
||||
}
|
||||
|
||||
/**
|
||||
* dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
|
||||
*
|
||||
* @work: work struct
|
||||
*/
|
||||
static void dm_irq_work_func(struct work_struct *work)
|
||||
{
|
||||
struct list_head *entry;
|
||||
struct irq_list_head *irq_list_head =
|
||||
container_of(work, struct irq_list_head, work);
|
||||
struct list_head *handler_list = &irq_list_head->head;
|
||||
struct amdgpu_dm_irq_handler_data *handler_data;
|
||||
|
||||
list_for_each(entry, handler_list) {
|
||||
handler_data =
|
||||
list_entry(
|
||||
entry,
|
||||
struct amdgpu_dm_irq_handler_data,
|
||||
hcd.list);
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
|
||||
handler_data->irq_source);
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
|
||||
handler_data->irq_source);
|
||||
|
||||
handler_data->hcd.handler(handler_data->hcd.handler_arg);
|
||||
}
|
||||
|
||||
/* Call a DAL subcomponent which registered for interrupt notification
|
||||
* at INTERRUPT_LOW_IRQ_CONTEXT.
|
||||
* (The most common use is HPD interrupt) */
|
||||
}
|
||||
|
||||
/**
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed (NULL when the handler was not found - not
 * necessarily an error, since the caller may not know the context).
 *
 * @ih is the registration cookie: the amdgpu_dm_irq_handler_data pointer
 * returned at registration time, compared by identity below.
 */
static struct list_head *remove_irq_handler(
	struct amdgpu_device *adev,
	void *ih,
	const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	/* Pick the per-context table for this source. */
	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				hcd.list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->hcd.list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (handler_removed == false) {
		/* Not necessarily an error - caller may not
		 * know the context. */
		return NULL;
	}

	/* Freed outside the spinlock; the entry is already unlinked. */
	kfree(handler);

	DRM_DEBUG_KMS(
	"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}
|
||||
|
||||
/* If 'handler_in == NULL' then remove ALL handlers. */
static void remove_timer_handler(
	struct amdgpu_device *adev,
	struct amdgpu_dm_timer_handler_data *handler_in)
{
	struct amdgpu_dm_timer_handler_data *handler_temp;
	struct list_head *handler_list;
	struct list_head *entry, *tmp;
	unsigned long irq_table_flags;
	bool handler_removed = false;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	handler_list = &adev->dm.timer_handler_list;

	list_for_each_safe(entry, tmp, handler_list) {
		/* Note that list_for_each_safe() guarantees that
		 * handler_temp is NOT null. */
		handler_temp = list_entry(entry,
				struct amdgpu_dm_timer_handler_data, hcd.list);

		if (handler_in == NULL || handler_in == handler_temp) {
			list_del(&handler_temp->hcd.list);
			/* The lock is dropped so cancel_delayed_work_sync()
			 * can sleep; the entry is already unlinked.
			 * NOTE(review): iteration resumes via 'tmp' after
			 * re-locking - assumes no one else unlinks 'tmp'
			 * meanwhile; confirm callers serialize removal. */
			DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

			DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
					handler_temp);

			if (handler_in == NULL) {
				/* Since it is still in the queue, it must
				 * be cancelled. */
				cancel_delayed_work_sync(&handler_temp->d_work);
			}

			kfree(handler_temp);
			handler_removed = true;

			DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		}

		if (handler_in == NULL) {
			/* Remove ALL handlers. */
			continue;
		}

		if (handler_in == handler_temp) {
			/* Remove a SPECIFIC handler.
			 * Found our handler - we can stop here. */
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (handler_in != NULL && handler_removed == false) {
		DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
				handler_in);
	}
}
|
||||
|
||||
/**
|
||||
* dm_timer_work_func - Handle a timer.
|
||||
*
|
||||
* @work: work struct
|
||||
*/
|
||||
static void dm_timer_work_func(
|
||||
struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_dm_timer_handler_data *handler_data =
|
||||
container_of(work, struct amdgpu_dm_timer_handler_data,
|
||||
d_work.work);
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data);
|
||||
|
||||
/* Call a DAL subcomponent which registered for timer notification. */
|
||||
handler_data->hcd.handler(handler_data->hcd.handler_arg);
|
||||
|
||||
/* We support only "single shot" timers. That means we must delete
|
||||
* the handler after it was called. */
|
||||
remove_timer_handler(handler_data->hcd.dm->adev, handler_data);
|
||||
}
|
||||
|
||||
static bool validate_irq_registration_params(
|
||||
struct dc_interrupt_params *int_params,
|
||||
void (*ih)(void *))
|
||||
{
|
||||
if (NULL == int_params || NULL == ih) {
|
||||
DRM_ERROR("DM_IRQ: invalid input!\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
|
||||
DRM_ERROR("DM_IRQ: invalid context: %d!\n",
|
||||
int_params->int_context);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
|
||||
DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
|
||||
int_params->irq_source);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool validate_irq_unregistration_params(
|
||||
enum dc_irq_source irq_source,
|
||||
irq_handler_idx handler_idx)
|
||||
{
|
||||
if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
|
||||
DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
|
||||
DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
/******************************************************************************
|
||||
* Public functions.
|
||||
*
|
||||
* Note: caller is responsible for input validation.
|
||||
*****************************************************************************/
|
||||
|
||||
void *amdgpu_dm_irq_register_interrupt(
|
||||
struct amdgpu_device *adev,
|
||||
struct dc_interrupt_params *int_params,
|
||||
void (*ih)(void *),
|
||||
void *handler_args)
|
||||
{
|
||||
struct list_head *hnd_list;
|
||||
struct amdgpu_dm_irq_handler_data *handler_data;
|
||||
unsigned long irq_table_flags;
|
||||
enum dc_irq_source irq_source;
|
||||
|
||||
if (false == validate_irq_registration_params(int_params, ih))
|
||||
return DAL_INVALID_IRQ_HANDLER_IDX;
|
||||
|
||||
handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
|
||||
if (!handler_data) {
|
||||
DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
|
||||
return DAL_INVALID_IRQ_HANDLER_IDX;
|
||||
}
|
||||
|
||||
memset(handler_data, 0, sizeof(*handler_data));
|
||||
|
||||
init_handler_common_data(&handler_data->hcd, ih, handler_args,
|
||||
&adev->dm);
|
||||
|
||||
irq_source = int_params->irq_source;
|
||||
|
||||
handler_data->irq_source = irq_source;
|
||||
|
||||
/* Lock the list, add the handler. */
|
||||
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
||||
|
||||
switch (int_params->int_context) {
|
||||
case INTERRUPT_HIGH_IRQ_CONTEXT:
|
||||
hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
|
||||
break;
|
||||
case INTERRUPT_LOW_IRQ_CONTEXT:
|
||||
default:
|
||||
hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
|
||||
break;
|
||||
}
|
||||
|
||||
list_add_tail(&handler_data->hcd.list, hnd_list);
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
|
||||
/* This pointer will be stored by code which requested interrupt
|
||||
* registration.
|
||||
* The same pointer will be needed in order to unregister the
|
||||
* interrupt. */
|
||||
|
||||
DRM_DEBUG_KMS(
|
||||
"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
|
||||
handler_data,
|
||||
irq_source,
|
||||
int_params->int_context);
|
||||
|
||||
return handler_data;
|
||||
}
|
||||
|
||||
void amdgpu_dm_irq_unregister_interrupt(
|
||||
struct amdgpu_device *adev,
|
||||
enum dc_irq_source irq_source,
|
||||
void *ih)
|
||||
{
|
||||
struct list_head *handler_list;
|
||||
struct dc_interrupt_params int_params;
|
||||
int i;
|
||||
|
||||
if (false == validate_irq_unregistration_params(irq_source, ih))
|
||||
return;
|
||||
|
||||
memset(&int_params, 0, sizeof(int_params));
|
||||
|
||||
int_params.irq_source = irq_source;
|
||||
|
||||
for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
|
||||
|
||||
int_params.int_context = i;
|
||||
|
||||
handler_list = remove_irq_handler(adev, ih, &int_params);
|
||||
|
||||
if (handler_list != NULL)
|
||||
break;
|
||||
}
|
||||
|
||||
if (handler_list == NULL) {
|
||||
/* If we got here, it means we searched all irq contexts
|
||||
* for this irq source, but the handler was not found. */
|
||||
DRM_ERROR(
|
||||
"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
|
||||
ih, irq_source);
|
||||
}
|
||||
}
|
||||
|
||||
int amdgpu_dm_irq_init(
|
||||
struct amdgpu_device *adev)
|
||||
{
|
||||
int src;
|
||||
struct irq_list_head *lh;
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ\n");
|
||||
|
||||
spin_lock_init(&adev->dm.irq_handler_list_table_lock);
|
||||
|
||||
for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
|
||||
/* low context handler list init */
|
||||
lh = &adev->dm.irq_handler_list_low_tab[src];
|
||||
INIT_LIST_HEAD(&lh->head);
|
||||
INIT_WORK(&lh->work, dm_irq_work_func);
|
||||
|
||||
/* high context handler init */
|
||||
INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&adev->dm.timer_handler_list);
|
||||
|
||||
/* allocate and initialize the workqueue for DM timer */
|
||||
adev->dm.timer_workqueue = create_singlethread_workqueue(
|
||||
"dm_timer_queue");
|
||||
if (adev->dm.timer_workqueue == NULL) {
|
||||
DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_dm_irq_register_timer(
|
||||
struct amdgpu_device *adev,
|
||||
struct dc_timer_interrupt_params *int_params,
|
||||
interrupt_handler ih,
|
||||
void *args)
|
||||
{
|
||||
unsigned long jf_delay;
|
||||
struct list_head *handler_list;
|
||||
struct amdgpu_dm_timer_handler_data *handler_data;
|
||||
unsigned long irq_table_flags;
|
||||
|
||||
handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
|
||||
if (!handler_data) {
|
||||
DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
memset(handler_data, 0, sizeof(*handler_data));
|
||||
|
||||
init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm);
|
||||
|
||||
INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func);
|
||||
|
||||
/* Lock the list, add the handler. */
|
||||
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
||||
|
||||
handler_list = &adev->dm.timer_handler_list;
|
||||
|
||||
list_add_tail(&handler_data->hcd.list, handler_list);
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
|
||||
jf_delay = usecs_to_jiffies(int_params->micro_sec_interval);
|
||||
|
||||
queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work,
|
||||
jf_delay);
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%u\n",
|
||||
handler_data, int_params->micro_sec_interval);
|
||||
return;
|
||||
}
|
||||
|
||||
/* DM IRQ and timer resource release */
/**
 * amdgpu_dm_irq_fini - release DM irq and timer resources.
 *
 * @adev: amdgpu device
 *
 * Flushes all pending low-context irq work, cancels and frees all timer
 * handlers, then destroys the timer workqueue. Must be called exactly
 * once, during DM teardown, after interrupt delivery has stopped.
 */
void amdgpu_dm_irq_fini(
	struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {

		/* The handler was removed from the table,
		 * it means it is safe to flush all the 'work'
		 * (because no code can schedule a new one). */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		flush_work(&lh->work);
	}

	/* Cancel ALL timers and release handlers (if any). */
	remove_timer_handler(adev, NULL);
	/* Release the queue itself. */
	destroy_workqueue(adev->dm.timer_workqueue);
}
|
||||
|
||||
/**
 * amdgpu_dm_irq_suspend - disable DC interrupts and drain pending work.
 *
 * @adev: amdgpu device
 *
 * For every irq source that has at least one registered handler (in
 * either context), the HW interrupt is disabled; any queued low-context
 * work for that source is then flushed. Always returns 0.
 */
int amdgpu_dm_irq_suspend(
	struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/* disable HW interrupt */
	for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		/* The table lock is dropped around flush_work() because
		 * flush_work() can sleep (spinlock held otherwise);
		 * NOTE(review): presumably the work item also takes this
		 * lock, which would deadlock if held — confirm against
		 * dm_irq_work_func. */
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}
|
||||
|
||||
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
|
||||
{
|
||||
int src;
|
||||
struct list_head *hnd_list_h, *hnd_list_l;
|
||||
unsigned long irq_table_flags;
|
||||
|
||||
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ: early resume\n");
|
||||
|
||||
/* re-enable short pulse interrupts HW interrupt */
|
||||
for (src = DC_IRQ_SOURCE_HPD1RX; src < DC_IRQ_SOURCE_HPD6RX + 1; src++) {
|
||||
hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
|
||||
hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
|
||||
if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
|
||||
dc_interrupt_set(adev->dm.dc, src, true);
|
||||
}
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_dm_irq_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
int src;
|
||||
struct list_head *hnd_list_h, *hnd_list_l;
|
||||
unsigned long irq_table_flags;
|
||||
|
||||
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
||||
|
||||
DRM_DEBUG_KMS("DM_IRQ: resume\n");
|
||||
|
||||
/* re-enable HW interrupt */
|
||||
for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
|
||||
hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
|
||||
hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
|
||||
if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
|
||||
dc_interrupt_set(adev->dm.dc, src, true);
|
||||
}
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 *
 * The work pointer is captured under the table lock, but schedule_work()
 * is deliberately called after the lock is dropped.
 */
static void amdgpu_dm_irq_schedule_work(
	struct amdgpu_device *adev,
	enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	/* Only schedule if at least one low-context handler is registered
	 * for this source. */
	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (work) {
		/* schedule_work() returns false when the work was already
		 * queued; that case is only logged, not treated as fatal. */
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
						irq_source);
	}

}
|
||||
|
||||
/** amdgpu_dm_irq_immediate_work
|
||||
* Callback high irq work immediately, don't send to work queue
|
||||
*/
|
||||
static void amdgpu_dm_irq_immediate_work(
|
||||
struct amdgpu_device *adev,
|
||||
enum dc_irq_source irq_source)
|
||||
{
|
||||
struct amdgpu_dm_irq_handler_data *handler_data;
|
||||
struct list_head *entry;
|
||||
unsigned long irq_table_flags;
|
||||
|
||||
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
||||
|
||||
list_for_each(
|
||||
entry,
|
||||
&adev->dm.irq_handler_list_high_tab[irq_source]) {
|
||||
|
||||
handler_data =
|
||||
list_entry(
|
||||
entry,
|
||||
struct amdgpu_dm_irq_handler_data,
|
||||
hcd.list);
|
||||
|
||||
/* Call a subcomponent which registered for immediate
|
||||
* interrupt notification */
|
||||
handler_data->hcd.handler(handler_data->hcd.handler_arg);
|
||||
}
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
}
|
||||
|
||||
/*
 * amdgpu_dm_irq_handler
 *
 * Generic IRQ handler, calls all registered high irq work immediately, and
 * schedules work for low irq
 *
 * Returns 0 unconditionally (required by amdgpu_irq_src_funcs.process).
 */
int amdgpu_dm_irq_handler(
	struct amdgpu_device *adev,
	struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{

	/* Translate the HW vector (src_id + first payload word) into the
	 * DC irq source enum used to index the handler tables. */
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	/* Ack before dispatching, so the source can re-trigger. */
	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/*Schedule low_irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}
|
||||
|
||||
static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
|
||||
{
|
||||
switch (type) {
|
||||
case AMDGPU_HPD_1:
|
||||
return DC_IRQ_SOURCE_HPD1;
|
||||
case AMDGPU_HPD_2:
|
||||
return DC_IRQ_SOURCE_HPD2;
|
||||
case AMDGPU_HPD_3:
|
||||
return DC_IRQ_SOURCE_HPD3;
|
||||
case AMDGPU_HPD_4:
|
||||
return DC_IRQ_SOURCE_HPD4;
|
||||
case AMDGPU_HPD_5:
|
||||
return DC_IRQ_SOURCE_HPD5;
|
||||
case AMDGPU_HPD_6:
|
||||
return DC_IRQ_SOURCE_HPD6;
|
||||
default:
|
||||
return DC_IRQ_SOURCE_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned type,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
|
||||
bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
|
||||
|
||||
dc_interrupt_set(adev->dm.dc, src, st);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * dm_irq_state - common helper for the CRTC/pageflip irq .set callbacks.
 *
 * @adev: amdgpu device
 * @source: amdgpu irq source (unused here, part of the callback shape)
 * @crtc_id: index into adev->mode_info.crtcs
 * @state: requested enable/disable state
 * @dal_irq_type: base DC irq type; the per-CRTC source is computed by
 *	adding the CRTC's otg_inst to it
 * @func: caller name, for the error message only
 *
 * Always returns 0, even when the CRTC lookup fails (only logged).
 */
static inline int dm_irq_state(
	struct amdgpu_device *adev,
	struct amdgpu_irq_src *source,
	unsigned crtc_id,
	enum amdgpu_interrupt_state state,
	const enum irq_type dal_irq_type,
	const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR(
			"%s: crtc is NULL at id :%d\n",
			func,
			crtc_id);
		return 0;
	}

	/* Per-CRTC irq source = base type + OTG instance of this CRTC. */
	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}
|
||||
|
||||
static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned crtc_id,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
return dm_irq_state(
|
||||
adev,
|
||||
source,
|
||||
crtc_id,
|
||||
state,
|
||||
IRQ_TYPE_PFLIP,
|
||||
__func__);
|
||||
}
|
||||
|
||||
static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned crtc_id,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
return dm_irq_state(
|
||||
adev,
|
||||
source,
|
||||
crtc_id,
|
||||
state,
|
||||
IRQ_TYPE_VUPDATE,
|
||||
__func__);
|
||||
}
|
||||
|
||||
/* CRTC (vupdate) interrupt: enable/disable + generic dispatch. */
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

/* Page-flip interrupt: enable/disable + generic dispatch. */
static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

/* Hot-plug-detect interrupt: enable/disable + generic dispatch. */
static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};
|
||||
|
||||
/*
 * amdgpu_dm_set_irq_funcs - hook the DM irq callback tables into the
 * amdgpu device (CRTC, page-flip and HPD sources).
 */
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
|
||||
|
||||
/*
|
||||
* amdgpu_dm_hpd_init - hpd setup callback.
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Setup the hpd pins used by the card (evergreen+).
|
||||
* Enable the pin, set the polarity, and enable the hpd interrupts.
|
||||
*/
|
||||
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
struct amdgpu_connector *amdgpu_connector =
|
||||
to_amdgpu_connector(connector);
|
||||
|
||||
const struct dc_link *dc_link = amdgpu_connector->dc_link;
|
||||
|
||||
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
|
||||
dc_interrupt_set(adev->dm.dc,
|
||||
dc_link->irq_source_hpd,
|
||||
true);
|
||||
}
|
||||
|
||||
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
|
||||
dc_interrupt_set(adev->dm.dc,
|
||||
dc_link->irq_source_hpd_rx,
|
||||
true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dm_hpd_fini - hpd tear down callback.
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Tear down the hpd pins used by the card (evergreen+).
|
||||
* Disable the hpd interrupts.
|
||||
*/
|
||||
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
struct amdgpu_connector *amdgpu_connector =
|
||||
to_amdgpu_connector(connector);
|
||||
const struct dc_link *dc_link = amdgpu_connector->dc_link;
|
||||
|
||||
dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
|
||||
|
||||
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
|
||||
dc_interrupt_set(adev->dm.dc,
|
||||
dc_link->irq_source_hpd_rx,
|
||||
false);
|
||||
}
|
||||
}
|
||||
}
|
||||
122
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
Normal file
122
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
Normal file
@@ -0,0 +1,122 @@
|
||||
/*
|
||||
* Copyright 2015 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __AMDGPU_DM_IRQ_H__
|
||||
#define __AMDGPU_DM_IRQ_H__
|
||||
|
||||
#include "irq_types.h" /* DAL irq definitions */
|
||||
|
||||
/*
|
||||
* Display Manager IRQ-related interfaces (for use by DAL).
|
||||
*/
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
|
||||
*
|
||||
* This function should be called exactly once - during DM initialization.
|
||||
*
|
||||
* Returns:
|
||||
* 0 - success
|
||||
* non-zero - error
|
||||
*/
|
||||
int amdgpu_dm_irq_init(
|
||||
struct amdgpu_device *adev);
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
|
||||
*
|
||||
* This function should be called exactly once - during DM destruction.
|
||||
*
|
||||
*/
|
||||
void amdgpu_dm_irq_fini(
|
||||
struct amdgpu_device *adev);
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
|
||||
*
|
||||
* @adev: AMD DRM device
|
||||
* @int_params: parameters for the irq
|
||||
* @ih: pointer to the irq hander function
|
||||
* @handler_args: arguments which will be passed to ih
|
||||
*
|
||||
* Returns:
|
||||
* IRQ Handler Index on success.
|
||||
* NULL on failure.
|
||||
*
|
||||
* Cannot be called from an interrupt handler.
|
||||
*/
|
||||
void *amdgpu_dm_irq_register_interrupt(
|
||||
struct amdgpu_device *adev,
|
||||
struct dc_interrupt_params *int_params,
|
||||
void (*ih)(void *),
|
||||
void *handler_args);
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
|
||||
* by amdgpu_dm_irq_register_interrupt().
|
||||
*
|
||||
* @adev: AMD DRM device.
|
||||
* @ih_index: irq handler index which was returned by
|
||||
* amdgpu_dm_irq_register_interrupt
|
||||
*/
|
||||
void amdgpu_dm_irq_unregister_interrupt(
|
||||
struct amdgpu_device *adev,
|
||||
enum dc_irq_source irq_source,
|
||||
void *ih_index);
|
||||
|
||||
void amdgpu_dm_irq_register_timer(
|
||||
struct amdgpu_device *adev,
|
||||
struct dc_timer_interrupt_params *int_params,
|
||||
interrupt_handler ih,
|
||||
void *args);
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_handler
|
||||
* Generic IRQ handler, calls all registered high irq work immediately, and
|
||||
* schedules work for low irq
|
||||
*/
|
||||
int amdgpu_dm_irq_handler(
|
||||
struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry);
|
||||
|
||||
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
|
||||
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
|
||||
*
|
||||
*/
|
||||
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
|
||||
|
||||
/**
|
||||
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
|
||||
* amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
|
||||
*
|
||||
*/
|
||||
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
|
||||
int amdgpu_dm_irq_resume(struct amdgpu_device *adev);
|
||||
|
||||
#endif /* __AMDGPU_DM_IRQ_H__ */
|
||||
443
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
Normal file
443
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
Normal file
@@ -0,0 +1,443 @@
|
||||
/*
|
||||
* Copyright 2012-15 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include "dm_services.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_dm_types.h"
|
||||
#include "amdgpu_dm_mst_types.h"
|
||||
|
||||
#include "dc.h"
|
||||
#include "dm_helpers.h"
|
||||
|
||||
/* #define TRACE_DPCD */
|
||||
|
||||
#ifdef TRACE_DPCD
|
||||
#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
|
||||
|
||||
static inline char *side_band_msg_type_to_str(uint32_t address)
|
||||
{
|
||||
static char str[10] = {0};
|
||||
|
||||
if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
|
||||
strcpy(str, "DOWN_REQ");
|
||||
else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
|
||||
strcpy(str, "UP_REP");
|
||||
else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
|
||||
strcpy(str, "DOWN_REP");
|
||||
else
|
||||
strcpy(str, "UP_REQ");
|
||||
|
||||
return str;
|
||||
}
|
||||
|
||||
/*
 * log_dpcd - trace helper (compiled only under TRACE_DPCD).
 *
 * @type: AUX request type (DP_AUX_NATIVE_READ/WRITE, DP_AUX_I2C_READ, ...)
 * @address: DPCD address of the transaction
 * @data: payload buffer (dumped only when @res is true)
 * @size: payload length in bytes
 * @res: whether the AUX operation succeeded
 */
void log_dpcd(uint8_t type,
		uint32_t address,
		uint8_t *data,
		uint32_t size,
		bool res)
{
	DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
			(type == DP_AUX_NATIVE_READ) ||
			(type == DP_AUX_I2C_READ) ?
					"Read" : "Write",
			address,
			SIDE_BAND_MSG(address) ?
					side_band_msg_type_to_str(address) : "Nop",
			res ? "OK" : "Fail");

	if (res) {
		print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
	}
}
|
||||
#endif
|
||||
|
||||
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(aux->dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
struct amdgpu_device *adev = drm_dev->dev_private;
|
||||
struct dc *dc = adev->dm.dc;
|
||||
bool res;
|
||||
|
||||
switch (msg->request) {
|
||||
case DP_AUX_NATIVE_READ:
|
||||
res = dc_read_dpcd(
|
||||
dc,
|
||||
TO_DM_AUX(aux)->link_index,
|
||||
msg->address,
|
||||
msg->buffer,
|
||||
msg->size);
|
||||
break;
|
||||
case DP_AUX_NATIVE_WRITE:
|
||||
res = dc_write_dpcd(
|
||||
dc,
|
||||
TO_DM_AUX(aux)->link_index,
|
||||
msg->address,
|
||||
msg->buffer,
|
||||
msg->size);
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef TRACE_DPCD
|
||||
log_dpcd(msg->request,
|
||||
msg->address,
|
||||
msg->buffer,
|
||||
msg->size,
|
||||
res);
|
||||
#endif
|
||||
|
||||
return msg->size;
|
||||
}
|
||||
|
||||
/*
 * dm_dp_mst_detect - .detect callback for MST connectors.
 *
 * Asks the MST topology manager of the master connector whether this
 * port is connected; a connected port without a cached EDID is still
 * reported as disconnected (see comment below).
 */
static enum drm_connector_status
dm_dp_mst_detect(struct drm_connector *connector, bool force)
{
	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
	struct amdgpu_connector *master = aconnector->mst_port;

	enum drm_connector_status status =
		drm_dp_mst_detect_port(
			connector,
			&master->mst_mgr,
			aconnector->port);

	/*
	 * we do not want to make this connector connected until we have edid on
	 * it
	 */
	if (status == connector_status_connected &&
	    !aconnector->port->cached_edid)
		status = connector_status_disconnected;

	return status;
}
|
||||
|
||||
/*
 * dm_dp_mst_connector_destroy - .destroy callback for MST connectors.
 *
 * Tears down the fake MST encoder first, then the connector itself;
 * both were allocated together in dm_dp_add_mst_connector() /
 * dm_dp_create_fake_mst_encoder().
 */
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder;

	drm_encoder_cleanup(&amdgpu_encoder->base);
	kfree(amdgpu_encoder);
	drm_connector_cleanup(connector);
	kfree(amdgpu_connector);
}
|
||||
|
||||
/* drm_connector_funcs vtable for dynamically created MST connectors. */
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.detect = dm_dp_mst_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property
};
|
||||
|
||||
static int dm_dp_mst_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
|
||||
int ret = 0;
|
||||
|
||||
ret = drm_add_edid_modes(&aconnector->base, aconnector->edid);
|
||||
|
||||
drm_edid_to_eld(&aconnector->base, aconnector->edid);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
|
||||
{
|
||||
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
|
||||
|
||||
return &amdgpu_connector->mst_encoder->base;
|
||||
}
|
||||
|
||||
/* drm_connector_helper_funcs vtable for MST connectors. */
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = dm_mst_best_encoder,
};
|
||||
|
||||
static struct amdgpu_encoder *
|
||||
dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct amdgpu_encoder *amdgpu_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
const struct drm_connector_helper_funcs *connector_funcs =
|
||||
connector->base.helper_private;
|
||||
struct drm_encoder *enc_master =
|
||||
connector_funcs->best_encoder(&connector->base);
|
||||
|
||||
DRM_DEBUG_KMS("enc master is %p\n", enc_master);
|
||||
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
|
||||
if (!amdgpu_encoder)
|
||||
return NULL;
|
||||
|
||||
encoder = &amdgpu_encoder->base;
|
||||
encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
|
||||
|
||||
drm_encoder_init(
|
||||
dev,
|
||||
&amdgpu_encoder->base,
|
||||
NULL,
|
||||
DRM_MODE_ENCODER_DPMST,
|
||||
NULL);
|
||||
|
||||
drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
|
||||
|
||||
return amdgpu_encoder;
|
||||
}
|
||||
|
||||
/*
 * dm_dp_add_mst_connector - MST topology callback: create (or reuse) a
 * connector for a newly discovered MST port.
 *
 * First scans the existing connector list, under connection_mutex, for a
 * connector that belongs to this master but currently has no port, and
 * reuses it. Otherwise allocates a fresh amdgpu_connector, initializes
 * it as a DisplayPort connector, creates its fake MST encoder and
 * attaches the path/tile properties.
 *
 * Returns the drm_connector, or NULL on allocation/init failure.
 */
static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
							struct drm_dp_mst_port *port,
							const char *pathprop)
{
	struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	/* Reuse path: an orphaned connector of this master (port == NULL). */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->mst_port == master
				&& !aconnector->port) {
			DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
				aconnector, connector->base.id, aconnector->mst_port);

			aconnector->port = port;
			drm_mode_connector_set_path_property(connector, pathprop);

			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			return &aconnector->base;
		}
	}
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);

	/*
	 * TODO: understand why this one is needed
	 */
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_mode_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connectror to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
			aconnector, connector->base.id, aconnector->mst_port);

	DRM_DEBUG_KMS(":%d\n", connector->base.id);

	return connector;
}
|
||||
|
||||
static void dm_dp_destroy_mst_connector(
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
|
||||
|
||||
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
|
||||
aconnector, connector->base.id, aconnector->mst_port);
|
||||
|
||||
aconnector->port = NULL;
|
||||
if (aconnector->dc_sink) {
|
||||
amdgpu_dm_remove_sink_from_freesync_module(connector);
|
||||
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
|
||||
dc_sink_release(aconnector->dc_sink);
|
||||
aconnector->dc_sink = NULL;
|
||||
}
|
||||
if (aconnector->edid) {
|
||||
kfree(aconnector->edid);
|
||||
aconnector->edid = NULL;
|
||||
}
|
||||
|
||||
drm_mode_connector_update_edid_property(
|
||||
&aconnector->base,
|
||||
NULL);
|
||||
}
|
||||
|
||||
/*
 * MST topology hotplug callback.
 *
 * Walks every connector on the device and, for each MST connector whose
 * port exists and carries a real sink type (not NONE and not a branching
 * device) but which has no DC sink yet, fetches the EDID over the MST
 * topology, creates the remote DC sink and publishes the EDID property.
 * Finally kicks the deferred hotplug work item so userspace is notified.
 *
 * Runs with all modeset locks held for the duration of the scan.
 */
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
	/* Recover the owning master connector from the embedded mgr. */
	struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	struct edid *edid;

	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->port &&
			aconnector->port->pdt != DP_PEER_DEVICE_NONE &&
			aconnector->port->pdt != DP_PEER_DEVICE_MST_BRANCHING &&
			!aconnector->dc_sink) {
			/*
			 * This is plug in case, where port has been created but
			 * sink hasn't been created yet
			 */
			if (!aconnector->edid) {
				struct dc_sink_init_data init_params = {
						.link = aconnector->dc_link,
						.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST};
				edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

				if (!edid) {
					/* No EDID: clear the property and move on. */
					drm_mode_connector_update_edid_property(
						&aconnector->base,
						NULL);
					continue;
				}

				/* Cache the EDID; freed in the destroy path. */
				aconnector->edid = edid;

				aconnector->dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					(uint8_t *)edid,
					(edid->extensions + 1) * EDID_LENGTH,
					&init_params);
				if (aconnector->dc_sink)
					amdgpu_dm_add_sink_to_freesync_module(
							connector,
							edid);

				dm_restore_drm_connector_state(connector->dev, connector);
			} else
				edid = aconnector->edid;

			DRM_DEBUG_KMS("edid retrieved %p\n", edid);

			drm_mode_connector_update_edid_property(
				&aconnector->base,
				aconnector->edid);
		}
	}
	drm_modeset_unlock_all(dev);

	/* Defer userspace notification to the DM hotplug work item. */
	schedule_work(&adev->dm.mst_hotplug_work);
}
|
||||
|
||||
static void dm_dp_mst_register_connector(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
int i;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
if (adev->mode_info.rfbdev) {
|
||||
/*Do not add if already registered in past*/
|
||||
for (i = 0; i < adev->mode_info.rfbdev->helper.connector_count; i++) {
|
||||
if (adev->mode_info.rfbdev->helper.connector_info[i]->connector
|
||||
== connector) {
|
||||
drm_modeset_unlock_all(dev);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
|
||||
}
|
||||
else
|
||||
DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
|
||||
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
drm_connector_register(connector);
|
||||
|
||||
}
|
||||
|
||||
/* MST topology manager callbacks handed to the DRM DP MST helper. */
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.destroy_connector = dm_dp_destroy_mst_connector,
	.hotplug = dm_dp_mst_hotplug,
	.register_connector = dm_dp_mst_register_connector
};
|
||||
|
||||
/*
 * Set up DP AUX and the MST topology manager for a (potential) MST
 * master connector.  Registers the "dmdc" AUX channel and initializes
 * the per-connector topology manager with the DM callback table.
 */
void amdgpu_dm_initialize_mst_connector(
	struct amdgpu_display_manager *dm,
	struct amdgpu_connector *aconnector)
{
	aconnector->dm_dp_aux.aux.name = "dmdc";
	aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	/* Route AUX transfers through this link's index. */
	aconnector->dm_dp_aux.link_index = aconnector->connector_id;

	/* NOTE(review): return values of drm_dp_aux_register() and
	 * drm_dp_mst_topology_mgr_init() are ignored — confirm failure here
	 * is acceptable for this init path. */
	drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		dm->adev->ddev,
		&aconnector->dm_dp_aux.aux,
		16,	/* max DPCD transaction bytes per AUX access */
		4,	/* max simultaneous payload streams */
		aconnector->connector_id);
}
|
||||
|
||||
36
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
Normal file
36
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright 2012-15 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
|
||||
#define __DAL_AMDGPU_DM_MST_TYPES_H__
|
||||
|
||||
struct amdgpu_display_manager;
|
||||
struct amdgpu_connector;
|
||||
|
||||
void amdgpu_dm_initialize_mst_connector(
|
||||
struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_connector *aconnector);
|
||||
|
||||
#endif
|
||||
463
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
Normal file
463
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
Normal file
@@ -0,0 +1,463 @@
|
||||
/*
|
||||
* Copyright 2015 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <linux/acpi.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "dm_services.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_dm.h"
|
||||
#include "amdgpu_dm_irq.h"
|
||||
#include "amdgpu_dm_types.h"
|
||||
#include "amdgpu_pm.h"
|
||||
|
||||
#define dm_alloc(size) kzalloc(size, GFP_KERNEL)
|
||||
#define dm_realloc(ptr, size) krealloc(ptr, size, GFP_KERNEL)
|
||||
#define dm_free(ptr) kfree(ptr)
|
||||
|
||||
/******************************************************************************
|
||||
* IRQ Interfaces.
|
||||
*****************************************************************************/
|
||||
|
||||
void dal_register_timer_interrupt(
|
||||
struct dc_context *ctx,
|
||||
struct dc_timer_interrupt_params *int_params,
|
||||
interrupt_handler ih,
|
||||
void *args)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
|
||||
if (!adev || !int_params) {
|
||||
DRM_ERROR("DM_IRQ: invalid input!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) {
|
||||
/* only low irq ctx is supported. */
|
||||
DRM_ERROR("DM_IRQ: invalid context: %d!\n",
|
||||
int_params->int_context);
|
||||
return;
|
||||
}
|
||||
|
||||
amdgpu_dm_irq_register_timer(adev, int_params, ih, args);
|
||||
}
|
||||
|
||||
/* Acquire the ISR lock for DC.  Not implemented: no locking occurs. */
void dal_isr_acquire_lock(struct dc_context *ctx)
{
	/*TODO*/
}
|
||||
|
||||
/* Release the ISR lock for DC.  Not implemented: no locking occurs. */
void dal_isr_release_lock(struct dc_context *ctx)
{
	/*TODO*/
}
|
||||
|
||||
/******************************************************************************
|
||||
* End-of-IRQ Interfaces.
|
||||
*****************************************************************************/
|
||||
|
||||
/*
 * Query platform information for DC.  Stub: always reports failure, so
 * callers must handle the "no platform info" case.
 */
bool dm_get_platform_info(struct dc_context *ctx,
		struct platform_info_params *params)
{
	/*TODO*/
	return false;
}
|
||||
|
||||
/*
 * Store per-sink persistent key/value data for a DC module.
 * Stub: always reports failure; nothing is persisted.
 */
bool dm_write_persistent_data(struct dc_context *ctx,
		const struct dc_sink *sink,
		const char *module_name,
		const char *key_name,
		void *params,
		unsigned int size,
		struct persistent_data_flag *flag)
{
	/*TODO implement*/
	return false;
}
|
||||
|
||||
/*
 * Read per-sink persistent key/value data for a DC module.
 * Stub: always reports failure; callers must fall back to defaults.
 */
bool dm_read_persistent_data(struct dc_context *ctx,
		const struct dc_sink *sink,
		const char *module_name,
		const char *key_name,
		void *params,
		unsigned int size,
		struct persistent_data_flag *flag)
{
	/*TODO implement*/
	return false;
}
|
||||
|
||||
/*
 * Microsecond-granularity delay service for DC.
 *
 * Not implemented yet: returns immediately without delaying, so callers
 * that depend on real timing get no wait at all.  The trailing bare
 * `return;` of the original served no purpose in a void function and has
 * been dropped.
 */
void dm_delay_in_microseconds(struct dc_context *ctx,
		unsigned int microSeconds)
{
	/* TODO: implement (e.g. udelay()/usleep_range() by length). */
}
|
||||
|
||||
/**** power component interfaces ****/
|
||||
|
||||
/*
 * Negotiate GPU clock ranges with PPLib ahead of a DCE clock change.
 * Stub: always reports failure; actual_state is never written.
 */
bool dm_pp_pre_dce_clock_change(
	struct dc_context *ctx,
	struct dm_pp_gpu_clock_range *requested_state,
	struct dm_pp_gpu_clock_range *actual_state)
{
	/*TODO*/
	return false;
}
|
||||
|
||||
/*
 * Ask power management to enter a "safe" state before display changes.
 * Currently does nothing but always claims success, even with DPM on.
 */
bool dm_pp_apply_safe_state(
	const struct dc_context *ctx)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->pm.dpm_enabled) {
		/* TODO: Does this require PreModeChange event to PPLIB? */
	}

	return true;
}
|
||||
|
||||
/*
 * Translate DC's display configuration into amdgpu's pm_display_cfg and
 * hand it to PPLib so power management can honor display constraints.
 *
 * DC supplies clock values in kHz while PPLib expects 10 kHz units —
 * hence the /10 conversions below.  When DPM is disabled this is a no-op
 * that still returns true.
 */
bool dm_pp_apply_display_requirements(
	const struct dc_context *ctx,
	const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->pm.dpm_enabled) {

		/* Start from a clean slate every time. */
		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		/* kHz -> 10 kHz units expected by PPLib. */
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		/* NOTE(review): only disp_configs[0] is consulted here —
		 * confirm multi-display refresh handling is intentional. */
		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* TODO: complete implementation of
		 * amd_powerplay_display_configuration_change().
		 * Follow example of:
		 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
		 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
		amd_powerplay_display_configuration_change(
			adev->powerplay.pp_handle,
			&adev->pm.pm_display_cfg);

		/* TODO: replace by a separate call to 'apply display cfg'? */
		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}
|
||||
|
||||
bool dc_service_get_system_clocks_range(
|
||||
const struct dc_context *ctx,
|
||||
struct dm_pp_gpu_clock_range *sys_clks)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->driver_context;
|
||||
|
||||
/* Default values, in case PPLib is not compiled-in. */
|
||||
sys_clks->mclk.max_khz = 800000;
|
||||
sys_clks->mclk.min_khz = 800000;
|
||||
|
||||
sys_clks->sclk.max_khz = 600000;
|
||||
sys_clks->sclk.min_khz = 300000;
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
|
||||
sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
|
||||
|
||||
sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
|
||||
sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void get_default_clock_levels(
|
||||
enum dm_pp_clock_type clk_type,
|
||||
struct dm_pp_clock_levels *clks)
|
||||
{
|
||||
uint32_t disp_clks_in_khz[6] = {
|
||||
300000, 400000, 496560, 626090, 685720, 757900 };
|
||||
uint32_t sclks_in_khz[6] = {
|
||||
300000, 360000, 423530, 514290, 626090, 720000 };
|
||||
uint32_t mclks_in_khz[2] = { 333000, 800000 };
|
||||
|
||||
switch (clk_type) {
|
||||
case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
|
||||
clks->num_levels = 6;
|
||||
memmove(clks->clocks_in_khz, disp_clks_in_khz,
|
||||
sizeof(disp_clks_in_khz));
|
||||
break;
|
||||
case DM_PP_CLOCK_TYPE_ENGINE_CLK:
|
||||
clks->num_levels = 6;
|
||||
memmove(clks->clocks_in_khz, sclks_in_khz,
|
||||
sizeof(sclks_in_khz));
|
||||
break;
|
||||
case DM_PP_CLOCK_TYPE_MEMORY_CLK:
|
||||
clks->num_levels = 2;
|
||||
memmove(clks->clocks_in_khz, mclks_in_khz,
|
||||
sizeof(mclks_in_khz));
|
||||
break;
|
||||
default:
|
||||
clks->num_levels = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static enum amd_pp_clock_type dc_to_pp_clock_type(
|
||||
enum dm_pp_clock_type dm_pp_clk_type)
|
||||
{
|
||||
enum amd_pp_clock_type amd_pp_clk_type = 0;
|
||||
|
||||
switch (dm_pp_clk_type) {
|
||||
case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
|
||||
amd_pp_clk_type = amd_pp_disp_clock;
|
||||
break;
|
||||
case DM_PP_CLOCK_TYPE_ENGINE_CLK:
|
||||
amd_pp_clk_type = amd_pp_sys_clock;
|
||||
break;
|
||||
case DM_PP_CLOCK_TYPE_MEMORY_CLK:
|
||||
amd_pp_clk_type = amd_pp_mem_clock;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
|
||||
dm_pp_clk_type);
|
||||
break;
|
||||
}
|
||||
|
||||
return amd_pp_clk_type;
|
||||
}
|
||||
|
||||
static void pp_to_dc_clock_levels(
|
||||
const struct amd_pp_clocks *pp_clks,
|
||||
struct dm_pp_clock_levels *dc_clks,
|
||||
enum dm_pp_clock_type dc_clk_type)
|
||||
{
|
||||
uint32_t i;
|
||||
|
||||
if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
|
||||
DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
|
||||
DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
|
||||
pp_clks->count,
|
||||
DM_PP_MAX_CLOCK_LEVELS);
|
||||
|
||||
dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
|
||||
} else
|
||||
dc_clks->num_levels = pp_clks->count;
|
||||
|
||||
DRM_INFO("DM_PPLIB: values for %s clock\n",
|
||||
DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
|
||||
|
||||
for (i = 0; i < dc_clks->num_levels; i++) {
|
||||
DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
|
||||
/* translate 10kHz to kHz */
|
||||
dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Fetch the available levels of @clk_type from PPLib into @dc_clks
 * (converted to kHz), then trim away "boosted" levels that exceed the
 * mode-validation clocks.  On any PPLib error the hard-coded defaults
 * are substituted instead.  Always returns true: failures are absorbed
 * via defaults rather than propagated to the caller.
 */
bool dm_pp_get_clock_levels_by_type(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (amd_powerplay_get_clock_by_type(pp_handle,
		dc_to_pp_clock_type(clk_type), &pp_clks)) {
		/* Error in pplib. Provide default values. */
		get_default_clock_levels(clk_type, dc_clks);
		return true;
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
			&validation_clks)) {
		/* Error in pplib. Provide default values. */
		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
		validation_clks.engine_max_clock = 72000;
		validation_clks.memory_max_clock = 80000;
		validation_clks.level = 0;
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the Validation Clocks */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock: the previous one is the highest
				 * non-boosted level. */
				/* NOTE(review): the message logs 'i + 1' but
				 * num_levels is set to 'i' — confirm which
				 * count is intended. */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i + 1);
				dc_clks->num_levels = i;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i + 1);
				dc_clks->num_levels = i;
				break;
			}
		}
	}

	return true;
}
|
||||
|
||||
/*
 * Fetch clock levels together with switch latencies.
 * Stub: always reports failure; clk_level_info is never written.
 */
bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	/* TODO: to be implemented */
	return false;
}
|
||||
|
||||
/*
 * Fetch clock levels together with required voltages.
 * Stub: always reports failure; clk_level_info is never written.
 */
bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	/* TODO: to be implemented */
	return false;
}
|
||||
|
||||
/*
 * Notify power management of new watermark clock ranges.
 * Stub: always reports failure; no notification is sent.
 */
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}
|
||||
|
||||
/*
 * Apply a requested power-level change via PPLib.
 * Stub: always reports failure; the request is ignored.
 */
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
|
||||
|
||||
/*
 * Request a clock for a given voltage via PPLib.
 * Stub: always reports failure; the request is ignored.
 */
bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	/* TODO: to be implemented */
	return false;
}
|
||||
|
||||
/*
 * Query the static (non-DPM) clock info from power management.
 * Stub: always reports failure; static_clk_info is never written.
 */
bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	/* TODO: to be implemented */
	return false;
}
|
||||
|
||||
/**** end of power component interfaces ****/
|
||||
|
||||
/* Calls to notification */
|
||||
|
||||
/*
 * Notify interested parties that a set-mode operation completed with the
 * given timing (totals, active region, pixel clock in kHz).
 * Stub: no notification is delivered yet.
 */
void dal_notify_setmode_complete(struct dc_context *ctx,
	uint32_t h_total,
	uint32_t v_total,
	uint32_t h_active,
	uint32_t v_active,
	uint32_t pix_clk_in_khz)
{
	/*TODO*/
}
|
||||
/* End of calls to notification */
|
||||
|
||||
/* OS-abstraction service for DC: PID of the current task. */
long dm_get_pid(void)
{
	return current->pid;
}
|
||||
|
||||
/* OS-abstraction service for DC: thread-group ID of the current task. */
long dm_get_tgid(void)
{
	return current->tgid;
}
|
||||
3150
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
Normal file
3150
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
Normal file
File diff suppressed because it is too large
Load Diff
101
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
Normal file
101
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
Normal file
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
* Copyright 2012-13 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __AMDGPU_DM_TYPES_H__
|
||||
#define __AMDGPU_DM_TYPES_H__
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
||||
struct amdgpu_framebuffer;
|
||||
struct amdgpu_display_manager;
|
||||
struct dc_validation_set;
|
||||
struct dc_surface;
|
||||
|
||||
/*TODO Jodan Hersen use the one in amdgpu_dm*/
|
||||
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_crtc *amdgpu_crtc,
|
||||
uint32_t link_index);
|
||||
int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_connector *amdgpu_connector,
|
||||
uint32_t link_index,
|
||||
struct amdgpu_encoder *amdgpu_encoder);
|
||||
int amdgpu_dm_encoder_init(
|
||||
struct drm_device *dev,
|
||||
struct amdgpu_encoder *aencoder,
|
||||
uint32_t link_index);
|
||||
|
||||
void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
|
||||
void amdgpu_dm_connector_destroy(struct drm_connector *connector);
|
||||
void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
|
||||
|
||||
int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
|
||||
|
||||
int amdgpu_dm_atomic_commit(
|
||||
struct drm_device *dev,
|
||||
struct drm_atomic_state *state,
|
||||
bool async);
|
||||
int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
struct drm_atomic_state *state);
|
||||
|
||||
int dm_create_validation_set_for_target(
|
||||
struct drm_connector *connector,
|
||||
struct drm_display_mode *mode,
|
||||
struct dc_validation_set *val_set);
|
||||
|
||||
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
|
||||
struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
|
||||
struct drm_connector *connector);
|
||||
|
||||
int amdgpu_dm_connector_atomic_set_property(
|
||||
struct drm_connector *connector,
|
||||
struct drm_connector_state *state,
|
||||
struct drm_property *property,
|
||||
uint64_t val);
|
||||
|
||||
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_dm_connector_init_helper(
|
||||
struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_connector *aconnector,
|
||||
int connector_type,
|
||||
const struct dc_link *link,
|
||||
int link_index);
|
||||
|
||||
int amdgpu_dm_connector_mode_valid(
|
||||
struct drm_connector *connector,
|
||||
struct drm_display_mode *mode);
|
||||
|
||||
void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector);
|
||||
|
||||
void amdgpu_dm_add_sink_to_freesync_module(
|
||||
struct drm_connector *connector,
|
||||
struct edid *edid);
|
||||
|
||||
void amdgpu_dm_remove_sink_from_freesync_module(
|
||||
struct drm_connector *connector);
|
||||
|
||||
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
|
||||
|
||||
#endif /* __AMDGPU_DM_TYPES_H__ */
|
||||
Reference in New Issue
Block a user