#include <drm/drm_colorop.h>
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
#define MAX_DRM_LUT_VALUE 0xFFFF
#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
 * amdgpu_dm_init_color_mod - one-time initialization of the color module
 *
 * Sets up the fixed X-axis sample-point distribution used by the color
 * module when it builds transfer-function curves. Call once at driver init.
 */
void amdgpu_dm_init_color_mod(void)
{
	setup_x_points_distribution();
}
/*
 * Convert a DRM S31.32 sign-magnitude fixed-point value to DC's
 * two's-complement struct fixed31_32 representation.
 */
static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x)
{
	const __u64 sign_bit = 1ULL << 63;
	struct fixed31_32 val;

	/* Negative: drop the sign bit and negate the magnitude. */
	if (x & sign_bit)
		x = -(x & ~sign_bit);

	val.value = x;
	return val;
}
#ifdef AMD_PRIVATE_COLOR
/* Human-readable names for AMDGPU_TRANSFER_FUNCTION_* values, exposed as
 * the enum entries of the driver-private AMD_*_TF DRM properties. */
static const char * const
amdgpu_transfer_function_names[] = {
	[AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default",
	[AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity",
	[AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF",
	[AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF",
	[AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF",
	[AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF",
	[AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF",
	[AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF",
	[AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF",
	[AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF",
	[AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF",
	[AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF",
	[AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF",
	[AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF",
};
/* Bitmask of the decoding curves (EOTFs and inverse OETFs) offered on
 * degamma/blend TF properties. */
static const u32 amdgpu_eotf =
	BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF);
/* Bitmask of the encoding curves (inverse EOTFs and OETFs) offered on
 * shaper/regamma TF properties. */
static const u32 amdgpu_inv_eotf =
	BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) |
	BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF);
/*
 * Create an enum DRM property named @name listing the transfer functions in
 * @supported_tf. DEFAULT and IDENTITY are always included.
 */
static struct drm_property *
amdgpu_create_tf_property(struct drm_device *dev,
			  const char *name,
			  u32 supported_tf)
{
	struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT];
	u32 tfs = supported_tf |
		  BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) |
		  BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY);
	int len = 0;
	int i;

	/* Collect only the advertised transfer functions. */
	for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) {
		if (!(tfs & BIT(i)))
			continue;

		enum_list[len].type = i;
		enum_list[len].name = amdgpu_transfer_function_names[i];
		len++;
	}

	return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
					name, enum_list, len);
}
/**
 * amdgpu_dm_create_color_properties - register driver-private color properties
 * @adev: amdgpu device
 *
 * Creates the AMD_PLANE_* and AMD_CRTC_* DRM properties (LUT blobs, LUT
 * size ranges, transfer-function enums, CTM blob, HDR multiplier) and
 * stores them in adev->mode_info for later attachment to planes/CRTCs.
 *
 * Return: 0 on success, -ENOMEM if any property allocation fails.
 * NOTE(review): on failure, previously created properties are not destroyed
 * here — presumably reclaimed with the drm_device; confirm.
 */
int
amdgpu_dm_create_color_properties(struct amdgpu_device *adev)
{
	struct drm_property *prop;

	/* Per-plane degamma LUT blob + its (immutable) size. */
	prop = drm_property_create(adev_to_drm(adev),
				   DRM_MODE_PROP_BLOB,
				   "AMD_PLANE_DEGAMMA_LUT", 0);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_degamma_lut_property = prop;

	prop = drm_property_create_range(adev_to_drm(adev),
					 DRM_MODE_PROP_IMMUTABLE,
					 "AMD_PLANE_DEGAMMA_LUT_SIZE",
					 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_degamma_lut_size_property = prop;

	/* Predefined degamma curves (decoding, so EOTF set). */
	prop = amdgpu_create_tf_property(adev_to_drm(adev),
					 "AMD_PLANE_DEGAMMA_TF",
					 amdgpu_eotf);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_degamma_tf_property = prop;

	/* HDR multiplier, a full-range u64 (S31.32 fixed point). */
	prop = drm_property_create_range(adev_to_drm(adev),
					 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_hdr_mult_property = prop;

	/* Per-plane 3x4 color transformation matrix blob. */
	prop = drm_property_create(adev_to_drm(adev),
				   DRM_MODE_PROP_BLOB,
				   "AMD_PLANE_CTM", 0);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_ctm_property = prop;

	/* Shaper LUT blob + size + predefined curve (encoding set). */
	prop = drm_property_create(adev_to_drm(adev),
				   DRM_MODE_PROP_BLOB,
				   "AMD_PLANE_SHAPER_LUT", 0);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_shaper_lut_property = prop;

	prop = drm_property_create_range(adev_to_drm(adev),
					 DRM_MODE_PROP_IMMUTABLE,
					 "AMD_PLANE_SHAPER_LUT_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_shaper_lut_size_property = prop;

	prop = amdgpu_create_tf_property(adev_to_drm(adev),
					 "AMD_PLANE_SHAPER_TF",
					 amdgpu_inv_eotf);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_shaper_tf_property = prop;

	/* 3D LUT blob + size. */
	prop = drm_property_create(adev_to_drm(adev),
				   DRM_MODE_PROP_BLOB,
				   "AMD_PLANE_LUT3D", 0);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_lut3d_property = prop;

	prop = drm_property_create_range(adev_to_drm(adev),
					 DRM_MODE_PROP_IMMUTABLE,
					 "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_lut3d_size_property = prop;

	/* Blend gamma LUT blob + size + predefined curve (decoding set). */
	prop = drm_property_create(adev_to_drm(adev),
				   DRM_MODE_PROP_BLOB,
				   "AMD_PLANE_BLEND_LUT", 0);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_blend_lut_property = prop;

	prop = drm_property_create_range(adev_to_drm(adev),
					 DRM_MODE_PROP_IMMUTABLE,
					 "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_blend_lut_size_property = prop;

	prop = amdgpu_create_tf_property(adev_to_drm(adev),
					 "AMD_PLANE_BLEND_TF",
					 amdgpu_eotf);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.plane_blend_tf_property = prop;

	/* CRTC-level regamma curve (encoding set). */
	prop = amdgpu_create_tf_property(adev_to_drm(adev),
					 "AMD_CRTC_REGAMMA_TF",
					 amdgpu_inv_eotf);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.regamma_tf_property = prop;

	return 0;
}
#endif
/* Return the LUT entries inside @blob (or NULL) and report the entry
 * count through @size (0 when there is no blob). */
static const struct drm_color_lut *
__extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
{
	if (!blob) {
		*size = 0;
		return NULL;
	}

	*size = drm_color_lut_size(blob);
	return (struct drm_color_lut *)blob->data;
}
/* 32-bit variant of __extract_blob_lut(): entries and count from @blob. */
static const struct drm_color_lut32 *
__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
{
	if (!blob) {
		*size = 0;
		return NULL;
	}

	*size = drm_color_lut32_size(blob);
	return (struct drm_color_lut32 *)blob->data;
}
/**
 * __is_lut_linear - check whether a gamma LUT is (close to) an identity ramp
 * @lut: LUT entries to inspect
 * @size: number of entries in @lut
 *
 * A LUT counts as linear when every entry is grayscale (R == G == B) and each
 * value lies within +/-1 of the ideal evenly spaced 16-bit ramp.
 *
 * Return: true if the LUT is an identity mapping, false otherwise.
 */
static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
{
	int i;
	uint32_t expected;
	int delta;

	/* Guard the (size - 1) divisor below: with a single entry the
	 * original expression divided by zero (undefined behavior). A LUT
	 * with fewer than two points cannot bend, so treat it as linear. */
	if (size < 2)
		return true;

	for (i = 0; i < size; i++) {
		/* All channels must carry the same value (grayscale). */
		if ((lut[i].red != lut[i].green) || (lut[i].green != lut[i].blue))
			return false;

		expected = i * MAX_DRM_LUT_VALUE / (size-1);

		/* Allow a +/-1 quantization error against the ideal ramp. */
		delta = lut[i].red - expected;
		if (delta < -1 || 1 < delta)
			return false;
	}

	return true;
}
/*
 * Copy a DRM LUT into a DC gamma table.
 *
 * Legacy (256-entry) LUTs keep their raw 16-bit integer values; atomic
 * full-size LUTs are normalized to the [0, 1] fixed-point range.
 */
static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
				  struct dc_gamma *gamma, bool is_legacy)
{
	int count = is_legacy ? MAX_COLOR_LEGACY_LUT_ENTRIES :
				MAX_COLOR_LUT_ENTRIES;
	int i;

	for (i = 0; i < count; i++) {
		uint32_t r = drm_color_lut_extract(lut[i].red, 16);
		uint32_t g = drm_color_lut_extract(lut[i].green, 16);
		uint32_t b = drm_color_lut_extract(lut[i].blue, 16);

		if (is_legacy) {
			gamma->entries.red[i] = dc_fixpt_from_int(r);
			gamma->entries.green[i] = dc_fixpt_from_int(g);
			gamma->entries.blue[i] = dc_fixpt_from_int(b);
		} else {
			gamma->entries.red[i] =
				dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
			gamma->entries.green[i] =
				dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
			gamma->entries.blue[i] =
				dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
		}
	}
}
/* Copy a 32-bit DRM LUT into a DC gamma table, normalizing each channel
 * to the [0, 1] fixed-point range. */
static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
{
	int idx;

	for (idx = 0; idx < MAX_COLOR_LUT_ENTRIES; idx++) {
		const struct drm_color_lut32 *entry = &lut[idx];

		gamma->entries.red[idx] =
			dc_fixpt_from_fraction(entry->red, MAX_DRM_LUT32_VALUE);
		gamma->entries.green[idx] =
			dc_fixpt_from_fraction(entry->green, MAX_DRM_LUT32_VALUE);
		gamma->entries.blue[idx] =
			dc_fixpt_from_fraction(entry->blue, MAX_DRM_LUT32_VALUE);
	}
}
/*
 * Expand a DRM 3x3 CTM into DC's 3x4 matrix layout: the fourth column
 * (the offset term) of each row is zeroed, the other nine entries are
 * converted from S31.32 sign-magnitude.
 */
static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
				   struct fixed31_32 *matrix)
{
	int row, col;

	for (row = 0; row < 3; row++) {
		for (col = 0; col < 4; col++) {
			int out = row * 4 + col;

			if (col == 3) {
				/* No offset in a drm_color_ctm. */
				matrix[out] = dc_fixpt_zero;
			} else {
				matrix[out] = amdgpu_dm_fixpt_from_s3132(
					ctm->matrix[row * 3 + col]);
			}
		}
	}
}
/* A drm_color_ctm_3x4 maps 1:1 onto DC's 3x4 matrix; each entry only
 * needs conversion from S31.32 sign-magnitude. */
static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm,
				       struct fixed31_32 *matrix)
{
	int idx;

	for (idx = 0; idx < 12; idx++)
		matrix[idx] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[idx]);
}
/*
 * Program @func from a legacy 256-entry gamma LUT.
 *
 * Return: 0 on success, -ENOMEM if allocation or curve calculation fails.
 */
static int __set_legacy_tf(struct dc_transfer_func *func,
			   const struct drm_color_lut *lut, uint32_t lut_size,
			   bool has_rom)
{
	struct calculate_buffer cal_buffer = {0};
	struct dc_gamma *gamma;
	bool ok;

	ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);

	cal_buffer.buffer_index = -1;

	gamma = dc_create_gamma();
	if (!gamma)
		return -ENOMEM;

	gamma->type = GAMMA_RGB_256;
	gamma->num_entries = lut_size;
	__drm_lut_to_dc_gamma(lut, gamma, true);

	ok = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
						NULL, &cal_buffer);
	dc_gamma_release(&gamma);

	return ok ? 0 : -ENOMEM;
}
/*
 * Calculate output transfer-function distributed points for @func, optionally
 * blended with a custom LUT. A LINEAR predefined TF means the LUT itself is
 * the curve (degamma path); otherwise the LUT shapes the regamma result.
 *
 * Return: 0 on success, -ENOMEM on allocation or calculation failure.
 */
static int __set_output_tf(struct dc_transfer_func *func,
			   const struct drm_color_lut *lut, uint32_t lut_size,
			   bool has_rom)
{
	struct calculate_buffer cal_buffer = {0};
	struct dc_gamma *gamma = NULL;
	bool ok;

	cal_buffer.buffer_index = -1;

	if (lut_size) {
		ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);

		gamma = dc_create_gamma();
		if (!gamma)
			return -ENOMEM;

		gamma->num_entries = lut_size;
		__drm_lut_to_dc_gamma(lut, gamma, false);
	}

	if (func->tf == TRANSFER_FUNCTION_LINEAR) {
		/* The LUT is the whole curve. */
		if (gamma)
			gamma->type = GAMMA_CUSTOM;
		ok = mod_color_calculate_degamma_params(NULL, func,
							gamma, gamma != NULL);
	} else {
		/* LUT applied on top of the predefined curve. */
		if (gamma)
			gamma->type = GAMMA_CS_TFM_1D;
		ok = mod_color_calculate_regamma_params(func, gamma,
							gamma != NULL, has_rom,
							NULL, &cal_buffer);
	}

	if (gamma)
		dc_gamma_release(&gamma);

	return ok ? 0 : -ENOMEM;
}
/*
 * 32-bit LUT variant of __set_output_tf(): same curve-calculation logic,
 * sourcing entries from a drm_color_lut32 blob.
 *
 * Return: 0 on success, -ENOMEM on allocation or calculation failure.
 */
static int __set_output_tf_32(struct dc_transfer_func *func,
			      const struct drm_color_lut32 *lut, uint32_t lut_size,
			      bool has_rom)
{
	struct calculate_buffer cal_buffer = {0};
	struct dc_gamma *gamma = NULL;
	bool ok;

	cal_buffer.buffer_index = -1;

	if (lut_size) {
		gamma = dc_create_gamma();
		if (!gamma)
			return -ENOMEM;

		gamma->num_entries = lut_size;
		__drm_lut32_to_dc_gamma(lut, gamma);
	}

	if (func->tf == TRANSFER_FUNCTION_LINEAR) {
		/* The LUT is the whole curve. */
		if (gamma)
			gamma->type = GAMMA_CUSTOM;
		ok = mod_color_calculate_degamma_params(NULL, func,
							gamma, gamma != NULL);
	} else {
		/* LUT applied on top of the predefined curve. */
		if (gamma)
			gamma->type = GAMMA_CS_TFM_1D;
		ok = mod_color_calculate_regamma_params(func, gamma,
							gamma != NULL, has_rom,
							NULL, &cal_buffer);
	}

	if (gamma)
		dc_gamma_release(&gamma);

	return ok ? 0 : -ENOMEM;
}
/*
 * Program the stream output TF from atomic regamma state. With neither a
 * LUT nor a non-linear predefined curve, the block is bypassed.
 *
 * Return: 0 on success, negative errno from __set_output_tf() otherwise.
 */
static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
					const struct drm_color_lut *regamma_lut,
					uint32_t regamma_size, bool has_rom,
					enum dc_transfer_func_predefined tf)
{
	if (!regamma_size && tf == TRANSFER_FUNCTION_LINEAR) {
		/* Nothing to apply: leave the output stage in bypass. */
		out_tf->type = TF_TYPE_BYPASS;
		out_tf->tf = TRANSFER_FUNCTION_LINEAR;
		return 0;
	}

	out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
	out_tf->tf = tf;
	out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;

	return __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom);
}
/*
 * Calculate input (degamma) transfer-function parameters for @func, using a
 * custom LUT when one is provided.
 *
 * Return: 0 on success, -ENOMEM on allocation or calculation failure.
 */
static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func,
			  const struct drm_color_lut *lut, uint32_t lut_size)
{
	struct dc_gamma *gamma = NULL;
	bool ok;

	if (lut_size) {
		gamma = dc_create_gamma();
		if (!gamma)
			return -ENOMEM;

		gamma->type = GAMMA_CUSTOM;
		gamma->num_entries = lut_size;
		__drm_lut_to_dc_gamma(lut, gamma, false);
	}

	ok = mod_color_calculate_degamma_params(caps, func, gamma,
						gamma != NULL);

	if (gamma)
		dc_gamma_release(&gamma);

	return ok ? 0 : -ENOMEM;
}
/*
 * 32-bit LUT variant of __set_input_tf().
 *
 * Return: 0 on success, -ENOMEM on allocation or calculation failure.
 */
static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
			     const struct drm_color_lut32 *lut, uint32_t lut_size)
{
	struct dc_gamma *gamma = NULL;
	bool ok;

	if (lut_size) {
		gamma = dc_create_gamma();
		if (!gamma)
			return -ENOMEM;

		gamma->type = GAMMA_CUSTOM;
		gamma->num_entries = lut_size;
		__drm_lut32_to_dc_gamma(lut, gamma);
	}

	ok = mod_color_calculate_degamma_params(caps, func, gamma,
						gamma != NULL);

	if (gamma)
		dc_gamma_release(&gamma);

	return ok ? 0 : -ENOMEM;
}
/*
 * Map a driver-private AMDGPU transfer function onto DC's predefined curve.
 * Each curve and its inverse share the same DC value; DEFAULT, IDENTITY and
 * anything unrecognized map to LINEAR.
 */
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
	if (tf == AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF ||
	    tf == AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF)
		return TRANSFER_FUNCTION_SRGB;

	if (tf == AMDGPU_TRANSFER_FUNCTION_BT709_OETF ||
	    tf == AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF)
		return TRANSFER_FUNCTION_BT709;

	if (tf == AMDGPU_TRANSFER_FUNCTION_PQ_EOTF ||
	    tf == AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF)
		return TRANSFER_FUNCTION_PQ;

	if (tf == AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF ||
	    tf == AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF)
		return TRANSFER_FUNCTION_GAMMA22;

	if (tf == AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF ||
	    tf == AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF)
		return TRANSFER_FUNCTION_GAMMA24;

	if (tf == AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF ||
	    tf == AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF)
		return TRANSFER_FUNCTION_GAMMA26;

	/* DEFAULT, IDENTITY and unknown values. */
	return TRANSFER_FUNCTION_LINEAR;
}
/*
 * Map a DRM colorop 1D-curve type onto DC's predefined transfer function.
 * Forward and inverse variants share the same DC curve; anything else maps
 * to LINEAR.
 */
static enum dc_transfer_func_predefined
amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
{
	if (tf == DRM_COLOROP_1D_CURVE_SRGB_EOTF ||
	    tf == DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF)
		return TRANSFER_FUNCTION_SRGB;

	if (tf == DRM_COLOROP_1D_CURVE_PQ_125_EOTF ||
	    tf == DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF)
		return TRANSFER_FUNCTION_PQ;

	if (tf == DRM_COLOROP_1D_CURVE_BT2020_INV_OETF ||
	    tf == DRM_COLOROP_1D_CURVE_BT2020_OETF)
		return TRANSFER_FUNCTION_BT709;

	if (tf == DRM_COLOROP_1D_CURVE_GAMMA22 ||
	    tf == DRM_COLOROP_1D_CURVE_GAMMA22_INV)
		return TRANSFER_FUNCTION_GAMMA22;

	return TRANSFER_FUNCTION_LINEAR;
}
/* Quantize one 16-bit DRM LUT entry down to @bit_precision bits per channel
 * and store it in a DC RGB entry. */
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
				const struct drm_color_lut lut,
				int bit_precision)
{
	rgb->red = drm_color_lut_extract(lut.red, bit_precision);
	rgb->green = drm_color_lut_extract(lut.green, bit_precision);
	rgb->blue = drm_color_lut_extract(lut.blue, bit_precision);
}
/*
 * Convert a DRM 3D LUT blob into DC's tetrahedral layout.
 *
 * DC splits the 3D LUT across four sub-tables (lut0..lut3); consecutive
 * blob entries are distributed round-robin over them. The loop stops four
 * entries early and the single leftover entry (sizes of the form 4*k + 1,
 * e.g. 17^3 = 4913) is stored in lut0 after the loop.
 *
 * NOTE(review): 'i' (int) is compared against 'lut3d_size - 4' (unsigned),
 * which wraps for lut3d_size < 4 — callers are assumed to pass a full-size
 * LUT validated elsewhere; confirm.
 */
static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
				    uint32_t lut3d_size,
				    struct tetrahedral_params *params,
				    bool use_tetrahedral_9,
				    int bit_depth)
{
	struct dc_rgb *lut0;
	struct dc_rgb *lut1;
	struct dc_rgb *lut2;
	struct dc_rgb *lut3;
	int lut_i, i;

	/* Select the 9^3 or 17^3 sub-table set. */
	if (use_tetrahedral_9) {
		lut0 = params->tetrahedral_9.lut0;
		lut1 = params->tetrahedral_9.lut1;
		lut2 = params->tetrahedral_9.lut2;
		lut3 = params->tetrahedral_9.lut3;
	} else {
		lut0 = params->tetrahedral_17.lut0;
		lut1 = params->tetrahedral_17.lut1;
		lut2 = params->tetrahedral_17.lut2;
		lut3 = params->tetrahedral_17.lut3;
	}

	/* Deal four entries per iteration, one to each sub-table. */
	for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
		__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
		__to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth);
		__to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth);
		__to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth);
	}

	/* The final leftover entry lands in lut0. */
	__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
/* Quantize one 32-bit DRM LUT entry down to @bit_precision bits per channel
 * and store it in a DC RGB entry. */
static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
				   const struct drm_color_lut32 lut,
				   int bit_precision)
{
	rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
	rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
	rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
}
/*
 * 32-bit variant of __drm_3dlut_to_dc_3dlut(): distribute a drm_color_lut32
 * blob round-robin across DC's four tetrahedral sub-tables, with the single
 * leftover entry (sizes of the form 4*k + 1) stored in lut0 after the loop.
 *
 * NOTE(review): same signed/unsigned loop-bound wrap hazard as the 16-bit
 * variant for lut3d_size < 4 — callers are assumed to validate the size.
 */
static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
				      uint32_t lut3d_size,
				      struct tetrahedral_params *params,
				      bool use_tetrahedral_9,
				      int bit_depth)
{
	struct dc_rgb *lut0;
	struct dc_rgb *lut1;
	struct dc_rgb *lut2;
	struct dc_rgb *lut3;
	int lut_i, i;

	/* Select the 9^3 or 17^3 sub-table set. */
	if (use_tetrahedral_9) {
		lut0 = params->tetrahedral_9.lut0;
		lut1 = params->tetrahedral_9.lut1;
		lut2 = params->tetrahedral_9.lut2;
		lut3 = params->tetrahedral_9.lut3;
	} else {
		lut0 = params->tetrahedral_17.lut0;
		lut1 = params->tetrahedral_17.lut1;
		lut2 = params->tetrahedral_17.lut2;
		lut3 = params->tetrahedral_17.lut3;
	}

	/* Deal four entries per iteration, one to each sub-table. */
	for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
		__to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
		__to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
		__to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
		__to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
	}

	/* The final leftover entry lands in lut0. */
	__to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
}
/* Program (or clear) the DC 3D LUT from an atomic-state DRM 3D LUT blob. */
static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d,
				   uint32_t drm_lut3d_size,
				   struct dc_3dlut *lut)
{
	if (!drm_lut3d_size) {
		/* No blob: mark the hardware 3D LUT as unconfigured. */
		lut->state.bits.initialized = 0;
		return;
	}

	/* Always use the 17^3, 12-bit layout. */
	lut->lut_3d.use_tetrahedral_9 = false;
	lut->lut_3d.use_12bits = true;
	lut->state.bits.initialized = 1;
	__drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
				lut->lut_3d.use_tetrahedral_9,
				MAX_COLOR_3DLUT_BITDEPTH);
}
/*
 * Program the shaper transfer function from atomic state. Without a LUT and
 * with a LINEAR predefined curve, the shaper is bypassed.
 *
 * Return: 0 on success, negative errno from __set_output_tf() otherwise.
 */
static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut,
				       bool has_rom,
				       enum dc_transfer_func_predefined tf,
				       uint32_t shaper_size,
				       struct dc_transfer_func *func_shaper)
{
	if (!shaper_size && tf == TRANSFER_FUNCTION_LINEAR) {
		func_shaper->type = TF_TYPE_BYPASS;
		func_shaper->tf = TRANSFER_FUNCTION_LINEAR;
		return 0;
	}

	func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS;
	func_shaper->tf = tf;
	func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;

	return __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom);
}
/*
 * Program the blend transfer function from atomic state. Without a LUT and
 * with a LINEAR predefined curve, the blend stage is bypassed.
 *
 * Return: 0 on success, negative errno from __set_input_tf() otherwise.
 */
static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut,
				      bool has_rom,
				      enum dc_transfer_func_predefined tf,
				      uint32_t blend_size,
				      struct dc_transfer_func *func_blend)
{
	if (!blend_size && tf == TRANSFER_FUNCTION_LINEAR) {
		func_blend->type = TF_TYPE_BYPASS;
		func_blend->tf = TRANSFER_FUNCTION_LINEAR;
		return 0;
	}

	func_blend->type = TF_TYPE_DISTRIBUTED_POINTS;
	func_blend->tf = tf;
	func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;

	return __set_input_tf(NULL, func_blend, blend_lut, blend_size);
}
/**
 * amdgpu_dm_verify_lut3d_size - validate plane shaper and 3D LUT blob sizes
 * @adev: amdgpu device
 * @plane_state: plane state carrying the driver-private LUT blobs
 *
 * Hardware without 3D LUT support requires both blobs to be absent (expected
 * size 0); otherwise the shaper must be full-size and the 3D LUT cubic.
 *
 * Return: 0 when sizes are acceptable, -EINVAL otherwise.
 */
int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
				struct drm_plane_state *plane_state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
	const uint32_t dim_size = MAX_COLOR_3DLUT_SIZE;
	const struct drm_color_lut *shaper, *lut3d;
	uint32_t want, got;
	bool has_3dlut;

	has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut ||
		    adev->dm.dc->caps.color.mpc.preblend;

	shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &got);
	want = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
	if (shaper && got != want) {
		drm_dbg(&adev->ddev,
			"Invalid Shaper LUT size. Should be %u but got %u.\n",
			want, got);
		return -EINVAL;
	}

	lut3d = __extract_blob_lut(dm_plane_state->lut3d, &got);
	want = has_3dlut ? dim_size * dim_size * dim_size : 0;
	if (lut3d && got != want) {
		drm_dbg(&adev->ddev,
			"Invalid 3D LUT size. Should be %u but got %u.\n",
			want, got);
		return -EINVAL;
	}

	return 0;
}
/**
 * amdgpu_dm_verify_lut_sizes - validate CRTC degamma/gamma LUT blob sizes
 * @crtc_state: CRTC state carrying the LUT blobs
 *
 * Degamma must be the full atomic size; gamma may also be the 256-entry
 * legacy size.
 *
 * Return: 0 when sizes are acceptable, -EINVAL otherwise.
 */
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
{
	const struct drm_color_lut *entries;
	uint32_t count = 0;

	entries = __extract_blob_lut(crtc_state->degamma_lut, &count);
	if (entries && count != MAX_COLOR_LUT_ENTRIES) {
		DRM_DEBUG_DRIVER(
			"Invalid Degamma LUT size. Should be %u but got %u.\n",
			MAX_COLOR_LUT_ENTRIES, count);
		return -EINVAL;
	}

	entries = __extract_blob_lut(crtc_state->gamma_lut, &count);
	if (entries && count != MAX_COLOR_LUT_ENTRIES &&
	    count != MAX_COLOR_LEGACY_LUT_ENTRIES) {
		DRM_DEBUG_DRIVER(
			"Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
			MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
			count);
		return -EINVAL;
	}

	return 0;
}
/**
 * amdgpu_dm_check_crtc_color_mgmt - validate and optionally program CRTC gamma
 * @crtc: DM CRTC state to inspect
 * @check_only: when true, compute into a scratch transfer func and discard it
 *
 * Derives the output transfer function from the CRTC's gamma LUT and
 * regamma TF property, distinguishing the 256-entry legacy gamma path from
 * the full-size atomic path, and records whether a non-identity degamma LUT
 * is present in crtc->cm_has_degamma.
 *
 * Return: 0 on success, negative errno on allocation/validation failure.
 */
int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
				    bool check_only)
{
	struct dc_stream_state *stream = crtc->stream;
	struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
	/* ASICs up to Raven have a hardware sRGB ROM usable for regamma. */
	bool has_rom = adev->asic_type <= CHIP_RAVEN;
	struct dc_transfer_func *out_tf;
	const struct drm_color_lut *degamma_lut, *regamma_lut;
	uint32_t degamma_size, regamma_size;
	bool has_regamma, has_degamma;
	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR;
	bool is_legacy;
	int r;

	tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf);

	r = amdgpu_dm_verify_lut_sizes(&crtc->base);
	if (r)
		return r;

	degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
	regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);

	/* A (near-)identity LUT is treated as absent. */
	has_degamma =
		degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
	has_regamma =
		regamma_lut && !__is_lut_linear(regamma_lut, regamma_size);

	/* A 256-entry gamma blob means the legacy (non-atomic) API is used. */
	is_legacy = regamma_size == MAX_COLOR_LEGACY_LUT_ENTRIES;

	crtc->cm_has_degamma = false;
	crtc->cm_is_degamma_srgb = false;

	if (check_only) {
		/* Scratch TF so validation never touches the live stream.
		 * NOTE(review): kvzalloc_obj() is assumed to be a zeroing
		 * allocator helper paired with kvfree() below — confirm it
		 * exists in this tree. */
		out_tf = kvzalloc_obj(*out_tf);
		if (!out_tf)
			return -ENOMEM;
	} else {
		out_tf = &stream->out_transfer_func;
	}

	if (is_legacy) {
		/* Legacy regamma implies sRGB degamma on the planes. */
		crtc->cm_is_degamma_srgb = true;
		out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		out_tf->tf = TRANSFER_FUNCTION_SRGB;
		r = __set_legacy_tf(out_tf, regamma_lut,
				    regamma_size, has_rom);
	} else {
		/* Identity LUTs are dropped; only the TF property remains. */
		regamma_size = has_regamma ? regamma_size : 0;
		r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
						 regamma_size, has_rom, tf);
	}

	crtc->cm_has_degamma = has_degamma;

	if (check_only)
		kvfree(out_tf);

	return r;
}
/**
 * amdgpu_dm_update_crtc_color_mgmt - program stream gamma and CTM state
 * @crtc: DM CRTC state to apply
 *
 * Programs the output transfer function via the check helper (non-check
 * mode), then enables or disables the gamut remap matrix depending on
 * whether a CTM blob is attached.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
{
	struct dc_stream_state *stream = crtc->stream;
	int ret;

	ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
	if (ret)
		return ret;

	/* The CSC adjustment path is never used here. */
	stream->csc_color_matrix.enable_adjustment = false;

	if (!crtc->base.ctm) {
		stream->gamut_remap_matrix.enable_remap = false;
		return 0;
	}

	__drm_ctm_to_dc_matrix((struct drm_color_ctm *)crtc->base.ctm->data,
			       stream->gamut_remap_matrix.matrix);
	stream->gamut_remap_matrix.enable_remap = true;

	return 0;
}
/**
 * map_crtc_degamma_to_dc_plane - apply CRTC degamma state to a DC plane
 * @crtc: DM CRTC state providing the degamma LUT and flags
 * @dc_plane_state: plane whose input transfer function is programmed
 * @caps: DC color caps passed through to the degamma calculation
 *
 * Return: 0 on success, -ENOMEM when curve calculation fails.
 */
static int
map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
			     struct dc_plane_state *dc_plane_state,
			     struct dc_color_caps *caps)
{
	const struct drm_color_lut *degamma_lut;
	/* Default decode curve; YCbCr video surfaces use BT.709 instead. */
	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
	uint32_t degamma_size;
	int r;

	switch (dc_plane_state->format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		tf = TRANSFER_FUNCTION_BT709;
		break;
	default:
		break;
	}

	if (crtc->cm_has_degamma) {
		degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
						 &degamma_size);
		ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);

		dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;

		/* With legacy gamma (cm_is_degamma_srgb) the LUT is layered
		 * on the predefined decode curve; otherwise the LUT alone
		 * defines the curve (LINEAR base). */
		if (crtc->cm_is_degamma_srgb)
			dc_plane_state->in_transfer_func.tf = tf;
		else
			dc_plane_state->in_transfer_func.tf =
				TRANSFER_FUNCTION_LINEAR;

		r = __set_input_tf(caps, &dc_plane_state->in_transfer_func,
				   degamma_lut, degamma_size);
		if (r)
			return r;
	} else {
		dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED;
		dc_plane_state->in_transfer_func.tf = tf;

		/* Non-sRGB predefined curves need their parameters computed;
		 * sRGB is handled without this step. */
		if (tf != TRANSFER_FUNCTION_SRGB &&
		    !mod_color_calculate_degamma_params(caps,
							&dc_plane_state->in_transfer_func,
							NULL, false))
			return -ENOMEM;
	}

	return 0;
}
/**
 * __set_dm_plane_degamma - apply per-plane degamma properties to a DC plane
 * @plane_state: DRM plane state carrying the driver-private properties
 * @dc_plane_state: plane whose input transfer function is programmed
 * @color_caps: DC color caps passed through to the degamma calculation
 *
 * Return: 0 on success, -ENOMEM on calculation failure, or -EINVAL when
 * neither a degamma LUT nor a non-default TF is set. NOTE(review): the
 * -EINVAL presumably signals "no plane degamma configured" to the caller
 * rather than a hard error — confirm against the call site.
 */
static int
__set_dm_plane_degamma(struct drm_plane_state *plane_state,
		       struct dc_plane_state *dc_plane_state,
		       struct dc_color_caps *color_caps)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
	const struct drm_color_lut *degamma_lut;
	enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
	uint32_t degamma_size;
	bool has_degamma_lut;
	int ret;

	degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut,
					 &degamma_size);

	/* Identity LUTs are treated as absent. */
	has_degamma_lut = degamma_lut &&
			  !__is_lut_linear(degamma_lut, degamma_size);

	tf = dm_plane_state->degamma_tf;

	/* Nothing configured on this plane. */
	if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT)
		return -EINVAL;

	dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf);

	if (has_degamma_lut) {
		ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);

		dc_plane_state->in_transfer_func.type =
			TF_TYPE_DISTRIBUTED_POINTS;

		ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func,
				     degamma_lut, degamma_size);
		if (ret)
			return ret;
	} else {
		/* TF-only: compute the predefined curve's parameters. */
		dc_plane_state->in_transfer_func.type =
			TF_TYPE_PREDEFINED;

		if (!mod_color_calculate_degamma_params(color_caps,
							&dc_plane_state->in_transfer_func, NULL, false))
			return -ENOMEM;
	}
	return 0;
}
/*
 * Program the plane input transfer function from a 1D-curve degamma colorop
 * state. Bypassed colorops reset the TF to linear bypass.
 *
 * Return: 0 on success, -EINVAL for the wrong colorop or curve type.
 */
static int
__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
			     struct drm_colorop_state *colorop_state)
{
	struct drm_colorop *colorop = colorop_state->colorop;
	struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;

	if (colorop->type != DRM_COLOROP_1D_CURVE)
		return -EINVAL;

	if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
		return -EINVAL;

	if (colorop_state->bypass) {
		tf->type = TF_TYPE_BYPASS;
		tf->tf = TRANSFER_FUNCTION_LINEAR;
		return 0;
	}

	drm_dbg(colorop->dev, "Degamma colorop with ID: %d\n", colorop->base.id);

	tf->type = TF_TYPE_PREDEFINED;
	tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);

	return 0;
}
/*
 * Locate the new atomic state for the degamma colorop and hand it to
 * __set_colorop_in_tf_1d_curve().
 *
 * Return: 0 on success, -EINVAL when no matching supported state exists.
 */
static int
__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
			       struct dc_plane_state *dc_plane_state,
			       struct drm_colorop *colorop)
{
	struct drm_atomic_state *state = plane_state->state;
	struct drm_colorop_state *match = NULL, *new_state;
	struct drm_colorop *target = colorop;
	int i = 0;

	/* Find this colorop's new state, provided it carries a supported
	 * degamma 1D curve type. */
	for_each_new_colorop_in_state(state, colorop, new_state, i) {
		if (new_state->colorop != target)
			continue;
		if (!(BIT(new_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
			continue;

		match = new_state;
		break;
	}

	if (!match)
		return -EINVAL;

	return __set_colorop_in_tf_1d_curve(dc_plane_state, match);
}
static int
__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct drm_atomic_state *state = plane_state->state;
const struct drm_device *dev = colorop->dev;
const struct drm_property_blob *blob;
struct drm_color_ctm_3x4 *ctm = NULL;
int i = 0;
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
blob = colorop_state->data;
if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
ctm = (struct drm_color_ctm_3x4 *) blob->data;
__drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
dc_plane_state->gamut_remap_matrix.enable_remap = true;
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
} else {
drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n",
blob->length, sizeof(struct drm_color_ctm_3x4));
return -EINVAL;
}
}
return 0;
}
/*
 * Apply a multiplier colorop to the plane's HDR multiplier. A missing or
 * bypassed colorop leaves the plane state untouched.
 *
 * Return: 0 always.
 */
static int
__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
				  struct dc_plane_state *dc_plane_state,
				  struct drm_colorop *colorop)
{
	struct drm_atomic_state *state = plane_state->state;
	struct drm_colorop_state *match = NULL, *new_state;
	struct drm_colorop *target = colorop;
	int i = 0;

	for_each_new_colorop_in_state(state, colorop, new_state, i) {
		if (new_state->colorop == target &&
		    new_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
			match = new_state;
			break;
		}
	}

	if (match && !match->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
		drm_dbg(colorop->dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
		/* Multiplier is delivered as S31.32 sign-magnitude. */
		dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(match->multiplier);
	}

	return 0;
}
/**
 * __set_dm_plane_colorop_shaper - program the plane shaper from colorops
 * @plane_state: DRM plane state (source of the atomic state)
 * @dc_plane_state: plane whose in_shaper_func is programmed
 * @colorop: first colorop of the shaper pair (TF, then 1D LUT via ->next)
 *
 * The shaper is described by two chained colorops: a predefined 1D-curve TF
 * followed by a custom 1D LUT. Either (or both) may be active; if neither
 * is, the shaper is left in bypass.
 *
 * Return: 0 on success, -EINVAL when the LUT colorop is missing from the
 * chain, or a negative errno from the TF calculation.
 */
static int
__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
			      struct dc_plane_state *dc_plane_state,
			      struct drm_colorop *colorop)
{
	struct drm_colorop *old_colorop;
	struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
	struct drm_atomic_state *state = plane_state->state;
	enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
	struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
	const struct drm_color_lut32 *shaper_lut;
	struct drm_device *dev = colorop->dev;
	bool enabled = false;
	u32 shaper_size;
	int i = 0, ret = 0;

	/* 1st op: fixed-function shaper TF */
	old_colorop = colorop;
	for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
		if (new_colorop_state->colorop == old_colorop &&
		    (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
			colorop_state = new_colorop_state;
			break;
		}
	}

	if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
		drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
		tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
		tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
		/* Build the curve purely from the predefined TF — pass NULL,
		 * not the integer 0, for the absent LUT pointer. */
		ret = __set_output_tf(tf, NULL, 0, false);
		if (ret)
			return ret;
		enabled = true;
	}

	/* 2nd op: custom shaper 1D LUT */
	colorop = old_colorop->next;
	if (!colorop) {
		drm_dbg(dev, "no Shaper LUT colorop found\n");
		return -EINVAL;
	}

	old_colorop = colorop;
	for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
		if (new_colorop_state->colorop == old_colorop &&
		    new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
			colorop_state = new_colorop_state;
			break;
		}
	}

	if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
		drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
		tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		tf->tf = default_tf;
		tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
		shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
		shaper_size = shaper_lut != NULL ? shaper_size : 0;
		/* NOTE(review): a size mismatch silently skips the LUT;
		 * consider returning an error instead. */
		if (shaper_size == colorop->size) {
			ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
			if (ret)
				return ret;
			enabled = true;
		}
	}

	/* Neither op active: bypass the shaper block. */
	if (!enabled)
		tf->type = TF_TYPE_BYPASS;

	return 0;
}
/*
 * Program the DC 3D LUT from a colorop 32-bit 3D LUT blob (17^3, 12-bit
 * layout). An empty blob clears the LUT and is reported as an error.
 *
 * Return: 0 on success, -EINVAL when no LUT data was supplied.
 */
static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
			       uint32_t drm_lut3d_size,
			       struct dc_3dlut *lut)
{
	if (!drm_lut3d_size) {
		lut->state.bits.initialized = 0;
		return -EINVAL;
	}

	lut->lut_3d.use_12bits = true;
	lut->lut_3d.use_tetrahedral_9 = false;
	lut->state.bits.initialized = 1;
	__drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
				  lut->lut_3d.use_tetrahedral_9, 12);

	return 0;
}
/**
 * __set_dm_plane_colorop_3dlut - apply a 3D LUT colorop to a DC plane
 * @plane_state: DRM plane state (source of the atomic state)
 * @dc_plane_state: plane whose lut3d_func (and possibly shaper) is programmed
 * @colorop: the 3D LUT colorop to look up in the atomic state
 *
 * When the 3D LUT is enabled but no shaper was programmed earlier, a linear
 * shaper curve is generated so the 3D LUT input is well-defined.
 *
 * Return: 0 on success or no-op, -EINVAL when hardware lacks 3D LUT support
 * or the blob is empty, or a negative errno from the shaper calculation.
 */
static int
__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
			     struct dc_plane_state *dc_plane_state,
			     struct drm_colorop *colorop)
{
	struct drm_colorop *old_colorop;
	struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
	struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
	struct drm_atomic_state *state = plane_state->state;
	const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
	/* 3D LUT may live in the DPP or the MPC pre-blend block. */
	bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
	const struct drm_device *dev = colorop->dev;
	const struct drm_color_lut32 *lut3d;
	uint32_t lut3d_size;
	int i = 0, ret = 0;

	/* Find this colorop's new state in the atomic commit. */
	old_colorop = colorop;
	for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
		if (new_colorop_state->colorop == old_colorop &&
		    new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
			colorop_state = new_colorop_state;
			break;
		}
	}

	if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
		if (!has_3dlut) {
			drm_dbg(dev, "3D LUT is not supported by hardware\n");
			return -EINVAL;
		}

		drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
		lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
		lut3d_size = lut3d != NULL ? lut3d_size : 0;

		ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
		if (ret) {
			drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n",
				colorop->base.id, lut3d_size);
			return ret;
		}

		/* No shaper configured: insert a linear one ahead of the
		 * 3D LUT. */
		if (tf->type == TF_TYPE_BYPASS) {
			tf->type = TF_TYPE_DISTRIBUTED_POINTS;
			tf->tf = TRANSFER_FUNCTION_LINEAR;
			tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
			ret = __set_output_tf_32(tf, NULL, 0, false);
		}
	}

	return ret;
}
/**
 * __set_dm_plane_colorop_blend - program the plane blend TF from colorops
 * @plane_state: DRM plane state (source of the atomic state)
 * @dc_plane_state: plane whose blend_tf is programmed
 * @colorop: first colorop of the blend pair (TF, then 1D LUT via ->next)
 *
 * Like the shaper, blend gamma is described by a predefined 1D-curve TF
 * colorop chained to a custom 1D LUT colorop.
 *
 * Return: 0 on success, -EINVAL when the LUT colorop is missing from the
 * chain, or a negative errno from the TF calculation.
 */
static int
__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
			     struct dc_plane_state *dc_plane_state,
			     struct drm_colorop *colorop)
{
	struct drm_colorop *old_colorop;
	struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
	struct drm_atomic_state *state = plane_state->state;
	enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
	struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
	const struct drm_color_lut32 *blend_lut = NULL;
	struct drm_device *dev = colorop->dev;
	uint32_t blend_size = 0;
	int i = 0, ret;

	/* 1st op: fixed-function blend TF */
	old_colorop = colorop;
	for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
		if (new_colorop_state->colorop == old_colorop &&
		    (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
			colorop_state = new_colorop_state;
			break;
		}
	}

	if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
	    (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
		drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
		tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
		tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
		/* Fix: the calculation can fail (-ENOMEM); don't ignore it. */
		ret = __set_input_tf_32(NULL, tf, blend_lut, blend_size);
		if (ret)
			return ret;
	}

	/* 2nd op: custom blend 1D LUT */
	colorop = old_colorop->next;
	if (!colorop) {
		drm_dbg(dev, "no Blend LUT colorop found\n");
		return -EINVAL;
	}

	old_colorop = colorop;
	for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
		if (new_colorop_state->colorop == old_colorop &&
		    new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
			colorop_state = new_colorop_state;
			break;
		}
	}

	if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
	    (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
		drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
		tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		tf->tf = default_tf;
		tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
		blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
		blend_size = blend_lut != NULL ? blend_size : 0;
		/* NOTE(review): a size mismatch silently skips the LUT. */
		if (blend_size == colorop->size) {
			/* Fix: propagate calculation failures. */
			ret = __set_input_tf_32(NULL, tf, blend_lut, blend_size);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut;
uint32_t shaper_size, lut3d_size, blend_size;
int ret;
dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult);
shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size);
shaper_size = shaper_lut != NULL ? shaper_size : 0;
shaper_tf = dm_plane_state->shaper_tf;
lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size);
lut3d_size = lut3d != NULL ? lut3d_size : 0;
amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false,
amdgpu_tf_to_dc_tf(shaper_tf),
shaper_size,
&dc_plane_state->in_shaper_func);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d shaper LUT failed.\n",
plane_state->plane->index);
return ret;
}
blend_tf = dm_plane_state->blend_tf;
blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size);
blend_size = blend_lut != NULL ? blend_size : 0;
ret = amdgpu_dm_atomic_blend_lut(blend_lut, false,
amdgpu_tf_to_dc_tf(blend_tf),
blend_size, &dc_plane_state->blend_tf);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d gamma lut failed.\n",
plane_state->plane->index);
return ret;
}
return 0;
}
/*
 * amdgpu_dm_plane_set_colorop_properties - walk the plane's colorop pipeline
 * in its fixed order (degamma -> multiplier -> 3x4 CTM -> shaper TF ->
 * shaper LUT -> 3D LUT -> blend TF -> blend LUT) and program each stage into
 * the DC plane state.  The shaper/3D LUT stages are only consumed when the
 * hardware exposes a 3D LUT (DPP 3D LUT or MPC pre-blend).
 *
 * Returns 0 on success, -EINVAL if the pipeline is shorter than expected, or
 * the error from the first stage that fails to program.
 */
static int
amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
				       struct dc_plane_state *dc_plane_state)
{
	struct drm_colorop *colorop = plane_state->color_pipeline;
	struct drm_device *dev = plane_state->plane->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
	int ret;

	if (!colorop)
		return -EINVAL;

	/* 1D curve - degamma TF */
	ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
	if (ret)
		return ret;

	/* Multiplier */
	colorop = colorop->next;
	if (!colorop) {
		drm_dbg(dev, "no multiplier colorop found\n");
		return -EINVAL;
	}

	ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
	if (ret)
		return ret;

	/* 3x4 matrix (CTM) */
	colorop = colorop->next;
	if (!colorop) {
		drm_dbg(dev, "no 3x4 matrix colorop found\n");
		return -EINVAL;
	}

	ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
	if (ret)
		return ret;

	if (has_3dlut) {
		/* Shaper TF (the helper also consumes the following LUT) */
		colorop = colorop->next;
		if (!colorop) {
			drm_dbg(dev, "no Shaper TF colorop found\n");
			return -EINVAL;
		}

		ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
		if (ret)
			return ret;

		/* Step over the shaper LUT colorop handled above. */
		colorop = colorop->next;
		if (!colorop) {
			drm_dbg(dev, "no Shaper LUT colorop found\n");
			return -EINVAL;
		}

		/* 3D LUT */
		colorop = colorop->next;
		if (!colorop) {
			drm_dbg(dev, "no 3D LUT colorop found\n");
			return -EINVAL;
		}

		ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
		if (ret)
			return ret;
	}

	/* Blend TF (the helper also consumes the following LUT) */
	colorop = colorop->next;
	if (!colorop) {
		drm_dbg(dev, "no Blend TF colorop found\n");
		return -EINVAL;
	}

	ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
	if (ret)
		return ret;

	/* Step over the blend LUT colorop handled above. */
	colorop = colorop->next;
	if (!colorop) {
		drm_dbg(dev, "no Blend LUT colorop found\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * amdgpu_dm_update_plane_color_mgmt - map a plane's DRM color state onto the
 * DC plane state: degamma (plane- or CRTC-sourced), 3x4 CTM, and either the
 * new colorop pipeline or the legacy per-plane color properties.
 *
 * Returns 0 on success or a negative errno (validation or programming
 * failure).
 */
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct drm_plane_state *plane_state,
				      struct dc_plane_state *dc_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
	struct drm_color_ctm_3x4 *ctm = NULL;
	struct dc_color_caps *color_caps = NULL;
	bool has_crtc_cm_degamma;
	int ret;

	/* Reject 3D LUTs whose size the hardware cannot handle. */
	ret = amdgpu_dm_verify_lut3d_size(adev, plane_state);
	if (ret) {
		drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n");
		return ret;
	}

	if (dc_plane_state->ctx && dc_plane_state->ctx->dc)
		color_caps = &dc_plane_state->ctx->dc->caps.color;

	/* Start from a pass-through input TF; degamma below may override it. */
	dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
	dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR;

	has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
	ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps);
	/* -ENOMEM is fatal; -EINVAL here means "no plane degamma set". */
	if (ret == -ENOMEM)
		return ret;

	/*
	 * ret != -EINVAL means the plane programmed its own degamma; that
	 * cannot be combined with a CRTC-level degamma.
	 */
	if (has_crtc_cm_degamma && ret != -EINVAL) {
		drm_dbg_kms(crtc->base.crtc->dev,
			    "doesn't support plane and CRTC degamma at the same time\n");
		return -EINVAL;
	}

	/* Apply the CRTC degamma on the plane's pre-blend block instead. */
	if (has_crtc_cm_degamma) {
		ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps);
		if (ret)
			return ret;
	}

	/* Per-plane 3x4 CTM: programmed via gamut remap, not input CSC. */
	if (dm_plane_state->ctm) {
		ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data;
		__drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
		dc_plane_state->gamut_remap_matrix.enable_remap = true;
		dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
	} else {
		dc_plane_state->gamut_remap_matrix.enable_remap = false;
		dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
	}

	/*
	 * Prefer the colorop pipeline; on success we are done.
	 * NOTE(review): any colorop error (including a genuine -EINVAL from
	 * an invalid pipeline) silently falls back to the legacy properties
	 * below — confirm this fallback is intended rather than propagating
	 * the error.
	 */
	if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
		return 0;

	return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}