Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_kms.c')
 drivers/gpu/drm/vc4/vc4_kms.c | 256 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 236 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 08318e69061b..149825ff5df8 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -11,6 +11,8 @@
  * crtc, HDMI encoder).
  */
 
+#include <linux/clk.h>
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
@@ -144,22 +146,130 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
 		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
 }
 
+static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+				     struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	unsigned int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+		u32 dispctrl;
+		u32 dsp3_mux;
+
+		if (!crtc_state->active)
+			continue;
+
+		if (vc4_state->assigned_channel != 2)
+			continue;
+
+		/*
+		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
+		 * FIFO X'.
+		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
+		 *
+		 * DSP3 is connected to FIFO2 unless the transposer is
+		 * enabled. In this case, FIFO 2 is directly accessed by the
+		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
+		 * route.
+		 */
+		if (vc4_state->feed_txp)
+			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
+		else
+			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+		dispctrl = HVS_READ(SCALER_DISPCTRL) &
+			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
+	}
+}
+
+static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+				     struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	unsigned char dsp2_mux = 0;
+	unsigned char dsp3_mux = 3;
+	unsigned char dsp4_mux = 3;
+	unsigned char dsp5_mux = 3;
+	unsigned int i;
+	u32 reg;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+		if (!crtc_state->active)
+			continue;
+
+		switch (vc4_crtc->data->hvs_output) {
+		case 2:
+			dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+			break;
+
+		case 3:
+			dsp3_mux = vc4_state->assigned_channel;
+			break;
+
+		case 4:
+			dsp4_mux = vc4_state->assigned_channel;
+			break;
+
+		case 5:
+			dsp5_mux = vc4_state->assigned_channel;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	reg = HVS_READ(SCALER_DISPECTRL);
+	HVS_WRITE(SCALER_DISPECTRL,
+		  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+		  VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
+
+	reg = HVS_READ(SCALER_DISPCTRL);
+	HVS_WRITE(SCALER_DISPCTRL,
+		  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+		  VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
+
+	reg = HVS_READ(SCALER_DISPEOLN);
+	HVS_WRITE(SCALER_DISPEOLN,
+		  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+		  VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
+
+	reg = HVS_READ(SCALER_DISPDITHER);
+	HVS_WRITE(SCALER_DISPDITHER,
+		  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+		  VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
+}
+
 static void
 vc4_atomic_complete_commit(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_crtc *vc4_crtc;
+	struct vc4_hvs *hvs = vc4->hvs;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc *crtc;
 	int i;
 
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		struct vc4_crtc_state *vc4_crtc_state;
+
+		if (!new_crtc_state->commit)
 			continue;
 
-		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
-		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
+		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
 	}
 
+	if (vc4->hvs->hvs5)
+		clk_set_min_rate(hvs->core_clk, 500000000);
+
 	drm_atomic_helper_wait_for_fences(dev, state, false);
 
 	drm_atomic_helper_wait_for_dependencies(state);
@@ -168,6 +278,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 
 	vc4_ctm_commit(vc4, state);
 
+	if (vc4->hvs->hvs5)
+		vc5_hvs_pv_muxing_commit(vc4, state);
+	else
+		vc4_hvs_pv_muxing_commit(vc4, state);
+
 	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
@@ -182,6 +297,9 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_cleanup_done(state);
 
+	if (vc4->hvs->hvs5)
+		clk_set_min_rate(hvs->core_clk, 0);
+
 	drm_atomic_state_put(state);
 
 	up(&vc4->async_modeset);
@@ -374,8 +492,11 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 
 		/* CTM is being enabled or the matrix changed. */
 		if (new_crtc_state->ctm) {
+			struct vc4_crtc_state *vc4_crtc_state =
+				to_vc4_crtc_state(new_crtc_state);
+
 			/* fifo is 1-based since 0 disables CTM. */
-			int fifo = to_vc4_crtc(crtc)->channel + 1;
+			int fifo = vc4_crtc_state->assigned_channel + 1;
 
 			/* Check userland isn't trying to turn on CTM for more
 			 * than one CRTC at a time.
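The muxing writes above all follow the same read-modify-write idiom: clear a multi-bit field with its mask, then OR in the new value shifted into place, which is what the driver's VC4_SET_FIELD() macro packs for it. A minimal standalone sketch of that idiom follows; the register layout and names here are illustrative stand-ins, not the driver's actual definitions.

	#include <stdint.h>

	/* Hypothetical 2-bit mux field at bits 19:18, in the spirit of
	 * SCALER_DISPCTRL_DSP3_MUX (values made up for illustration). */
	#define DSP3_MUX_SHIFT	18
	#define DSP3_MUX_MASK	(0x3u << DSP3_MUX_SHIFT)

	/* Pack a value into a field, like VC4_SET_FIELD(val, field). */
	static inline uint32_t set_field(uint32_t val, uint32_t mask,
					 unsigned int shift)
	{
		return (val << shift) & mask;
	}

	/* Read-modify-write of one field in a shared register, mirroring
	 * the HVS_READ()/HVS_WRITE() sequences in the patch. */
	static uint32_t update_dsp3_mux(uint32_t reg, unsigned int fifo)
	{
		reg &= ~DSP3_MUX_MASK;	/* clear the old route */
		reg |= set_field(fifo, DSP3_MUX_MASK, DSP3_MUX_SHIFT);
		return reg;		/* caller writes this back to HW */
	}

Clearing with the mask first is what lets several mux fields share one register without the update to one output disturbing the routes of the others.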
@@ -415,6 +536,9 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
 	struct drm_plane *plane;
 	int i;
 
+	if (!vc4->load_tracker_available)
+		return 0;
+
 	priv_state = drm_atomic_get_private_obj_state(state,
 						      &vc4->load_tracker);
 	if (IS_ERR(priv_state))
@@ -485,10 +609,87 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
 	.atomic_destroy_state = vc4_load_tracker_destroy_state,
 };
 
+#define NUM_OUTPUTS  6
+#define NUM_CHANNELS 3
+
 static int
 vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 {
-	int ret;
+	unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	/*
+	 * Since the HVS FIFOs are shared across all the pixelvalves and
+	 * the TXP (and thus all the CRTCs), we need to pull the current
+	 * state of all the enabled CRTCs so that an update to a single
+	 * CRTC still keeps the previous FIFOs enabled and assigned to
+	 * the same CRTCs, instead of evaluating only the CRTC being
+	 * modified.
+	 */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct drm_crtc_state *crtc_state;
+
+		if (!crtc->state->enable)
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+	}
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct vc4_crtc_state *new_vc4_crtc_state =
+			to_vc4_crtc_state(new_crtc_state);
+		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+		unsigned int matching_channels;
+
+		if (old_crtc_state->enable && !new_crtc_state->enable)
+			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
+
+		if (!new_crtc_state->enable)
+			continue;
+
+		if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
+			unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+			continue;
+		}
+
+		/*
+		 * The problem we have to solve here is that we have
+		 * up to 7 encoders, connected to up to 6 CRTCs.
+		 *
+		 * Those CRTCs, depending on the instance, can be
+		 * routed to 1, 2 or 3 HVS FIFOs, and we need to
+		 * change the muxing between FIFOs and outputs in
+		 * the HVS accordingly.
+		 *
+		 * It would be pretty hard to come up with an
+		 * algorithm that would generically solve
+		 * this. However, the current routing trees we support
+		 * allow us to simplify the problem a bit.
+		 *
+		 * Indeed, with the currently supported layouts, if we
+		 * try to assign the FIFOs in ascending CRTC index
+		 * order, we can't fall into the situation where an
+		 * earlier CRTC that had multiple routes is assigned
+		 * one that was the only option for a later CRTC.
+		 *
+		 * If a future layout doesn't give us that property,
+		 * we will need something smarter, but it works so
+		 * far.
+		 */
+		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+		if (matching_channels) {
+			unsigned int channel = ffs(matching_channels) - 1;
+
+			new_vc4_crtc_state->assigned_channel = channel;
+			unassigned_channels &= ~BIT(channel);
+		} else {
+			return -EINVAL;
+		}
+	}
 
 	ret = vc4_ctm_atomic_check(dev, state);
 	if (ret < 0)
@@ -512,12 +713,18 @@ int vc4_kms_load(struct drm_device *dev)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_ctm_state *ctm_state;
 	struct vc4_load_tracker_state *load_state;
+	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
+					      "brcm,bcm2711-vc5");
 	int ret;
 
-	/* Start with the load tracker enabled. Can be disabled through the
-	 * debugfs load_tracker file.
-	 */
-	vc4->load_tracker_enabled = true;
+	if (!is_vc5) {
+		vc4->load_tracker_available = true;
+
+		/* Start with the load tracker enabled. Can be
+		 * disabled through the debugfs load_tracker file.
+		 */
+		vc4->load_tracker_enabled = true;
+	}
 
 	sema_init(&vc4->async_modeset, 1);
 
@@ -531,8 +738,14 @@ int vc4_kms_load(struct drm_device *dev)
 		return ret;
 	}
 
-	dev->mode_config.max_width = 2048;
-	dev->mode_config.max_height = 2048;
+	if (is_vc5) {
+		dev->mode_config.max_width = 7680;
+		dev->mode_config.max_height = 7680;
+	} else {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	}
+
 	dev->mode_config.funcs = &vc4_mode_funcs;
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.async_page_flip = true;
@@ -547,14 +760,17 @@ int vc4_kms_load(struct drm_device *dev)
 	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
 				    &vc4_ctm_state_funcs);
 
-	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
-	if (!load_state) {
-		drm_atomic_private_obj_fini(&vc4->ctm_manager);
-		return -ENOMEM;
-	}
+	if (vc4->load_tracker_available) {
+		load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+		if (!load_state) {
+			drm_atomic_private_obj_fini(&vc4->ctm_manager);
+			return -ENOMEM;
+		}
 
-	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
-				    &vc4_load_tracker_state_funcs);
+		drm_atomic_private_obj_init(dev, &vc4->load_tracker,
+					    &load_state->base,
+					    &vc4_load_tracker_state_funcs);
+	}
 
 	drm_mode_config_reset(dev);
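The long comment in vc4_atomic_check() above describes a first-fit assignment: walk the CRTCs in ascending index order and hand each one the lowest-numbered HVS FIFO it can still reach, tracking free FIFOs in a bitmask and picking with ffs(). The standalone sketch below demonstrates that scheme on a made-up routing table; the real constraints come from vc4_crtc->data->hvs_available_channels, not the values used here.

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define NUM_CHANNELS 3

	struct crtc {
		const char *name;
		unsigned int available_channels; /* bitmask of reachable FIFOs */
	};

	int main(void)
	{
		/* Illustrative topology: two pixelvalves that can use any
		 * FIFO, and a transposer restricted to FIFO 2. */
		struct crtc crtcs[] = {
			{ "pixelvalve0", 0x7 },
			{ "pixelvalve1", 0x7 },
			{ "txp",         0x4 },
		};
		unsigned long unassigned = (1ul << NUM_CHANNELS) - 1;

		for (unsigned int i = 0; i < sizeof(crtcs) / sizeof(crtcs[0]); i++) {
			unsigned long matching = unassigned & crtcs[i].available_channels;

			if (!matching) {
				printf("%s: no free channel (-EINVAL)\n", crtcs[i].name);
				return 1;
			}

			/* Lowest free FIFO this CRTC can be routed to. */
			unsigned int channel = ffs(matching) - 1;

			unassigned &= ~(1ul << channel);
			printf("%s -> channel %u\n", crtcs[i].name, channel);
		}
		return 0;
	}

Running this assigns pixelvalve0 -> 0, pixelvalve1 -> 1 and txp -> 2, which illustrates the comment's claim: under the supported layouts, ascending-order assignment never lets an earlier, flexible CRTC steal the only FIFO a later CRTC could have used.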