Commit 0e31225f authored by Linus Torvalds

Merge tag 'drm-fixes-2019-08-02-1' of git://anongit.freedesktop.org/drm/drm

Pull more drm fixes from Daniel Vetter:
 "Dave sends his pull, everyone realizes they've been asleep at the
  wheel and hits send on their own pulls :-/

  Normally I'd just ignore these all, because whatever, for me and Dave.
  But this time around the latecomers also included drm-intel-fixes, which
  had failed to send out a -fixes pull thus far for this release (screwed
  up vacation coverage, despite 2 of the 3 maintainers being around ...
  they all look appropriately guilty), and that really is overdue to get
  landed.

  And since I had to do a pull request anyway I pulled the other two
  late ones too.

  intel fixes (none had been sent since the main merge window pull):
   - gvt fixes (2 cc: stable)
   - fix gpu reset vs mm-shrinker vs wakeup fun (needed a few patches)
   - two gem locking fixes (one cc: stable)
   - pile of misc fixes all over with minor impact, 6 cc: stable, others
     from this window

  exynos:
   - misc minor fixes

  misc:
   - some build/Kconfig fixes
   - regression fix for vm scalability perf test which seems to mostly
     exercise dmesg/console logging ...
   - the vgem cache flush fix for arm64 broke the world on x86, so
     that's reverted again

* tag 'drm-fixes-2019-08-02-1' of git://anongit.freedesktop.org/drm/drm: (42 commits)
  Revert "drm/vgem: fix cache synchronization on arm/arm64"
  drm/exynos: fix missing decrement of retry counter
  drm/exynos: add CONFIG_MMU dependency
  drm/exynos: remove redundant assignment to pointer 'node'
  drm/exynos: using dev_get_drvdata directly
  drm/bochs: Use shadow buffer for bochs framebuffer console
  drm/fb-helper: Instanciate shadow FB if configured in device's mode_config
  drm/fb-helper: Map DRM client buffer only when required
  drm/client: Support unmapping of DRM client buffers
  drm/i915: Only recover active engines
  drm/i915: Add a wakeref getter for iff the wakeref is already active
  drm/i915: Lift intel_engines_resume() to callers
  drm/vgem: fix cache synchronization on arm/arm64
  drm/i810: Use CONFIG_PREEMPTION
  drm/bridge: tc358764: Fix build error
  drm/bridge: lvds-encoder: Fix build error while CONFIG_DRM_KMS_HELPER=m
  drm/i915/gvt: Adding ppgtt to GVT GEM context after shadow pdps settled.
  drm/i915/gvt: grab runtime pm first for forcewake use
  drm/i915/gvt: fix incorrect cache entry for guest page mapping
  drm/i915/gvt: Checking workload's gma earlier
  ...
parents 4f1a6ef1 9c8c9c7c
Showing with 178 additions and 64 deletions
@@ -394,7 +394,7 @@ config DRM_R128
 config DRM_I810
 	tristate "Intel I810"
 	# !PREEMPT because of missing ioctl locking
-	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
 	help
 	  Choose this option if you have an Intel I810 graphics card. If M is
 	  selected, the module will be called i810. AGP support is required
......
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
 	bochs->dev->mode_config.fb_base = bochs->fb_base;
 	bochs->dev->mode_config.preferred_depth = 24;
 	bochs->dev->mode_config.prefer_shadow = 0;
+	bochs->dev->mode_config.prefer_shadow_fbdev = 1;
 	bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 	bochs->dev->mode_config.funcs = &bochs_mode_funcs;
......
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
+	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
 	  Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
 config DRM_TOSHIBA_TC358764
 	tristate "TC358764 DSI/LVDS bridge"
-	depends on DRM && DRM_PANEL
 	depends on OF
 	select DRM_MIPI_DSI
+	select DRM_KMS_HELPER
+	select DRM_PANEL
 	help
 	  Toshiba TC358764 DSI/LVDS bridge driver.
......
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 	struct drm_device *dev = client->dev;
 	struct drm_client_buffer *buffer;
 	struct drm_gem_object *obj;
-	void *vaddr;
 	int ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 
 	buffer->gem = obj;
 
+	return buffer;
+
+err_delete:
+	drm_client_buffer_delete(buffer);
+
+	return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ *	The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->vaddr)
+		return buffer->vaddr;
+
 	/*
 	 * FIXME: The dependency on GEM here isn't required, we could
 	 * convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 	 * fd_install step out of the driver backend hooks, to make that
 	 * final step optional for internal users.
 	 */
-	vaddr = drm_gem_vmap(obj);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_delete;
-	}
+	vaddr = drm_gem_vmap(buffer->gem);
+	if (IS_ERR(vaddr))
+		return vaddr;
 
 	buffer->vaddr = vaddr;
 
-	return buffer;
-
-err_delete:
-	drm_client_buffer_delete(buffer);
+	return vaddr;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap);
 
-	return ERR_PTR(ret);
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * by themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+	drm_gem_vunmap(buffer->gem, buffer->vaddr);
+	buffer->vaddr = NULL;
 }
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
 
 static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 {
......
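
For orientation, a minimal sketch of a caller of the new pair; my_client_clear() is a hypothetical helper, while the drm_client_buffer_vmap()/drm_client_buffer_vunmap() names and the IS_ERR() convention come straight from the hunk above:

/* Hypothetical caller of the new API; not part of the patch. */
static int my_client_clear(struct drm_client_buffer *buffer, size_t len)
{
	void *vaddr;

	/* Map lazily; a repeated call just returns the cached buffer->vaddr. */
	vaddr = drm_client_buffer_vmap(buffer);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, len);

	/* Mappings are not refcounted: pair every vmap with a vunmap. */
	drm_client_buffer_vunmap(buffer);

	return 0;
}
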
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 	struct drm_clip_rect *clip = &helper->dirty_clip;
 	struct drm_clip_rect clip_copy;
 	unsigned long flags;
+	void *vaddr;
 
 	spin_lock_irqsave(&helper->dirty_lock, flags);
 	clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 
 	/* call dirty callback only when it has been really touched */
 	if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
 		/* Generic fbdev uses a shadow buffer */
-		if (helper->buffer)
+		if (helper->buffer) {
+			vaddr = drm_client_buffer_vmap(helper->buffer);
+			if (IS_ERR(vaddr))
+				return;
 			drm_fb_helper_dirty_blit_real(helper, &clip_copy);
-		helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+		}
+
+		if (helper->fb->funcs->dirty)
+			helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+						 &clip_copy, 1);
+
+		if (helper->buffer)
+			drm_client_buffer_vunmap(helper->buffer);
 	}
 }
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
 
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_framebuffer *fb = fb_helper->fb;
+
+	return dev->mode_config.prefer_shadow_fbdev ||
+	       dev->mode_config.prefer_shadow ||
+	       fb->funcs->dirty;
+}
+
 static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
 				u32 width, u32 height)
 {
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
 	struct drm_clip_rect *clip = &helper->dirty_clip;
 	unsigned long flags;
 
-	if (!helper->fb->funcs->dirty)
+	if (!drm_fbdev_use_shadow_fb(helper))
 		return;
 
 	spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	struct drm_framebuffer *fb;
 	struct fb_info *fbi;
 	u32 format;
+	void *vaddr;
 
 	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
 		      sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	fbi->fbops = &drm_fbdev_fb_ops;
 	fbi->screen_size = fb->height * fb->pitches[0];
 	fbi->fix.smem_len = fbi->screen_size;
-	fbi->screen_buffer = buffer->vaddr;
-	/* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
-	if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
-		fbi->fix.smem_start =
-			page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
 
 	drm_fb_helper_fill_info(fbi, fb_helper, sizes);
 
-	if (fb->funcs->dirty) {
+	if (drm_fbdev_use_shadow_fb(fb_helper)) {
 		struct fb_ops *fbops;
 		void *shadow;
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 		fbi->fbdefio = &drm_fbdev_defio;
 		fb_deferred_io_init(fbi);
+	} else {
+		/* buffer is mapped for HW framebuffer */
+		vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+		fbi->screen_buffer = vaddr;
+
+		/* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+		if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+			fbi->fix.smem_start =
+				page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
 	}
 
 	return 0;
......
@@ -2,6 +2,7 @@
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+	depends on MMU
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
 	select SND_SOC_HDMI_CODEC if SND_SOC
......
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc;
 module_param_named(fimc_devs, fimc_mask, uint, 0644);
 MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
 
-#define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_fimc_context(dev)	dev_get_drvdata(dev)
 
 enum {
 	FIMC_CLK_LCLK,
......
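
For context: platform_get_drvdata() is itself a thin wrapper around dev_get_drvdata(), so calling dev_get_drvdata(dev) directly simply drops the needless round-trip through to_platform_device(). Paraphrased from include/linux/platform_device.h:

/* The platform helper just dereferences the embedded struct device. */
static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}
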
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d)
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
 	struct device *dev = g2d->dev;
-	struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+	struct g2d_cmdlist_node *node;
 	int nr;
 	int ret;
 	struct g2d_buf_info *buf_info;
......
@@ -58,7 +58,7 @@
 #define GSC_COEF_DEPTH		3
 #define GSC_AUTOSUSPEND_DELAY	2000
 
-#define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_gsc_context(dev)	dev_get_drvdata(dev)
 
 #define gsc_read(offset)	readl(ctx->regs + (offset))
 #define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
......
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
 	scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
 	do {
 		cpu_relax();
-	} while (retry > 1 &&
+	} while (--retry > 1 &&
 		 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
 	do {
 		cpu_relax();
 		scaler_write(1, SCALER_INT_EN);
-	} while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+	} while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
 
 	return retry ? 0 : -EIO;
 }
......
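
The bug class here is worth spelling out: without the decrement, retry never changes, so a reset bit that fails to clear turns the bounded poll into an infinite loop and the final "retry ? 0 : -EIO" can never report a timeout. A minimal kernel-style sketch of the fixed pattern, with hw_busy() standing in as a hypothetical register readback:

/* Bounded polling loop: --retry makes the timeout actually count down. */
int retry = 100;

do {
	cpu_relax();
} while (--retry > 0 && hw_busy());	/* hw_busy() is hypothetical */

if (!retry)
	return -EIO;	/* hardware never became idle */
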
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	}
 
 	if (bdb->version >= 226) {
-		u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+		u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
 
 		wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
 		switch (wakeup_time) {
......
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
 		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
 		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
 
+		bi->num_qgv_points = qi.num_points;
+
 		for (j = 0; j < qi.num_points; j++) {
 			const struct intel_qgv_point *sp = &qi.points[j];
 			int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
 			bi->deratedbw[j] = min(maxdebw,
 					       bw * 9 / 10); /* 90% */
 
-			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
 				      i, j, bi->num_planes, bi->deratedbw[j]);
 		}
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
 {
 	int i;
 
-	/* Did we initialize the bw limits successfully? */
-	if (dev_priv->max_bw[0].num_planes == 0)
-		return UINT_MAX;
-
 	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
 		const struct intel_bw_info *bi =
 			&dev_priv->max_bw[i];
 
+		/*
+		 * Pcode will not expose all QGV points when
+		 * SAGV is forced to off/min/med/max.
+		 */
+		if (qgv_point >= bi->num_qgv_points)
+			return UINT_MAX;
+
 		if (num_planes >= bi->num_planes)
 			return bi->deratedbw[qgv_point];
 	}
......
@@ -2239,6 +2239,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
 		min_cdclk = max(2 * 96000, min_cdclk);
 
+	/*
+	 * "For DP audio configuration, cdclk frequency shall be set to
+	 *  meet the following requirements:
+	 *  DP Link Frequency(MHz) | Cdclk frequency(MHz)
+	 *  270                    | 320 or higher
+	 *  162                    | 200 or higher"
+	 */
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+		min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
 	/*
 	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
 	 * than 320000KHz.
......
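
Worked through for the HBR case in the quoted table: an HBR DP link reports port_clock == 270000 (kHz), so the clamp raises min_cdclk to at least 270 MHz; assuming VLV/CHV expose no cdclk step between 270 and 320 MHz (an assumption about the platform's cdclk table, not stated in the hunk), the cdclk code then rounds up to 320 MHz, which is exactly what the requirement asks for:

/* Illustrative values only; port_clock and min_cdclk are in kHz. */
int port_clock = 270000;	/* DP HBR, 2.7 GHz link rate */
int min_cdclk = 200000;		/* whatever the earlier checks computed */

min_cdclk = max(port_clock, min_cdclk);	/* -> 270000 */
/* The platform then picks the next supported cdclk >= 270 MHz,
 * i.e. 320 MHz on VLV/CHV (assumed cdclk table). */
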
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 		/* FIXME: assert CPU port conditions for SNB+ */
 	}
 
-	trace_intel_pipe_enable(dev_priv, pipe);
+	trace_intel_pipe_enable(crtc);
 
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 	 */
 	assert_planes_disabled(crtc);
 
-	trace_intel_pipe_disable(dev_priv, pipe);
+	trace_intel_pipe_disable(crtc);
 
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
......
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 #define ICL_AUX_PW_TO_CH(pw_idx)	\
 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
 
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
+	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
 static void
 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 				 struct i915_power_well *power_well)
 {
-	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+	int pw_idx = power_well->desc->hsw.idx;
+	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+	enum aux_ch aux_ch;
 	u32 val;
 
+	aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+			  ICL_AUX_PW_TO_CH(pw_idx);
+
 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
 	val &= ~DP_AUX_CH_CTL_TBT_IO;
-	if (power_well->desc->hsw.is_tc_tbt)
+	if (is_tbt)
 		val |= DP_AUX_CH_CTL_TBT_IO;
 
 	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
......
@@ -478,13 +478,13 @@ struct psr_table {
 	/* TP wake up time in multiple of 100 */
 	u16 tp1_wakeup_time;
 	u16 tp2_tp3_wakeup_time;
-
-	/* PSR2 TP2/TP3 wakeup time for 16 panels */
-	u32 psr2_tp2_tp3_wakeup_time;
 } __packed;
 
 struct bdb_psr {
 	struct psr_table psr_table[16];
+
+	/* PSR2 TP2/TP3 wakeup time for 16 panels */
+	u32 psr2_tp2_tp3_wakeup_time;
 } __packed;
 
 /*
......
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
+	if (i915_gem_init_hw(i915))
+		goto err_wedged;
+
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(i915);
-
-	if (i915_gem_init_hw(i915))
+	if (intel_gt_resume(i915))
 		goto err_wedged;
 
 	intel_uc_resume(i915);
......
@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 
 	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
-			set_page_dirty(page);
+			/*
+			 * As this may not be anonymous memory (e.g. shmem)
+			 * but exist on a real mapping, we have to lock
+			 * the page in order to dirty it -- holding
+			 * the page reference is not sufficient to
+			 * prevent the inode from being truncated.
+			 * Play safe and take the lock.
+			 */
+			set_page_dirty_lock(page);
 
 		mark_page_accessed(page);
 		put_page(page);
......
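
For reference, set_page_dirty_lock() is an existing mm helper (mm/page-writeback.c); roughly, it is the locked variant of the call it replaces, something like this simplified shape:

/* Simplified shape of the mm helper used above. */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);	/* stabilize the page<->mapping association */
	ret = set_page_dirty(page);
	unlock_page(page);

	return ret;
}
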
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active)
 	if (ce->state)
 		__context_unpin_state(ce->state);
 
+	intel_ring_unpin(ce->ring);
 	intel_context_put(ce);
 }
 
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
 
 	intel_context_get(ce);
 
+	err = intel_ring_pin(ce->ring);
+	if (err)
+		goto err_put;
+
 	if (!ce->state)
 		return 0;
 
 	err = __context_pin_state(ce->state, flags);
-	if (err) {
-		i915_active_cancel(&ce->active);
-		intel_context_put(ce);
-		return err;
-	}
+	if (err)
+		goto err_ring;
 
 	/* Preallocate tracking nodes */
 	if (!i915_gem_context_is_kernel(ce->gem_context)) {
 		err = i915_active_acquire_preallocate_barrier(&ce->active,
 							      ce->engine);
-		if (err) {
-			i915_active_release(&ce->active);
-			return err;
-		}
+		if (err)
+			goto err_state;
 	}
 
 	return 0;
+
+err_state:
+	__context_unpin_state(ce->state);
+err_ring:
+	intel_ring_unpin(ce->ring);
+err_put:
+	intel_context_put(ce);
+	i915_active_cancel(&ce->active);
+	return err;
 }
 
 void intel_context_active_release(struct intel_context *ce)
......
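
The rework above is the standard kernel unwind ladder: resources are acquired in order and released in reverse, with one goto label per partially-acquired state, so each failure path undoes exactly what was taken so far. A skeleton of the idiom, with illustrative take_*/undo_* names rather than i915 code:

int acquire_all(void)
{
	int err;

	err = take_a();
	if (err)
		goto err_none;

	err = take_b();
	if (err)
		goto err_a;

	err = take_c();
	if (err)
		goto err_b;

	return 0;

err_b:
	undo_b();	/* release in reverse acquisition order */
err_a:
	undo_a();
err_none:
	return err;
}
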
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
 {
 	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+	unsigned int slice = fls(sseu->slice_mask) - 1;
+	unsigned int subslice;
 	u32 mcr_s_ss_select;
-	u32 slice = fls(sseu->slice_mask);
-	u32 subslice = fls(sseu->subslice_mask[slice]);
+
+	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+	subslice = fls(sseu->subslice_mask[slice]);
+	GEM_BUG_ON(!subslice);
+	subslice--;
 
 	if (IS_GEN(dev_priv, 10))
 		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
+	unsigned long flags;
 
 	if (header) {
 		va_list ap;
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));
 
-	rcu_read_lock();
-
 	drm_printf(m, "\tRequests:\n");
 
+	spin_lock_irqsave(&engine->active.lock, flags);
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1518,7 @@
 		print_request_ring(m, rq);
 	}
-
-	rcu_read_unlock();
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
 	if (wakeref) {
@@ -1672,7 +1676,6 @@ struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *request, *active = NULL;
-	unsigned long flags;
 
 	/*
 	 * We are called by the error capture, reset and to dump engine
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * At all other times, we must assume the GPU is still running, but
 	 * we only care about the snapshot of this moment.
 	 */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (i915_request_completed(request))
 			continue;
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return active;
 }
......