Commit e05bf4f3 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Fixes all over the place.

  I missed the rockchip and imx fixes while on holiday, so I've queued
  them now, which makes this a bit bigger.

  The rest is misc amdgpu, radeon, i915 and armada.

  I think the most important thing is the ioctl fix: we dropped the
  ball on keeping the ADDFB2 ioctl 32-bit compatible, so we get to add
  a compat wrapper.

  There is also an i915 revert to avoid a regression with existing
  userspace"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (43 commits)
  drm/ttm: improve uncached page deallocation.
  drm/ttm: fix uncached page deallocation to properly fill page pool v3.
  drm/amdgpu/dce8: Re-set VBLANK interrupt state when enabling a CRTC
  drm/radeon/ci: silence a harmless PCC warning
  drm/amdgpu/cz: silence some dpm debug output
  drm/amdgpu/cz: store the forced dpm level
  drm/amdgpu/cz: unforce dpm levels before forcing to low/high
  drm/amdgpu: remove bogus check in gfx8 rb setup
  drm/amdgpu: set proper index/data pair for smc regs on CZ (v2)
  drm/amdgpu: disable the IP module if early_init returns -ENOENT (v2)
  drm/amdgpu: stop context leak in the error path
  drm/amdgpu: validate the context id in the dependencies
  drm/radeon: fix user ptr race condition
  drm/radeon: Don't flush the GART TLB if rdev->gart.ptr == NULL
  drm/radeon: add a dpm quirk for Sapphire Radeon R9 270X 2GB GDDR5
  drm/armada: avoid saving the adjusted mode to crtc->mode
  drm/armada: fix overlay when partially off-screen
  drm/armada: convert overlay to use drm_plane_helper_check_update()
  drm/armada: fix gem object free after failed prime import
  drm/armada: fix incorrect overlay plane cleanup
  ...
parents 21bdb584 e9308884
Showing 266 additions and 114 deletions
@@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
<td rowspan="2" valign="top" >omap</td>
<td valign="top" >omap</td>
<td valign="top" >Generic</td>
<td valign="top" >“zorder”</td>
<td valign="top" >RANGE</td>
......
@@ -65,8 +65,10 @@ Optional properties:
- edid: verbatim EDID data block describing attached display.
- ddc: phandle describing the i2c bus handling the display data
channel
- port: A port node with endpoint definitions as defined in
- port@[0-1]: Port nodes with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
Port 0 is the input port connected to the IPU display interface,
port 1 is the output port connected to a panel.
example:
@@ -75,9 +77,29 @@ display@di0 {
edid = [edid-data];
interface-pix-fmt = "rgb24";
port {
port@0 {
reg = <0>;
display_in: endpoint {
remote-endpoint = <&ipu_di0_disp0>;
};
};
port@1 {
reg = <1>;
display_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
panel {
...
port {
panel_in: endpoint {
remote-endpoint = <&display_out>;
};
};
};
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_ib *ib;
int i, j, r;
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_fence *fence;
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
if (r)
return r;
ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
if (ctx == NULL)
return -EINVAL;
r = amdgpu_fence_recreate(ring, p->filp,
deps[j].handle,
&fence);
if (r)
if (r) {
amdgpu_ctx_put(ctx);
return r;
}
amdgpu_sync_fence(&ib->sync, fence);
amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
}
}
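Both this hunk and the amdgpu_cs_wait_ioctl hunk below close the same kind of leak: amdgpu_ctx_get() takes a reference that every exit path, including the error returns, must hand back with amdgpu_ctx_put(). A minimal self-contained sketch of the pattern (toy refcount and helper names, not the driver's API):

#include <errno.h>
#include <stdio.h>

struct ctx { int refcount; };

static struct ctx *ctx_get(struct ctx *c)   /* look up and take a reference */
{
    if (c == NULL)
        return NULL;
    c->refcount++;
    return c;
}

static void ctx_put(struct ctx *c)          /* drop the reference */
{
    c->refcount--;
}

/* Every path out of the function pairs the get with a put. */
static int add_dependency(struct ctx *lookup, int op_result)
{
    struct ctx *c = ctx_get(lookup);

    if (c == NULL)
        return -EINVAL;     /* unknown context id: reject the dependency */
    if (op_result) {        /* e.g. amdgpu_fence_recreate() failed */
        ctx_put(c);         /* this put was missing before the fix */
        return op_result;
    }
    ctx_put(c);             /* success path drops it too */
    return 0;
}

int main(void)
{
    struct ctx c = { .refcount = 1 };

    printf("error path -> %d, refcount %d\n", add_dependency(&c, -EIO), c.refcount);
    printf("ok path    -> %d, refcount %d\n", add_dependency(&c, 0), c.refcount);
    return 0;
}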
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
if (r)
if (r) {
amdgpu_ctx_put(ctx);
return r;
}
r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
if (r)
if (r) {
amdgpu_ctx_put(ctx);
return r;
}
r = fence_wait_timeout(&fence->base, true, timeout);
amdgpu_fence_unref(&fence);
......
@@ -1207,10 +1207,15 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
if (r)
if (r == -ENOENT)
adev->ip_block_enabled[i] = false;
else if (r)
return r;
else
adev->ip_block_enabled[i] = true;
} else {
adev->ip_block_enabled[i] = true;
}
adev->ip_block_enabled[i] = true;
}
}
......
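The early_init change turns a hard failure into a soft disable: an IP block whose early_init() reports -ENOENT is marked disabled and skipped, while any other error still aborts init. A compilable sketch of the resulting three-way decision (a hypothetical result table standing in for adev->ip_blocks):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 3

/* stand-ins for the adev->ip_blocks[i].funcs->early_init() results */
static const int early_init_result[NBLOCKS] = { 0, -ENOENT, 0 };
static bool ip_block_enabled[NBLOCKS];

static int early_init_all(void)
{
    for (int i = 0; i < NBLOCKS; i++) {
        int r = early_init_result[i];

        if (r == -ENOENT)
            ip_block_enabled[i] = false;  /* block absent: disable, keep going */
        else if (r)
            return r;                     /* real error: abort device init */
        else
            ip_block_enabled[i] = true;
    }
    return 0;
}

int main(void)
{
    int r = early_init_all();

    for (int i = 0; i < NBLOCKS; i++)
        printf("block %d: %s\n", i, ip_block_enabled[i] ? "enabled" : "disabled");
    return r ? 1 : 0;
}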
@@ -1679,25 +1679,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
if (ret)
return ret;
DRM_INFO("DPM unforce state min=%d, max=%d.\n",
pi->sclk_dpm.soft_min_clk,
pi->sclk_dpm.soft_max_clk);
DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
pi->sclk_dpm.soft_min_clk,
pi->sclk_dpm.soft_max_clk);
return 0;
}
static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
enum amdgpu_dpm_forced_level level)
enum amdgpu_dpm_forced_level level)
{
int ret = 0;
switch (level) {
case AMDGPU_DPM_FORCED_LEVEL_HIGH:
ret = cz_dpm_unforce_dpm_levels(adev);
if (ret)
return ret;
ret = cz_dpm_force_highest(adev);
if (ret)
return ret;
break;
case AMDGPU_DPM_FORCED_LEVEL_LOW:
ret = cz_dpm_unforce_dpm_levels(adev);
if (ret)
return ret;
ret = cz_dpm_force_lowest(adev);
if (ret)
return ret;
@@ -1711,6 +1717,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
break;
}
adev->pm.dpm.forced_level = level;
return ret;
}
......
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v8_0_vga_enable(crtc, false);
/* Make sure VBLANK interrupt is still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
......
@@ -1813,10 +1813,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE);
if (data & 1)
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
else
data = 0;
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
......
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX 0x180
#define mmMP0PUB_IND_DATA 0x181
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmMP0PUB_IND_INDEX, (reg));
r = RREG32(mmMP0PUB_IND_DATA);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmMP0PUB_IND_INDEX, (reg));
WREG32(mmMP0PUB_IND_DATA, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
bool smc_enabled = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->smc_rreg = &vi_smc_rreg;
adev->smc_wreg = &vi_smc_wreg;
if (adev->flags & AMDGPU_IS_APU) {
adev->smc_rreg = &cz_smc_rreg;
adev->smc_wreg = &cz_smc_wreg;
} else {
adev->smc_rreg = &vi_smc_rreg;
adev->smc_wreg = &vi_smc_wreg;
}
adev->pcie_rreg = &vi_pcie_rreg;
adev->pcie_wreg = &vi_pcie_wreg;
adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
......
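The new CZ accessors are the classic index/data pair: the register number is written to mmMP0PUB_IND_INDEX (0x180) and the value then moves through mmMP0PUB_IND_DATA (0x181), with the smc_idx_lock spinlock keeping the two MMIO accesses atomic against concurrent users. A userspace model of the technique (a mutex instead of a spinlock, an array instead of MMIO; compile with -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* a fake indirect register file reached through an index/data pair */
static uint32_t backing[256];
static uint32_t index_reg;      /* selects which backing slot the data port hits */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t smc_rreg(uint32_t reg)
{
    uint32_t v;

    pthread_mutex_lock(&idx_lock);  /* index write + data read must not interleave */
    index_reg = reg;
    v = backing[index_reg & 0xff];
    pthread_mutex_unlock(&idx_lock);
    return v;
}

static void smc_wreg(uint32_t reg, uint32_t v)
{
    pthread_mutex_lock(&idx_lock);
    index_reg = reg;
    backing[index_reg & 0xff] = v;
    pthread_mutex_unlock(&idx_lock);
}

int main(void)
{
    smc_wreg(0x42, 0xdeadbeef);
    printf("reg 0x42 = 0x%08x\n", smc_rreg(0x42));
    return 0;
}

Without the lock, one thread's index write could land between another thread's index write and data access, silently redirecting the access to the wrong register; that is the race the spinlock in the real accessors prevents.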
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
crtc->mode = *adj;
val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
if (val != dcrtc->dumb_ctrl) {
dcrtc->dumb_ctrl = val;
......
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
if (dobj->obj.import_attach) {
/* We only ever display imported data */
dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
DMA_TO_DEVICE);
if (dobj->sgt)
dma_buf_unmap_attachment(dobj->obj.import_attach,
dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
......
@@ -7,6 +7,7 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
if (fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
}
static unsigned armada_limit(int start, unsigned size, unsigned max)
{
int end = start + size;
if (end < 0)
return 0;
if (start < 0)
start = 0;
return (unsigned)end > max ? max - start : end - start;
wake_up(&dplane->vbl.wait);
}
static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
struct drm_rect dest = {
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
uint32_t val, ctrl0;
unsigned idx = 0;
bool visible;
int ret;
crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
0, INT_MAX, true, false, &visible);
if (ret)
return ret;
ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
/* Does the position/size result in nothing to display? */
if (crtc_w == 0 || crtc_h == 0) {
if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
}
/*
* FIXME: if the starting point is off screen, we need to
* adjust src_x, src_y, src_w, src_h appropriately, and
* according to the scale.
*/
if (!dcrtc->plane) {
dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
val = (src_h & 0xffff0000) | src_w >> 16;
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
dplane->src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = crtc_h << 16 | crtc_w;
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
dplane->dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = crtc_y << 16 | crtc_x;
val = dest.y1 << 16 | dest.x1;
dplane->dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
dcrtc->base + LCD_SPU_SRAM_PARA1);
}
ret = wait_event_timeout(dplane->vbl.wait,
list_empty(&dplane->vbl.update.node),
HZ/25);
if (ret < 0)
return ret;
wait_event_timeout(dplane->vbl.wait,
list_empty(&dplane->vbl.update.node),
HZ/25);
if (plane->fb != fb) {
struct armada_gem_object *obj = drm_fb_obj(fb);
uint32_t sy, su, sv;
uint32_t addr[3], pixel_format;
int i, num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
older_fb);
}
src_y >>= 16;
src_x >>= 16;
sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
src_x * fb->bits_per_pixel / 8;
su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
src_x;
sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
src_x;
src_y = src.y1 >> 16;
src_x = src.x1 >> 16;
armada_reg_queue_set(dplane->vbl.regs, idx, sy,
pixel_format = fb->pixel_format;
hsub = drm_format_horz_chroma_subsampling(pixel_format);
num_planes = drm_format_num_planes(pixel_format);
/*
* Annoyingly, shifting a YUYV-format image by one pixel
* causes the U/V planes to toggle. Toggle the UV swap.
* (Unfortunately, this causes momentary colour flickering.)
*/
if (src_x & (hsub - 1) && num_planes == 1)
ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
for (i = 0; i < num_planes; i++)
addr[i] = obj->dev_addr + fb->offsets[i] +
src_y * fb->pitches[i] +
src_x * drm_format_plane_cpp(pixel_format, i);
for (; i < ARRAY_SIZE(addr); i++)
addr[i] = 0;
armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, su,
armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U0);
armada_reg_queue_set(dplane->vbl.regs, idx, sv,
armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V0);
armada_reg_queue_set(dplane->vbl.regs, idx, sy,
armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y1);
armada_reg_queue_set(dplane->vbl.regs, idx, su,
armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U1);
armada_reg_queue_set(dplane->vbl.regs, idx, sv,
armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
LCD_SPU_DMA_PITCH_UV);
}
val = (src_h & 0xffff0000) | src_w >> 16;
val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
if (dplane->src_hw != val) {
dplane->src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = crtc_h << 16 | crtc_w;
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
if (dplane->dst_hw != val) {
dplane->dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = crtc_y << 16 | crtc_x;
val = dest.y1 << 16 | dest.x1;
if (dplane->dst_yx != val) {
dplane->dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
if (dplane->ctrl0 != ctrl0) {
dplane->ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
static void armada_plane_destroy(struct drm_plane *plane)
{
kfree(plane);
struct armada_plane *dplane = drm_to_armada_plane(plane);
drm_plane_cleanup(plane);
kfree(dplane);
}
static int armada_plane_set_property(struct drm_plane *plane,
......
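The deleted hand-rolled armada_limit() is what let partially off-screen overlays go wrong; drm_plane_helper_check_update() instead clips the 16.16 fixed-point source rectangle and the integer CRTC rectangle consistently. A standalone sketch of that clipping under a simplifying assumption the real helper does not make (1:1 scaling, toy rect type):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };    /* src in 16.16 fixed point, dst in pixels */

/* Clip dst against the active mode and move the src edges by the same
 * amounts, converted to 16.16. Returns whether anything stays visible. */
static bool clip_update(struct rect *src, struct rect *dst, int hdisplay, int vdisplay)
{
    if (dst->x1 < 0) { src->x1 += -dst->x1 << 16; dst->x1 = 0; }
    if (dst->y1 < 0) { src->y1 += -dst->y1 << 16; dst->y1 = 0; }
    if (dst->x2 > hdisplay) { src->x2 -= (dst->x2 - hdisplay) << 16; dst->x2 = hdisplay; }
    if (dst->y2 > vdisplay) { src->y2 -= (dst->y2 - vdisplay) << 16; dst->y2 = vdisplay; }
    return dst->x1 < dst->x2 && dst->y1 < dst->y2;
}

int main(void)
{
    struct rect src = { 0, 0, 64 << 16, 64 << 16 };
    struct rect dst = { -16, 0, 48, 64 };           /* hangs off the left edge */
    bool visible = clip_update(&src, &dst, 1024, 768);

    printf("visible=%d src.x1=%dpx dst=(%d,%d)-(%d,%d)\n",
           visible, src.x1 >> 16, dst.x1, dst.y1, dst.x2, dst.y2);
    return 0;
}

When nothing survives the clip, the driver keeps the plane configured but clears CFG_DMA_ENA, exactly as the !visible branch in the hunk does.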
@@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
/* For some reason crtc x/y offsets are signed internally. */
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
/*
* Universal plane src offsets are only 16.16, prevent havoc for
* drivers using universal plane code internally.
*/
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
drm_modeset_lock_all(dev);
......
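The replaced INT_MAX check was too permissive: universal-plane code carries source offsets in unsigned 16.16 fixed point, so only the low 16 bits of the integer part survive the shift, and any x/y with a bit set above bit 15 would silently wrap. A short demonstration of the representation and of the mask the new check uses:

#include <stdint.h>
#include <stdio.h>

/* pack an integer pixel offset into unsigned 16.16 fixed point */
static uint32_t to_16_16(uint32_t px) { return px << 16; }

static int offset_in_range(uint32_t v)
{
    /* mirrors: if (crtc_req->x & 0xffff0000 || ...) return -ERANGE; */
    return (v & 0xffff0000) == 0;
}

int main(void)
{
    uint32_t ok = 4096, bad = 0x10000;  /* 65536 needs a 17th integer bit */

    printf("%u: in range=%d, 16.16=0x%08x\n", ok, offset_in_range(ok), to_16_16(ok));
    printf("%u: in range=%d, would wrap to 0x%08x\n", bad, offset_in_range(bad), to_16_16(bad));
    return 0;
}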
@@ -70,6 +70,8 @@
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
typedef struct drm_version_32 {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
u32 height;
u32 pixel_format;
u32 flags;
u32 handles[4];
u32 pitches[4];
u32 offsets[4];
u64 modifier[4];
} __attribute__((packed)) drm_mode_fb_cmd232_t;
static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
struct drm_mode_fb_cmd232 req32;
struct drm_mode_fb_cmd2 __user *req64;
int i;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
req64 = compat_alloc_user_space(sizeof(*req64));
if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
|| __put_user(req32.width, &req64->width)
|| __put_user(req32.height, &req64->height)
|| __put_user(req32.pixel_format, &req64->pixel_format)
|| __put_user(req32.flags, &req64->flags))
return -EFAULT;
for (i = 0; i < 4; i++) {
if (__put_user(req32.handles[i], &req64->handles[i]))
return -EFAULT;
if (__put_user(req32.pitches[i], &req64->pitches[i]))
return -EFAULT;
if (__put_user(req32.offsets[i], &req64->offsets[i]))
return -EFAULT;
if (__put_user(req32.modifier[i], &req64->modifier[i]))
return -EFAULT;
}
err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
if (err)
return err;
if (__get_user(req32.fb_id, &req64->fb_id))
return -EFAULT;
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
}
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
};
/**
......
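Why ADDFB2 needed a compat wrapper at all: struct drm_mode_fb_cmd2 ends in u64 modifier[4], and 32-bit x86 aligns u64 to 4 bytes where x86-64 aligns it to 8, so the native and 32-bit layouts differ by 4 bytes of padding before the modifier array (hence the packed drm_mode_fb_cmd232 above). A userspace check of that layout claim, with the field list mirrored from the uapi struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* field-for-field mirror of struct drm_mode_fb_cmd2 */
struct fb_cmd2 {
    uint32_t fb_id;
    uint32_t width, height;
    uint32_t pixel_format;
    uint32_t flags;
    uint32_t handles[4];
    uint32_t pitches[4];
    uint32_t offsets[4];
    uint64_t modifier[4];
};

int main(void)
{
    /* x86-64: offset 72, size 104; the same code built with -m32
     * gives offset 68, size 100 -- two incompatible ioctl layouts */
    printf("modifier offset=%zu, total size=%zu\n",
           offsetof(struct fb_cmd2, modifier), sizeof(struct fb_cmd2));
    return 0;
}

Everything before modifier has identical offsets in both ABIs, which is why the wrapper's job is pure repacking of the same field values rather than any per-field translation.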
@@ -826,6 +826,7 @@ struct intel_context {
struct kref ref;
int user_handle;
uint8_t remap_slice;
struct drm_i915_private *i915;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
unsigned int cache_level:3;
unsigned int cache_dirty:1;
unsigned int has_dma_mapping:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
......
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
obj->pages = st;
obj->has_dma_mapping = true;
return 0;
}
@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
sg_free_table(obj->pages);
kfree(obj->pages);
obj->has_dma_mapping = false;
}
static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
i915_gem_gtt_finish_object(obj);
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sg_page_iter sg_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*/
i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_pages;
}
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg_mark_end(sg);
obj->pages = st;
ret = i915_gem_gtt_prepare_object(obj);
if (ret)
goto err_pages;
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
@@ -2300,10 +2306,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
if (PTR_ERR(page) == -ENOSPC)
return -ENOMEM;
else
return PTR_ERR(page);
if (ret == -ENOSPC)
ret = -ENOMEM;
return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
}
request->emitted_jiffies = jiffies;
ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
i915_gem_gtt_finish_object(obj);
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
goto err_remove_node;
}
ret = i915_gem_gtt_prepare_object(obj);
if (ret)
goto err_remove_node;
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
goto err_finish_gtt;
goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
return vma;
err_finish_gtt:
i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
......
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
trace_i915_context_free(ctx);
@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
......
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
return PTR_ERR(sg);
obj->pages = sg;
obj->has_dma_mapping = true;
return 0;
}
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL);
obj->has_dma_mapping = false;
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
......
@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
if (obj->has_dma_mapping)
return 0;
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
@@ -1972,10 +1969,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
if (!obj->has_dma_mapping)
dma_unmap_sg(&dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
......
@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
obj->has_dma_mapping = true;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
......
@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
/* Since the swizzling may vary within an
* object, we have no idea what the swizzling
* is for any page in particular. Thus we
* cannot migrate tiled pages using the GPU,
* nor can we tell userspace what the exact
* swizzling is for any object.
*/
if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
}
if (dcc == 0xffffffff) {
......