blob: 1b278a22c8b7214c9f35d6d5aad7b22ce25cd3b3 [file] [log] [blame]
Rob Clark16ea9752013-01-08 15:04:28 -06001/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Sean Paulce2f2c32016-09-21 06:14:53 -070018#include <drm/drm_atomic.h>
Jyri Sarha305198d2016-04-07 15:05:16 +030019#include <drm/drm_atomic_helper.h>
Sean Paulce2f2c32016-09-21 06:14:53 -070020#include <drm/drm_crtc.h>
21#include <drm/drm_flip_work.h>
22#include <drm/drm_plane_helper.h>
Jyri Sarha4e910c72016-09-06 22:55:33 +030023#include <linux/workqueue.h>
Bartosz Golaszewski93452352016-10-31 15:19:26 +010024#include <linux/completion.h>
25#include <linux/dma-mapping.h>
Rob Herring86418f92017-03-22 08:26:06 -050026#include <linux/of_graph.h>
Jyri Sarhace99f722017-10-12 12:19:46 +030027#include <linux/math64.h>
Rob Clark16ea9752013-01-08 15:04:28 -060028
29#include "tilcdc_drv.h"
30#include "tilcdc_regs.h"
31
Bartosz Golaszewski93452352016-10-31 15:19:26 +010032#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
Jyri Sarha55e165c2016-11-15 23:37:24 +020033#define TILCDC_PALETTE_SIZE 32
34#define TILCDC_PALETTE_FIRST_ENTRY 0x4000
Tomi Valkeinen2b3a8cd2015-11-03 12:00:51 +020035
/*
 * Per-CRTC driver state for the TI LCDC display controller.
 * Embedded in (and recovered from) the DRM core's struct drm_crtc via
 * the to_tilcdc_crtc() container_of helper below.
 */
struct tilcdc_crtc {
	struct drm_crtc base;

	/* The CRTC's primary (scanout) plane. */
	struct drm_plane primary;
	/* Panel timing/config data; must be set before the mode is applied. */
	const struct tilcdc_panel_info *info;
	/* Pending page-flip event, set by tilcdc_crtc_update_fb(). */
	struct drm_pending_vblank_event *event;
	/* Serializes enable/disable/shutdown and framebuffer updates. */
	struct mutex enable_lock;
	bool enabled;
	/* Once set, the CRTC refuses to be enabled again (driver teardown). */
	bool shutdown;
	/* Wait queue signalled (frame_done = true) by the frame-done irq. */
	wait_queue_head_t frame_done_wq;
	bool frame_done;
	/* Protects last_vblank/next_fb against the irq handler. */
	spinlock_t irq_lock;

	/* Actual LCD functional clock rate, cached by tilcdc_crtc_set_clk(). */
	unsigned int lcd_fck_rate;

	/* Timestamp of the most recent vblank (also set on raster enable). */
	ktime_t last_vblank;
	/* Frame period in microseconds, derived from the current mode. */
	unsigned int hvtotal_us;

	/* Framebuffer whose scanout is deferred to the next vblank. */
	struct drm_framebuffer *next_fb;

	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;

	/* SYNC_LOST bookkeeping; presumably updated by the irq handler,
	 * which is not visible in this chunk of the file. */
	int sync_lost_count;
	bool frame_intact;
	/* Worker that fully resets the CRTC (see tilcdc_crtc_recover_work). */
	struct work_struct recover_work;

	/* DMA and CPU addresses of the 32-byte dummy palette, plus the
	 * completion the palette-loaded irq fires (handler not shown here). */
	dma_addr_t palette_dma_handle;
	u16 *palette_base;
	struct completion palette_loaded;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
68
Tomi Valkeinen2b2080d2015-10-20 09:37:27 +030069static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
Rob Clark16ea9752013-01-08 15:04:28 -060070{
Rob Clark16ea9752013-01-08 15:04:28 -060071 struct drm_device *dev = crtc->dev;
Daniel Schultz4c268d62016-10-28 13:52:41 +020072 struct tilcdc_drm_private *priv = dev->dev_private;
Rob Clark16ea9752013-01-08 15:04:28 -060073 struct drm_gem_cma_object *gem;
Tomi Valkeinen2b2080d2015-10-20 09:37:27 +030074 dma_addr_t start, end;
Jyri Sarha7eb9f062016-08-26 15:10:14 +030075 u64 dma_base_and_ceiling;
Rob Clark16ea9752013-01-08 15:04:28 -060076
Rob Clark16ea9752013-01-08 15:04:28 -060077 gem = drm_fb_cma_get_gem_obj(fb, 0);
78
Tomi Valkeinen2b2080d2015-10-20 09:37:27 +030079 start = gem->paddr + fb->offsets[0] +
80 crtc->y * fb->pitches[0] +
Ville Syrjälä353c8592016-12-14 23:30:57 +020081 crtc->x * fb->format->cpp[0];
Rob Clark16ea9752013-01-08 15:04:28 -060082
Tomi Valkeinen2b2080d2015-10-20 09:37:27 +030083 end = start + (crtc->mode.vdisplay * fb->pitches[0]);
Rob Clark16ea9752013-01-08 15:04:28 -060084
Jyri Sarha7eb9f062016-08-26 15:10:14 +030085 /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
86 * with a single insruction, if available. This should make it more
87 * unlikely that LCDC would fetch the DMA addresses in the middle of
88 * an update.
89 */
Daniel Schultz4c268d62016-10-28 13:52:41 +020090 if (priv->rev == 1)
91 end -= 1;
92
93 dma_base_and_ceiling = (u64)end << 32 | start;
Jyri Sarha7eb9f062016-08-26 15:10:14 +030094 tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
Rob Clark16ea9752013-01-08 15:04:28 -060095}
96
/*
 * The driver currently supports only true color formats. For true
 * color the palette block is bypassed, but a 32 byte palette should
 * still be loaded. The first 16-bit entry must be 0x4000 while all
 * other entries must be zeroed.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	/* Re-arm the completion; the palette-loaded irq will complete it. */
	reinit_completion(&tilcdc_crtc->palette_loaded);

	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);

	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* Enable DMA Palette Loaded Interrupt */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_err(dev->dev, "%s: Palette loading timeout", __func__);

	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}
146
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300147static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
148{
149 struct tilcdc_drm_private *priv = dev->dev_private;
150
151 tilcdc_clear_irqstatus(dev, 0xffffffff);
152
153 if (priv->rev == 1) {
154 tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
Jyri Sarha36725832016-11-21 18:30:19 +0200155 LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300156 LCDC_V1_UNDERFLOW_INT_ENA);
Karl Beldan8d6c3f72016-08-23 12:57:00 +0000157 tilcdc_set(dev, LCDC_DMA_CTRL_REG,
158 LCDC_V1_END_OF_FRAME_INT_ENA);
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300159 } else {
160 tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
161 LCDC_V2_UNDERFLOW_INT_ENA |
162 LCDC_V2_END_OF_FRAME0_INT_ENA |
163 LCDC_FRAME_DONE | LCDC_SYNC_LOST);
164 }
165}
166
167static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
168{
169 struct tilcdc_drm_private *priv = dev->dev_private;
170
171 /* disable irqs that we might have enabled: */
172 if (priv->rev == 1) {
173 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
Jyri Sarha36725832016-11-21 18:30:19 +0200174 LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
Jyri Sarhaafaf8332016-06-21 16:00:44 +0300175 LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
176 tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
177 LCDC_V1_END_OF_FRAME_INT_ENA);
178 } else {
179 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
180 LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
181 LCDC_V2_END_OF_FRAME0_INT_ENA |
182 LCDC_FRAME_DONE | LCDC_SYNC_LOST);
183 }
184}
185
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300186static void reset(struct drm_crtc *crtc)
Rob Clark16ea9752013-01-08 15:04:28 -0600187{
188 struct drm_device *dev = crtc->dev;
189 struct tilcdc_drm_private *priv = dev->dev_private;
190
Tomi Valkeinen2efec4f2015-10-20 09:37:27 +0300191 if (priv->rev != 2)
192 return;
193
194 tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
195 usleep_range(250, 1000);
196 tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
197}
198
/*
 * Return the difference between the requested pixel clock rate and the
 * effective rate, as a percentage of the requested rate.  Both rates
 * are pre-divided by 100 so the multiply cannot overflow for sane
 * pixel clocks.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
				     unsigned long real_rate)
{
	int want = rate / 100;
	int have = real_rate / 100;
	int pct = ((have - want) * 100) / want;

	return (unsigned int)abs(pct);
}
210
/*
 * Program the pixel clock for the current mode: ask the clock framework
 * for 2x the pixel rate and divide by 2 in the LCDC, falling back to a
 * computed divider if the platform cannot set the rate.  Caches the
 * resulting functional clock rate in tilcdc_crtc->lcd_fck_rate.
 */
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_rate, req_rate;
	unsigned int clkdiv;
	int ret;

	clkdiv = 2; /* first try using a standard divider of 2 */

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	req_rate = crtc->mode.clock * 1000;

	ret = clk_set_rate(priv->clk, req_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	if (ret < 0) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			dev_err(dev->dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}

		clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);

		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_rate = clkdiv * req_rate;

		if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
			dev_warn(dev->dev,
				 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
				 clk_rate, real_rate);
		}
	}

	tilcdc_crtc->lcd_fck_rate = clk_rate;

	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);

	/* Rev 2 additionally gates the DMA/LIDD/core clocks individually. */
	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}
274
Xiongwei Song584d4ed2017-12-02 19:24:22 +0800275static uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
Jyri Sarhace99f722017-10-12 12:19:46 +0300276{
277 return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
278 mode->clock);
279}
280
/*
 * Program the full LCDC configuration for the CRTC's current adjusted
 * mode: DMA burst/FIFO setup, raster timings, display type and pixel
 * format, pixel clock, dummy palette load, and initial scanout address.
 * Must be called with the raster disabled; returns early (with a WARN)
 * if the panel info or primary framebuffer is missing.
 */
static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	const struct tilcdc_panel_info *info = tilcdc_crtc->info;
	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_framebuffer *fb = crtc->primary->state->fb;

	if (WARN_ON(!info))
		return;

	if (WARN_ON(!fb))
		return;

	/* Configure the Burst Size and fifo threshold of DMA: */
	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
	switch (info->dma_burst_sz) {
	case 1:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
		break;
	case 2:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
		break;
	case 4:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
		break;
	case 8:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
		break;
	case 16:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
		break;
	default:
		dev_err(dev->dev, "invalid burst size\n");
		return;
	}
	reg |= (info->fifo_th << 8);
	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);

	/* Configure timings: porches and sync widths from the mode. */
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
	    mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);

	/* Set AC Bias Period and Number of Transitions per Interrupt: */
	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
	reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
		LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);

	/*
	 * subtract one from hfp, hbp, hsw because the hardware uses
	 * a value of 0 as 1
	 */
	if (priv->rev == 2) {
		/* clear bits we're going to set */
		reg &= ~0x78000033;
		/* Rev 2 carries the high bits of hfp/hbp/hsw here. */
		reg |= ((hfp-1) & 0x300) >> 8;
		reg |= ((hbp-1) & 0x300) >> 4;
		reg |= ((hsw-1) & 0x3c0) << 21;
	}
	tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);

	reg = (((mode->hdisplay >> 4) - 1) << 4) |
		(((hbp-1) & 0xff) << 24) |
		(((hfp-1) & 0xff) << 16) |
		(((hsw-1) & 0x3f) << 10);
	if (priv->rev == 2)
		/* Rev 2: extra MSB of the horizontal pixel count in bit 3. */
		reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
	tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);

	reg = ((mode->vdisplay - 1) & 0x3ff) |
		((vbp & 0xff) << 24) |
		((vfp & 0xff) << 16) |
		(((vsw-1) & 0x3f) << 10);
	tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);

	/*
	 * be sure to set Bit 10 for the V2 LCDC controller,
	 * otherwise limited to 1024 pixels width, stopping
	 * 1920x1080 being supported.
	 */
	if (priv->rev == 2) {
		if ((mode->vdisplay - 1) & 0x400) {
			tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		} else {
			tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		}
	}

	/* Configure display type: */
	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
		  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
		  0x000ff000 /* Palette Loading Delay bits */);
	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
	if (info->tft_alt_mode)
		reg |= LCDC_TFT_ALT_ENABLE;
	if (priv->rev == 2) {
		switch (fb->format->format) {
		case DRM_FORMAT_BGR565:
		case DRM_FORMAT_RGB565:
			break;
		case DRM_FORMAT_XBGR8888:
		case DRM_FORMAT_XRGB8888:
			reg |= LCDC_V2_TFT_24BPP_UNPACK;
			/* fallthrough */
		case DRM_FORMAT_BGR888:
		case DRM_FORMAT_RGB888:
			reg |= LCDC_V2_TFT_24BPP_MODE;
			break;
		default:
			dev_err(dev->dev, "invalid pixel format\n");
			return;
		}
	}
	/*
	 * NOTE(review): '<' here yields 0 or 1, so info->fdd is never
	 * placed into bits 12-19 (the field cleared by the 0x000ff000
	 * mask above); 'info->fdd << 12' looks like the intent.  Left
	 * as-is because shipped hardware may depend on the current
	 * behaviour - confirm against the LCDC TRM before changing.
	 */
	reg |= info->fdd < 12;
	tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);

	if (info->invert_pxl_clk)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);

	if (info->sync_ctrl)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);

	if (info->sync_edge)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);

	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);

	if (info->raster_order)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
	else
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);

	tilcdc_crtc_set_clk(crtc);

	tilcdc_crtc_load_palette(crtc);

	set_scanout(crtc, fb);

	crtc->hwmode = crtc->state->adjusted_mode;

	/* Cache the frame period for the page-flip race window check. */
	tilcdc_crtc->hvtotal_us =
		tilcdc_mode_hvtotal(&crtc->hwmode);
}
450
/*
 * Power up and start the CRTC: runtime-resume the device, reset the
 * LCDC, program the current mode, unmask interrupts and enable the
 * raster.  No-op if already enabled or if the driver is shutting down.
 */
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}

	pm_runtime_get_sync(dev->dev);

	reset(crtc);

	tilcdc_crtc_set_mode(crtc);

	tilcdc_crtc_enable_irqs(dev);

	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	/* Normal scanout: load frame data only, not the palette. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
492
/* Atomic-helper hook: delegate to the shared enable path. */
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	tilcdc_crtc_enable(crtc);
}
498
/*
 * Stop the CRTC: disable the raster, wait for the hardware to finish
 * the current frame, then mask interrupts and runtime-suspend.  With
 * @shutdown set the CRTC is additionally latched off for good (driver
 * teardown); no-op if already disabled.
 */
static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (shutdown)
		tilcdc_crtc->shutdown = true;
	if (!tilcdc_crtc->enabled) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}
	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * Wait for framedone irq which will still come before putting
	 * things to sleep..
	 */
	ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				 tilcdc_crtc->frame_done,
				 msecs_to_jiffies(500));
	if (ret == 0)
		dev_err(dev->dev, "%s: timeout waiting for framedone\n",
			__func__);

	drm_crtc_vblank_off(crtc);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
535
/* Regular disable: turn the CRTC off without latching shutdown. */
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, false);
}
540
/* Atomic-helper hook: delegate to the shared disable path. */
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	tilcdc_crtc_disable(crtc);
}
546
/* Final disable at driver teardown: the CRTC cannot be re-enabled. */
void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, true);
}
551
Jyri Sarha47bfd6c2016-06-22 16:27:54 +0300552static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
553{
554 return crtc->state && crtc->state->enable && crtc->state->active;
Rob Clark16ea9752013-01-08 15:04:28 -0600555}
556
Jyri Sarha13b3d722016-04-06 14:02:38 +0300557static void tilcdc_crtc_recover_work(struct work_struct *work)
558{
559 struct tilcdc_crtc *tilcdc_crtc =
560 container_of(work, struct tilcdc_crtc, recover_work);
561 struct drm_crtc *crtc = &tilcdc_crtc->base;
562
563 dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);
564
Daniel Vetter33e5b662017-03-22 22:50:47 +0100565 drm_modeset_lock(&crtc->mutex, NULL);
Jyri Sarha13b3d722016-04-06 14:02:38 +0300566
567 if (!tilcdc_crtc_is_on(crtc))
568 goto out;
569
570 tilcdc_crtc_disable(crtc);
571 tilcdc_crtc_enable(crtc);
572out:
Daniel Vetter33e5b662017-03-22 22:50:47 +0100573 drm_modeset_unlock(&crtc->mutex);
Jyri Sarha13b3d722016-04-06 14:02:38 +0300574}
575
/*
 * Tear the CRTC down: stop the hardware, drain any queued work,
 * release the OF port reference and unregister from the DRM core.
 */
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;

	tilcdc_crtc_shutdown(crtc);

	/* Make sure recover_work (and anything else queued) has finished. */
	flush_workqueue(priv->wq);

	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
}
587
/*
 * Queue a page flip to @fb.  If the next vblank is imminent (within
 * TILCDC_VBLANK_SAFETY_THRESHOLD_US) the scanout switch is deferred to
 * the irq handler via next_fb; otherwise the DMA base is reprogrammed
 * immediately.  Returns -EBUSY if a flip is already pending.
 */
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}

	tilcdc_crtc->event = event;

	mutex_lock(&tilcdc_crtc->enable_lock);

	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		/* Estimate the next vblank from the last one plus one
		 * frame period. */
		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   tilcdc_crtc->hvtotal_us);
		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		/* Too close to vblank: let the irq handler do the switch. */
		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
		else
			set_scanout(crtc, fb);

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}

	mutex_unlock(&tilcdc_crtc->enable_lock);

	return 0;
}
627
Rob Clark16ea9752013-01-08 15:04:28 -0600628static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
629 const struct drm_display_mode *mode,
630 struct drm_display_mode *adjusted_mode)
631{
Jyri Sarha103cd8b2015-02-10 14:13:23 +0200632 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
633
634 if (!tilcdc_crtc->simulate_vesa_sync)
635 return true;
636
637 /*
638 * tilcdc does not generate VESA-compliant sync but aligns
639 * VS on the second edge of HS instead of first edge.
640 * We use adjusted_mode, to fixup sync by aligning both rising
641 * edges and add HSKEW offset to fix the sync.
642 */
643 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
644 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
645
646 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
647 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
648 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
649 } else {
650 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
651 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
652 }
653
Rob Clark16ea9752013-01-08 15:04:28 -0600654 return true;
655}
656
Jyri Sarhadb380c52016-04-07 15:10:23 +0300657static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
658 struct drm_crtc_state *state)
659{
660 struct drm_display_mode *mode = &state->mode;
661 int ret;
662
663 /* If we are not active we don't care */
664 if (!state->active)
665 return 0;
666
667 if (state->state->planes[0].ptr != crtc->primary ||
668 state->state->planes[0].state == NULL ||
669 state->state->planes[0].state->crtc != crtc) {
670 dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
671 return -EINVAL;
672 }
673
674 ret = tilcdc_crtc_mode_valid(crtc, mode);
675 if (ret) {
676 dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
677 return -EINVAL;
678 }
679
680 return 0;
681}
682
/*
 * End-of-frame (vblank) interrupts are unmasked unconditionally in
 * tilcdc_crtc_enable_irqs() while the CRTC is on, so there is nothing
 * to switch on here.
 */
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	return 0;
}
687
/* Counterpart of tilcdc_crtc_enable_vblank(); intentionally a no-op. */
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}
691
/*
 * DRM reset hook: reset the software state, then make sure the raster
 * is actually off in hardware (it may have been left running by the
 * bootloader) by disabling it and waiting for the frame-done irq.
 */
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	drm_atomic_helper_crtc_reset(crtc);

	/* Turn the raster off if it for some reason is on. */
	pm_runtime_get_sync(dev->dev);
	if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
		/* Enable DMA Frame Done Interrupt */
		/* NOTE(review): this writes the rev 2 interrupt enable
		 * register unconditionally; on rev 1 hardware it may not
		 * actually unmask the frame-done irq - verify. */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
		tilcdc_clear_irqstatus(dev, 0xffffffff);

		tilcdc_crtc->frame_done = false;
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

		ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
					 tilcdc_crtc->frame_done,
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
				__func__);
	}
	pm_runtime_put_sync(dev->dev);
}
719
/* DRM CRTC vtable; atomic helpers handle set_config/page_flip/state. */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy        = tilcdc_crtc_destroy,
	.set_config     = drm_atomic_helper_set_config,
	.page_flip      = drm_atomic_helper_page_flip,
	.reset		= tilcdc_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank	= tilcdc_crtc_enable_vblank,
	.disable_vblank	= tilcdc_crtc_disable_vblank,
};
730
/* Atomic helper callbacks: mode fixup, state check, enable/disable. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup     = tilcdc_crtc_mode_fixup,
	.atomic_check	= tilcdc_crtc_atomic_check,
	.atomic_enable	= tilcdc_crtc_atomic_enable,
	.atomic_disable	= tilcdc_crtc_atomic_disable,
};
737
738int tilcdc_crtc_max_width(struct drm_crtc *crtc)
739{
740 struct drm_device *dev = crtc->dev;
741 struct tilcdc_drm_private *priv = dev->dev_private;
742 int max_width = 0;
743
744 if (priv->rev == 1)
745 max_width = 1024;
746 else if (priv->rev == 2)
747 max_width = 2048;
748
749 return max_width;
750}
751
/*
 * tilcdc_crtc_mode_valid() - prune display modes the LCDC cannot drive.
 *
 * Checks a candidate mode against the controller's physical limits:
 * active area, blanking-interval register field widths, optional DT
 * caps (max pixel clock, max width) and the configured memory
 * bandwidth budget.  Returns MODE_OK or the specific MODE_* reason
 * for rejection.
 */
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
		return MODE_VIRTUAL_X;

	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;

	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;

	DBG("Processing mode %dx%d@%d with pixel clock %d",
	    mode->hdisplay, mode->vdisplay,
	    drm_mode_vrefresh(mode), mode->clock);

	/* Derive the blanking intervals from the mode timings. */
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	/*
	 * The horizontal timing registers hold the value minus one in a
	 * 10-bit field, so 1..1024 is representable; (x-1) & ~0x3ff
	 * catches both 0 and anything above 1024.
	 */
	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}

	/* Vertical porches are programmed as-is into 8-bit fields (0..255). */
	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	/* Vertical sync width is a 6-bit minus-one field, i.e. 1..64. */
	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}

	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}

	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;

	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}

	return MODE_OK;
}
839
840void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
841 const struct tilcdc_panel_info *info)
842{
843 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
844 tilcdc_crtc->info = info;
845}
846
Jyri Sarha103cd8b2015-02-10 14:13:23 +0200847void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
848 bool simulate_vesa_sync)
849{
850 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
851
852 tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
853}
854
Rob Clark16ea9752013-01-08 15:04:28 -0600855void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
856{
Rob Clark16ea9752013-01-08 15:04:28 -0600857 struct drm_device *dev = crtc->dev;
858 struct tilcdc_drm_private *priv = dev->dev_private;
Jyri Sarha642e5162016-09-06 16:19:54 +0300859 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
Rob Clark16ea9752013-01-08 15:04:28 -0600860
Daniel Vetter33e5b662017-03-22 22:50:47 +0100861 drm_modeset_lock(&crtc->mutex, NULL);
Jyri Sarha642e5162016-09-06 16:19:54 +0300862 if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
863 if (tilcdc_crtc_is_on(crtc)) {
864 pm_runtime_get_sync(dev->dev);
865 tilcdc_crtc_disable(crtc);
Rob Clark16ea9752013-01-08 15:04:28 -0600866
Jyri Sarha642e5162016-09-06 16:19:54 +0300867 tilcdc_crtc_set_clk(crtc);
Rob Clark16ea9752013-01-08 15:04:28 -0600868
Jyri Sarha642e5162016-09-06 16:19:54 +0300869 tilcdc_crtc_enable(crtc);
870 pm_runtime_put_sync(dev->dev);
871 }
Rob Clark16ea9752013-01-08 15:04:28 -0600872 }
Daniel Vetter33e5b662017-03-22 22:50:47 +0100873 drm_modeset_unlock(&crtc->mutex);
Rob Clark16ea9752013-01-08 15:04:28 -0600874}
875
Jyri Sarha5895d082016-01-08 14:33:09 +0200876#define SYNC_LOST_COUNT_LIMIT 50
877
/*
 * tilcdc_crtc_irq() - LCDC interrupt service routine.
 *
 * Reads and acknowledges the raw interrupt status, then handles each
 * asserted condition: end-of-frame (vblank/page-flip completion),
 * FIFO underflow, palette load completion, sync-lost recovery and
 * frame-done signalling.  Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat, reg;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		/*
		 * A pending flip is latched into the scanout registers now;
		 * its completion event is sent on the *next* end-of-frame,
		 * hence skip_event below.
		 */
		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		/*
		 * A full frame completed without a sync-lost in between;
		 * reset the flood counter (one clean frame of grace first).
		 */
		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	/* Palette DMA finished; the load-done irq is one-shot per load. */
	if (stat & LCDC_PL_LOAD_DONE) {
		complete(&tilcdc_crtc->palette_loaded);
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}

	if (stat & LCDC_SYNC_LOST) {
		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			/* rev 1: recover by toggling the raster enable bit */
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			/*
			 * rev 2: after too many sync-lost irqs in a row,
			 * mask the irq and defer full recovery to the
			 * workqueue (can't sleep in irq context).
			 */
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}

	if (stat & LCDC_FRAME_DONE) {
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}
988
/*
 * tilcdc_crtc_create() - allocate and register the driver's single CRTC.
 *
 * Allocates the tilcdc_crtc (devm-managed), sets up the DMA-coherent
 * palette buffer, initializes the primary plane and synchronization
 * primitives, registers the CRTC with DRM and, for componentized
 * devices, resolves the OF graph port.  Stores the CRTC in
 * priv->crtc on success.  Returns 0 or a negative errno.
 */
int tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc;
	struct drm_crtc *crtc;
	int ret;

	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
	if (!tilcdc_crtc)
		return -ENOMEM;

	/*
	 * Palette buffer must be DMA-coherent: the LCDC fetches it
	 * directly.  First entry carries the palette mode marker.
	 */
	init_completion(&tilcdc_crtc->palette_loaded);
	tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
					TILCDC_PALETTE_SIZE,
					&tilcdc_crtc->palette_dma_handle,
					GFP_KERNEL | __GFP_ZERO);
	if (!tilcdc_crtc->palette_base)
		return -ENOMEM;
	*tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;

	crtc = &tilcdc_crtc->base;

	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
	if (ret < 0)
		goto fail;

	mutex_init(&tilcdc_crtc->enable_lock);

	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

	spin_lock_init(&tilcdc_crtc->irq_lock);
	INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);

	ret = drm_crtc_init_with_planes(dev, crtc,
					&tilcdc_crtc->primary,
					NULL,
					&tilcdc_crtc_funcs,
					"tilcdc crtc");
	if (ret < 0)
		goto fail;

	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

	if (priv->is_componentized) {
		crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
		if (!crtc->port) { /* This should never happen */
			dev_err(dev->dev, "Port node not found in %pOF\n",
				dev->dev->of_node);
			ret = -EINVAL;
			goto fail;
		}
	}

	priv->crtc = crtc;
	return 0;

fail:
	/*
	 * NOTE(review): this path also runs when drm_crtc_init_with_planes()
	 * (or even tilcdc_plane_init()) failed, so tilcdc_crtc_destroy() is
	 * invoked on a partially initialized crtc — confirm it tolerates
	 * that (it is defined elsewhere in this file).
	 */
	tilcdc_crtc_destroy(crtc);
	return ret;
}