 1/*
2 * Copyright (C) 2012-2016 Mentor Graphics Inc.
3 *
4 * Queued image conversion support, with tiling and rotation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17#include <linux/interrupt.h>
18#include <linux/dma-mapping.h>
19#include <video/imx-ipu-image-convert.h>
20#include "ipu-prv.h"
21
22/*
23 * The IC Resizer has a restriction that the output frame from the
24 * resizer must be 1024 or less in both width (pixels) and height
25 * (lines).
26 *
27 * The image converter attempts to split up a conversion when
28 * the desired output (converted) frame resolution exceeds the
29 * IC resizer limit of 1024 in either dimension.
30 *
31 * If either dimension of the output frame exceeds the limit, the
32 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
33 * of 4*4 or 16 tiles. A conversion is then carried out for each
34 * tile (but taking care to pass the full frame stride length to
35 * the DMA channel's parameter memory!). IDMA double-buffering is used
36 * to convert each tile back-to-back when possible (see note below
37 * when double_buffering boolean is set).
38 *
39 * Note that the input frame must be split up into the same number
40 * of tiles as the output frame.
41 *
42 * FIXME: at this point there is no attempt to deal with visible seams
43 * at the tile boundaries when upscaling. The seams are caused by a reset
44 * of the bilinear upscale interpolation when starting a new tile. The
45 * seams are barely visible for small upscale factors, but become
46 * increasingly visible as the upscale factor gets larger, since more
 47 * interpolated pixels get thrown out at the tile boundaries. A possible
48 * fix might be to overlap tiles of different sizes, but this must be done
49 * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
50 * alignment restrictions of each tile.
51 */
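/*
 * As a rough illustration of the splitting rule above (numbers are
 * hypothetical, not taken from the code): converting to a 2880x1620
 * output frame exceeds the 1024 resizer limit in both dimensions, so
 * the width is split into 4 stripes (2880 > 2048) and the height into
 * 2 stripes (1024 < 1620 <= 2048), giving 4x2 = 8 tiles of roughly
 * 720x810 output pixels each, with the input frame split into the
 * same 4x2 grid.
 */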
52
53#define MAX_STRIPES_W 4
54#define MAX_STRIPES_H 4
55#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
56
57#define MIN_W 16
58#define MIN_H 8
59#define MAX_W 4096
60#define MAX_H 4096
61
62enum ipu_image_convert_type {
63 IMAGE_CONVERT_IN = 0,
64 IMAGE_CONVERT_OUT,
65};
66
67struct ipu_image_convert_dma_buf {
68 void *virt;
69 dma_addr_t phys;
70 unsigned long len;
71};
72
73struct ipu_image_convert_dma_chan {
74 int in;
75 int out;
76 int rot_in;
77 int rot_out;
78 int vdi_in_p;
79 int vdi_in;
80 int vdi_in_n;
81};
82
83/* dimensions of one tile */
84struct ipu_image_tile {
85 u32 width;
86 u32 height;
 87 u32 left;
 88 u32 top;
 89 /* size and strides are in bytes */
90 u32 size;
91 u32 stride;
92 u32 rot_stride;
93 /* start Y or packed offset of this tile */
94 u32 offset;
95 /* offset from start to tile in U plane, for planar formats */
96 u32 u_off;
97 /* offset from start to tile in V plane, for planar formats */
98 u32 v_off;
99};
100
101struct ipu_image_convert_image {
102 struct ipu_image base;
103 enum ipu_image_convert_type type;
104
105 const struct ipu_image_pixfmt *fmt;
106 unsigned int stride;
107
108 /* # of rows (horizontal stripes) if dest height is > 1024 */
109 unsigned int num_rows;
110 /* # of columns (vertical stripes) if dest width is > 1024 */
111 unsigned int num_cols;
112
113 struct ipu_image_tile tile[MAX_TILES];
114};
115
116struct ipu_image_pixfmt {
117 u32 fourcc; /* V4L2 fourcc */
118 int bpp; /* total bpp */
119 int uv_width_dec; /* decimation in width for U/V planes */
120 int uv_height_dec; /* decimation in height for U/V planes */
121 bool planar; /* planar format */
122 bool uv_swapped; /* U and V planes are swapped */
123 bool uv_packed; /* partial planar (U and V in same plane) */
124};
125
126struct ipu_image_convert_ctx;
127struct ipu_image_convert_chan;
128struct ipu_image_convert_priv;
129
130struct ipu_image_convert_ctx {
131 struct ipu_image_convert_chan *chan;
132
133 ipu_image_convert_cb_t complete;
134 void *complete_context;
135
136 /* Source/destination image data and rotation mode */
137 struct ipu_image_convert_image in;
138 struct ipu_image_convert_image out;
139 enum ipu_rotate_mode rot_mode;
 140 u32 downsize_coeff_h;
141 u32 downsize_coeff_v;
142 u32 image_resize_coeff_h;
143 u32 image_resize_coeff_v;
144 u32 resize_coeffs_h[MAX_STRIPES_W];
145 u32 resize_coeffs_v[MAX_STRIPES_H];
 146
147 /* intermediate buffer for rotation */
148 struct ipu_image_convert_dma_buf rot_intermediate[2];
149
150 /* current buffer number for double buffering */
151 int cur_buf_num;
152
153 bool aborting;
154 struct completion aborted;
155
156 /* can we use double-buffering for this conversion operation? */
157 bool double_buffering;
158 /* num_rows * num_cols */
159 unsigned int num_tiles;
160 /* next tile to process */
161 unsigned int next_tile;
162 /* where to place converted tile in dest image */
163 unsigned int out_tile_map[MAX_TILES];
164
165 struct list_head list;
166};
167
168struct ipu_image_convert_chan {
169 struct ipu_image_convert_priv *priv;
170
171 enum ipu_ic_task ic_task;
172 const struct ipu_image_convert_dma_chan *dma_ch;
173
174 struct ipu_ic *ic;
175 struct ipuv3_channel *in_chan;
176 struct ipuv3_channel *out_chan;
177 struct ipuv3_channel *rotation_in_chan;
178 struct ipuv3_channel *rotation_out_chan;
179
180 /* the IPU end-of-frame irqs */
181 int out_eof_irq;
182 int rot_out_eof_irq;
183
184 spinlock_t irqlock;
185
186 /* list of convert contexts */
187 struct list_head ctx_list;
188 /* queue of conversion runs */
189 struct list_head pending_q;
190 /* queue of completed runs */
191 struct list_head done_q;
192
193 /* the current conversion run */
194 struct ipu_image_convert_run *current_run;
195};
196
197struct ipu_image_convert_priv {
198 struct ipu_image_convert_chan chan[IC_NUM_TASKS];
199 struct ipu_soc *ipu;
200};
201
202static const struct ipu_image_convert_dma_chan
203image_convert_dma_chan[IC_NUM_TASKS] = {
204 [IC_TASK_VIEWFINDER] = {
205 .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
206 .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
207 .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
208 .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
209 .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
210 .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
211 .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
212 },
213 [IC_TASK_POST_PROCESSOR] = {
214 .in = IPUV3_CHANNEL_MEM_IC_PP,
215 .out = IPUV3_CHANNEL_IC_PP_MEM,
216 .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
217 .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
218 },
219};
220
221static const struct ipu_image_pixfmt image_convert_formats[] = {
222 {
223 .fourcc = V4L2_PIX_FMT_RGB565,
224 .bpp = 16,
225 }, {
226 .fourcc = V4L2_PIX_FMT_RGB24,
227 .bpp = 24,
228 }, {
229 .fourcc = V4L2_PIX_FMT_BGR24,
230 .bpp = 24,
231 }, {
232 .fourcc = V4L2_PIX_FMT_RGB32,
233 .bpp = 32,
234 }, {
235 .fourcc = V4L2_PIX_FMT_BGR32,
236 .bpp = 32,
237 }, {
 238 .fourcc = V4L2_PIX_FMT_XRGB32,
239 .bpp = 32,
240 }, {
241 .fourcc = V4L2_PIX_FMT_XBGR32,
242 .bpp = 32,
243 }, {
 244 .fourcc = V4L2_PIX_FMT_YUYV,
245 .bpp = 16,
246 .uv_width_dec = 2,
247 .uv_height_dec = 1,
248 }, {
249 .fourcc = V4L2_PIX_FMT_UYVY,
250 .bpp = 16,
251 .uv_width_dec = 2,
252 .uv_height_dec = 1,
253 }, {
254 .fourcc = V4L2_PIX_FMT_YUV420,
255 .bpp = 12,
256 .planar = true,
257 .uv_width_dec = 2,
258 .uv_height_dec = 2,
259 }, {
260 .fourcc = V4L2_PIX_FMT_YVU420,
261 .bpp = 12,
262 .planar = true,
263 .uv_width_dec = 2,
264 .uv_height_dec = 2,
265 .uv_swapped = true,
266 }, {
267 .fourcc = V4L2_PIX_FMT_NV12,
268 .bpp = 12,
269 .planar = true,
270 .uv_width_dec = 2,
271 .uv_height_dec = 2,
272 .uv_packed = true,
273 }, {
274 .fourcc = V4L2_PIX_FMT_YUV422P,
275 .bpp = 16,
276 .planar = true,
277 .uv_width_dec = 2,
278 .uv_height_dec = 1,
279 }, {
280 .fourcc = V4L2_PIX_FMT_NV16,
281 .bpp = 16,
282 .planar = true,
283 .uv_width_dec = 2,
284 .uv_height_dec = 1,
285 .uv_packed = true,
286 },
287};
288
289static const struct ipu_image_pixfmt *get_format(u32 fourcc)
290{
291 const struct ipu_image_pixfmt *ret = NULL;
292 unsigned int i;
293
294 for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
295 if (image_convert_formats[i].fourcc == fourcc) {
296 ret = &image_convert_formats[i];
297 break;
298 }
299 }
300
301 return ret;
302}
303
304static void dump_format(struct ipu_image_convert_ctx *ctx,
305 struct ipu_image_convert_image *ic_image)
306{
307 struct ipu_image_convert_chan *chan = ctx->chan;
308 struct ipu_image_convert_priv *priv = chan->priv;
309
310 dev_dbg(priv->ipu->dev,
 311 "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
 312 chan->ic_task, ctx,
313 ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
314 ic_image->base.pix.width, ic_image->base.pix.height,
315 ic_image->num_cols, ic_image->num_rows,
 316 ic_image->fmt->fourcc & 0xff,
317 (ic_image->fmt->fourcc >> 8) & 0xff,
318 (ic_image->fmt->fourcc >> 16) & 0xff,
319 (ic_image->fmt->fourcc >> 24) & 0xff);
320}
321
322int ipu_image_convert_enum_format(int index, u32 *fourcc)
323{
324 const struct ipu_image_pixfmt *fmt;
325
326 if (index >= (int)ARRAY_SIZE(image_convert_formats))
327 return -EINVAL;
328
329 /* Format found */
330 fmt = &image_convert_formats[index];
331 *fourcc = fmt->fourcc;
332 return 0;
333}
334EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
335
336static void free_dma_buf(struct ipu_image_convert_priv *priv,
337 struct ipu_image_convert_dma_buf *buf)
338{
339 if (buf->virt)
340 dma_free_coherent(priv->ipu->dev,
341 buf->len, buf->virt, buf->phys);
342 buf->virt = NULL;
343 buf->phys = 0;
344}
345
346static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
347 struct ipu_image_convert_dma_buf *buf,
348 int size)
349{
350 buf->len = PAGE_ALIGN(size);
351 buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
352 GFP_DMA | GFP_KERNEL);
353 if (!buf->virt) {
354 dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
355 return -ENOMEM;
356 }
357
358 return 0;
359}
360
361static inline int num_stripes(int dim)
362{
363 if (dim <= 1024)
364 return 1;
365 else if (dim <= 2048)
366 return 2;
367 else
368 return 4;
369}
370
 371/*
372 * Calculate downsizing coefficients, which are the same for all tiles,
373 * and bilinear resizing coefficients, which are used to find the best
374 * seam positions.
375 */
376static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
377 struct ipu_image *in,
378 struct ipu_image *out)
379{
380 u32 downsized_width = in->rect.width;
381 u32 downsized_height = in->rect.height;
382 u32 downsize_coeff_v = 0;
383 u32 downsize_coeff_h = 0;
384 u32 resized_width = out->rect.width;
385 u32 resized_height = out->rect.height;
386 u32 resize_coeff_h;
387 u32 resize_coeff_v;
388
389 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
390 resized_width = out->rect.height;
391 resized_height = out->rect.width;
392 }
393
394 /* Do not let invalid input lead to an endless loop below */
395 if (WARN_ON(resized_width == 0 || resized_height == 0))
396 return -EINVAL;
397
398 while (downsized_width >= resized_width * 2) {
399 downsized_width >>= 1;
400 downsize_coeff_h++;
401 }
402
403 while (downsized_height >= resized_height * 2) {
404 downsized_height >>= 1;
405 downsize_coeff_v++;
406 }
407
408 /*
409 * Calculate the bilinear resizing coefficients that could be used if
410 * we were converting with a single tile. The bottom right output pixel
411 * should sample as close as possible to the bottom right input pixel
412 * out of the decimator, but not overshoot it:
413 */
414 resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
415 resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
416
417 dev_dbg(ctx->chan->priv->ipu->dev,
418 "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
419 __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
420 resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);
421
422 if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
423 resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
424 return -EINVAL;
425
426 ctx->downsize_coeff_h = downsize_coeff_h;
427 ctx->downsize_coeff_v = downsize_coeff_v;
428 ctx->image_resize_coeff_h = resize_coeff_h;
429 ctx->image_resize_coeff_v = resize_coeff_v;
430
431 return 0;
432}
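/*
 * Worked example with illustrative numbers: scaling a 1920x1080 input
 * rectangle to 640x480 with no rotation first downsizes 1920 -> 960 and
 * 1080 -> 540 (downsize_coeff_h = downsize_coeff_v = 1), then computes
 * the single-tile bilinear coefficients as
 *   resize_coeff_h = 8192 * (960 - 1) / (640 - 1) = 12294
 *   resize_coeff_v = 8192 * (540 - 1) / (480 - 1) = 9218
 * both of which fit within the 14-bit limit checked above.
 */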
433
 434#define round_closest(x, y) round_down((x) + (y)/2, (y))
435
436/*
 437 * Find the best aligned seam position in the interval [out_start, out_end].
438 * Rotation and image offsets are out of scope.
439 *
 440 * @out_start: start of interval, must be within 1024 pixels / lines
441 * of out_end
442 * @out_end: end of interval, smaller than or equal to out_edge
443 * @in_edge: input right / bottom edge
444 * @out_edge: output right / bottom edge
445 * @in_align: input alignment, either horizontal 8-byte line start address
446 * alignment, or pixel alignment due to image format
447 * @out_align: output alignment, either horizontal 8-byte line start address
448 * alignment, or pixel alignment due to image format or rotator
449 * block size
450 * @in_burst: horizontal input burst size in case of horizontal flip
451 * @out_burst: horizontal output burst size or rotator block size
452 * @downsize_coeff: downsizing section coefficient
453 * @resize_coeff: main processing section resizing coefficient
454 * @_in_seam: aligned input seam position return value
455 * @_out_seam: aligned output seam position return value
456 */
457static void find_best_seam(struct ipu_image_convert_ctx *ctx,
458 unsigned int out_start,
459 unsigned int out_end,
460 unsigned int in_edge,
461 unsigned int out_edge,
462 unsigned int in_align,
463 unsigned int out_align,
464 unsigned int in_burst,
465 unsigned int out_burst,
466 unsigned int downsize_coeff,
467 unsigned int resize_coeff,
468 u32 *_in_seam,
469 u32 *_out_seam)
470{
471 struct device *dev = ctx->chan->priv->ipu->dev;
472 unsigned int out_pos;
473 /* Input / output seam position candidates */
474 unsigned int out_seam = 0;
475 unsigned int in_seam = 0;
476 unsigned int min_diff = UINT_MAX;
477
478 /*
479 * Output tiles must start at a multiple of 8 bytes horizontally and
480 * possibly at an even line horizontally depending on the pixel format.
481 * Only consider output aligned positions for the seam.
482 */
483 out_start = round_up(out_start, out_align);
484 for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
485 unsigned int in_pos;
486 unsigned int in_pos_aligned;
487 unsigned int abs_diff;
488
489 /*
490 * Tiles in the right row / bottom column may not be allowed to
491 * overshoot horizontally / vertically. out_burst may be the
492 * actual DMA burst size, or the rotator block size.
493 */
494 if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
495 continue;
496
497 /*
498 * Input sample position, corresponding to out_pos, 19.13 fixed
499 * point.
500 */
501 in_pos = (out_pos * resize_coeff) << downsize_coeff;
502 /*
503 * The closest input sample position that we could actually
504 * start the input tile at, 19.13 fixed point.
505 */
506 in_pos_aligned = round_closest(in_pos, 8192U * in_align);
507
508 if ((in_burst > 1) &&
509 (in_edge - in_pos_aligned / 8192U) % in_burst)
510 continue;
511
512 if (in_pos < in_pos_aligned)
513 abs_diff = in_pos_aligned - in_pos;
514 else
515 abs_diff = in_pos - in_pos_aligned;
516
517 if (abs_diff < min_diff) {
518 in_seam = in_pos_aligned;
519 out_seam = out_pos;
520 min_diff = abs_diff;
521 }
522 }
523
524 *_out_seam = out_seam;
525 /* Convert 19.13 fixed point to integer seam position */
526 *_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U);
527
528 dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n",
529 __func__, out_seam, out_align, out_start, out_end,
530 *_in_seam, in_align, min_diff / 8192,
531 DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
532}
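/*
 * Illustrative example of the search above: with resize_coeff = 12294,
 * downsize_coeff = 1 and in_align = 8, an output candidate at
 * out_pos = 512 maps to in_pos = (512 * 12294) << 1 = 12589056 in 19.13
 * fixed point, i.e. 1536.75 input pixels. The closest aligned input
 * start is 1536, so abs_diff = 6144 (0.75 pixel); the candidate with
 * the smallest such fractional mismatch wins.
 */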
533
534/*
535 * Tile left edges are required to be aligned to multiples of 8 bytes
536 * by the IDMAC.
537 */
538static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
539{
540 if (fmt->planar)
541 return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
542 else
543 return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
544}
545
546/*
547 * Tile top edge alignment is only limited by chroma subsampling.
548 */
549static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
550{
551 return fmt->uv_height_dec > 1 ? 2 : 1;
552}
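/*
 * With the format table above, the two helpers yield for example:
 * left alignment of 16 for YUV420/YVU420, 8 for NV12/NV16 and RGB24,
 * 4 for the 16-bit formats (RGB565, YUYV, UYVY) and 2 for 32-bit RGB;
 * top alignment of 2 for the 4:2:0 formats and 1 otherwise.
 */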
553
 554/*
555 * We have to adjust the tile width such that the tile physaddrs and
556 * U and V plane offsets are multiples of 8 bytes as required by
557 * the IPU DMA Controller. For the planar formats, this corresponds
558 * to a pixel alignment of 16 (but use a more formal equation since
559 * the variables are available). For all the packed formats, 8 is
560 * good enough.
561 */
562static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
563{
564 return fmt->planar ? 8 * fmt->uv_width_dec : 8;
565}
566
567/*
568 * For tile height alignment, we have to ensure that the output tile
569 * heights are multiples of 8 lines if the IRT is required by the
570 * given rotation mode (the IRT performs rotations on 8x8 blocks
571 * at a time). If the IRT is not used, or for input image tiles,
572 * 2 lines are good enough.
573 */
574static inline u32 tile_height_align(enum ipu_image_convert_type type,
575 enum ipu_rotate_mode rot_mode)
576{
577 return (type == IMAGE_CONVERT_OUT &&
578 ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
579}
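/*
 * Example: a planar 4:2:0 tile width must be a multiple of 16
 * (8 * uv_width_dec), while packed formats only need multiples of 8.
 * A rotated output tile additionally needs its height rounded to a
 * multiple of 8 for the IRT; input tiles and non-rotated output tiles
 * only need a multiple of 2.
 */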
580
 581/*
 582 * Fill in left position and width for all tiles in an input column, and
583 * for all corresponding output tiles. If the 90° rotator is used, the output
584 * tiles are in a row, and output tile top position and height are set.
585 */
586static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
587 unsigned int col,
588 struct ipu_image_convert_image *in,
589 unsigned int in_left, unsigned int in_width,
590 struct ipu_image_convert_image *out,
591 unsigned int out_left, unsigned int out_width)
592{
593 unsigned int row, tile_idx;
594 struct ipu_image_tile *in_tile, *out_tile;
595
596 for (row = 0; row < in->num_rows; row++) {
597 tile_idx = in->num_cols * row + col;
598 in_tile = &in->tile[tile_idx];
599 out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
600
601 in_tile->left = in_left;
602 in_tile->width = in_width;
603
604 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
605 out_tile->top = out_left;
606 out_tile->height = out_width;
607 } else {
608 out_tile->left = out_left;
609 out_tile->width = out_width;
610 }
611 }
612}
613
614/*
 615 * Fill in top position and height for all tiles in an input row, and
616 * for all corresponding output tiles. If the 90° rotator is used, the output
617 * tiles are in a column, and output tile left position and width are set.
618 */
619static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
620 struct ipu_image_convert_image *in,
621 unsigned int in_top, unsigned int in_height,
622 struct ipu_image_convert_image *out,
623 unsigned int out_top, unsigned int out_height)
624{
625 unsigned int col, tile_idx;
626 struct ipu_image_tile *in_tile, *out_tile;
627
628 for (col = 0; col < in->num_cols; col++) {
629 tile_idx = in->num_cols * row + col;
630 in_tile = &in->tile[tile_idx];
631 out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
632
633 in_tile->top = in_top;
634 in_tile->height = in_height;
635
636 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
637 out_tile->left = out_top;
638 out_tile->width = out_height;
639 } else {
640 out_tile->top = out_top;
641 out_tile->height = out_height;
642 }
643 }
644}
645
646/*
647 * Find the best horizontal and vertical seam positions to split into tiles.
648 * Minimize the fractional part of the input sampling position for the
649 * top / left pixels of each tile.
650 */
651static void find_seams(struct ipu_image_convert_ctx *ctx,
652 struct ipu_image_convert_image *in,
653 struct ipu_image_convert_image *out)
654{
655 struct device *dev = ctx->chan->priv->ipu->dev;
656 unsigned int resized_width = out->base.rect.width;
657 unsigned int resized_height = out->base.rect.height;
658 unsigned int col;
659 unsigned int row;
660 unsigned int in_left_align = tile_left_align(in->fmt);
661 unsigned int in_top_align = tile_top_align(in->fmt);
662 unsigned int out_left_align = tile_left_align(out->fmt);
663 unsigned int out_top_align = tile_top_align(out->fmt);
664 unsigned int out_width_align = tile_width_align(out->fmt);
665 unsigned int out_height_align = tile_height_align(out->type,
666 ctx->rot_mode);
667 unsigned int in_right = in->base.rect.width;
668 unsigned int in_bottom = in->base.rect.height;
669 unsigned int out_right = out->base.rect.width;
670 unsigned int out_bottom = out->base.rect.height;
671 unsigned int flipped_out_left;
672 unsigned int flipped_out_top;
673
674 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
675 /* Switch width/height and align top left to IRT block size */
676 resized_width = out->base.rect.height;
677 resized_height = out->base.rect.width;
678 out_left_align = out_height_align;
679 out_top_align = out_width_align;
680 out_width_align = out_left_align;
681 out_height_align = out_top_align;
682 out_right = out->base.rect.height;
683 out_bottom = out->base.rect.width;
684 }
685
686 for (col = in->num_cols - 1; col > 0; col--) {
687 bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
688 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
689 bool allow_out_overshoot = (col < in->num_cols - 1) &&
690 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
691 unsigned int out_start;
692 unsigned int out_end;
693 unsigned int in_left;
694 unsigned int out_left;
695
696 /*
697 * Align input width to burst length if the scaling step flips
698 * horizontally.
699 */
700
701 /* Start within 1024 pixels of the right edge */
702 out_start = max_t(int, 0, out_right - 1024);
703 /* End before having to add more columns to the left */
704 out_end = min_t(unsigned int, out_right, col * 1024);
705
706 find_best_seam(ctx, out_start, out_end,
707 in_right, out_right,
708 in_left_align, out_left_align,
709 allow_in_overshoot ? 1 : 8 /* burst length */,
710 allow_out_overshoot ? 1 : out_width_align,
711 ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
712 &in_left, &out_left);
713
714 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
715 flipped_out_left = resized_width - out_right;
716 else
717 flipped_out_left = out_left;
718
719 fill_tile_column(ctx, col, in, in_left, in_right - in_left,
720 out, flipped_out_left, out_right - out_left);
721
722 dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
723 in_left, in_right - in_left,
724 flipped_out_left, out_right - out_left);
725
726 in_right = in_left;
727 out_right = out_left;
728 }
729
730 flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
731 resized_width - out_right : 0;
732
733 fill_tile_column(ctx, 0, in, 0, in_right,
734 out, flipped_out_left, out_right);
735
736 dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
737 in_right, flipped_out_left, out_right);
738
739 for (row = in->num_rows - 1; row > 0; row--) {
740 bool allow_overshoot = row < in->num_rows - 1;
741 unsigned int out_start;
742 unsigned int out_end;
743 unsigned int in_top;
744 unsigned int out_top;
745
746 /* Start within 1024 lines of the bottom edge */
747 out_start = max_t(int, 0, out_bottom - 1024);
748 /* End before having to add more rows above */
749 out_end = min_t(unsigned int, out_bottom, row * 1024);
750
751 find_best_seam(ctx, out_start, out_end,
752 in_bottom, out_bottom,
753 in_top_align, out_top_align,
754 1, allow_overshoot ? 1 : out_height_align,
755 ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
756 &in_top, &out_top);
757
758 if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
759 ipu_rot_mode_is_irt(ctx->rot_mode))
760 flipped_out_top = resized_height - out_bottom;
761 else
762 flipped_out_top = out_top;
763
764 fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
765 out, flipped_out_top, out_bottom - out_top);
766
767 dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
768 in_top, in_bottom - in_top,
769 flipped_out_top, out_bottom - out_top);
770
771 in_bottom = in_top;
772 out_bottom = out_top;
773 }
774
775 if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
776 ipu_rot_mode_is_irt(ctx->rot_mode))
777 flipped_out_top = resized_height - out_bottom;
778 else
779 flipped_out_top = 0;
780
781 fill_tile_row(ctx, 0, in, 0, in_bottom,
782 out, flipped_out_top, out_bottom);
783
784 dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
785 in_bottom, flipped_out_top, out_bottom);
786}
787
 788static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
789 struct ipu_image_convert_image *image)
790{
 791 struct ipu_image_convert_chan *chan = ctx->chan;
792 struct ipu_image_convert_priv *priv = chan->priv;
 793 unsigned int i;
 794
795 for (i = 0; i < ctx->num_tiles; i++) {
 796 struct ipu_image_tile *tile;
 797 const unsigned int row = i / image->num_cols;
798 const unsigned int col = i % image->num_cols;
 799
 800 if (image->type == IMAGE_CONVERT_OUT)
801 tile = &image->tile[ctx->out_tile_map[i]];
802 else
803 tile = &image->tile[i];
804
 805 tile->size = ((tile->height * image->fmt->bpp) >> 3) *
806 tile->width;
807
808 if (image->fmt->planar) {
809 tile->stride = tile->width;
810 tile->rot_stride = tile->height;
811 } else {
812 tile->stride =
813 (image->fmt->bpp * tile->width) >> 3;
814 tile->rot_stride =
815 (image->fmt->bpp * tile->height) >> 3;
816 }
 817
818 dev_dbg(priv->ipu->dev,
819 "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
820 chan->ic_task, ctx,
821 image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
822 row, col,
823 tile->width, tile->height, tile->left, tile->top);
 824 }
825}
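/*
 * Example of the arithmetic above (hypothetical tile sizes): a 960x544
 * YUV420 tile (12 bpp, planar) gets size = ((544 * 12) >> 3) * 960 =
 * 783360 bytes, stride = 960 and rot_stride = 544, while a packed
 * 960x544 RGB565 tile (16 bpp) gets stride = 1920 and
 * rot_stride = 1088 bytes.
 */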
826
827/*
828 * Use the rotation transformation to find the tile coordinates
829 * (row, col) of a tile in the destination frame that corresponds
830 * to the given tile coordinates of a source frame. The destination
831 * coordinate is then converted to a tile index.
832 */
833static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
834 int src_row, int src_col)
835{
836 struct ipu_image_convert_chan *chan = ctx->chan;
837 struct ipu_image_convert_priv *priv = chan->priv;
838 struct ipu_image_convert_image *s_image = &ctx->in;
839 struct ipu_image_convert_image *d_image = &ctx->out;
840 int dst_row, dst_col;
841
842 /* with no rotation it's a 1:1 mapping */
843 if (ctx->rot_mode == IPU_ROTATE_NONE)
844 return src_row * s_image->num_cols + src_col;
845
846 /*
847 * before doing the transform, first we have to translate
848 * source row,col for an origin in the center of s_image
849 */
850 src_row = src_row * 2 - (s_image->num_rows - 1);
851 src_col = src_col * 2 - (s_image->num_cols - 1);
852
853 /* do the rotation transform */
854 if (ctx->rot_mode & IPU_ROT_BIT_90) {
855 dst_col = -src_row;
856 dst_row = src_col;
857 } else {
858 dst_col = src_col;
859 dst_row = src_row;
860 }
861
862 /* apply flip */
863 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
864 dst_col = -dst_col;
865 if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
866 dst_row = -dst_row;
867
868 dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
869 chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
870
871 /*
872 * finally translate dest row,col using an origin in upper
873 * left of d_image
874 */
875 dst_row += d_image->num_rows - 1;
876 dst_col += d_image->num_cols - 1;
877 dst_row /= 2;
878 dst_col /= 2;
879
880 return dst_row * d_image->num_cols + dst_col;
881}
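/*
 * Example: for a 2x2 tile grid and a rotation mode with only
 * IPU_ROT_BIT_90 set, source tile (row 0, col 1) becomes (-1, 1) around
 * the center, rotates to (1, 1), and translates back to destination
 * (row 1, col 1), i.e. destination tile index 3.
 */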
882
883/*
 884 * Fill the out_tile_map[] with transformed destination tile indices.
885 */
886static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
887{
888 struct ipu_image_convert_image *s_image = &ctx->in;
889 unsigned int row, col, tile = 0;
890
891 for (row = 0; row < s_image->num_rows; row++) {
892 for (col = 0; col < s_image->num_cols; col++) {
893 ctx->out_tile_map[tile] =
894 transform_tile_index(ctx, row, col);
895 tile++;
896 }
897 }
898}
899
 900static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
 901 struct ipu_image_convert_image *image)
 902{
903 struct ipu_image_convert_chan *chan = ctx->chan;
904 struct ipu_image_convert_priv *priv = chan->priv;
905 const struct ipu_image_pixfmt *fmt = image->fmt;
906 unsigned int row, col, tile = 0;
 907 u32 H, top, y_stride, uv_stride;
 908 u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
909 u32 y_row_off, y_col_off, y_off;
910 u32 y_size, uv_size;
911
912 /* setup some convenience vars */
913 H = image->base.pix.height;
914
915 y_stride = image->stride;
916 uv_stride = y_stride / fmt->uv_width_dec;
917 if (fmt->uv_packed)
918 uv_stride *= 2;
919
920 y_size = H * y_stride;
921 uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
922
923 for (row = 0; row < image->num_rows; row++) {
 924 top = image->tile[tile].top;
925 y_row_off = top * y_stride;
926 uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
 927
 928 for (col = 0; col < image->num_cols; col++) {
 929 y_col_off = image->tile[tile].left;
 930 uv_col_off = y_col_off / fmt->uv_width_dec;
931 if (fmt->uv_packed)
932 uv_col_off *= 2;
933
934 y_off = y_row_off + y_col_off;
935 uv_off = uv_row_off + uv_col_off;
936
937 u_off = y_size - y_off + uv_off;
938 v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
939 if (fmt->uv_swapped) {
940 tmp = u_off;
941 u_off = v_off;
942 v_off = tmp;
943 }
944
945 image->tile[tile].offset = y_off;
946 image->tile[tile].u_off = u_off;
947 image->tile[tile++].v_off = v_off;
948
 949 if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
950 dev_err(priv->ipu->dev,
951 "task %u: ctx %p: %s@[%d,%d]: "
952 "y_off %08x, u_off %08x, v_off %08x\n",
953 chan->ic_task, ctx,
954 image->type == IMAGE_CONVERT_IN ?
955 "Input" : "Output", row, col,
956 y_off, u_off, v_off);
957 return -EINVAL;
958 }
 959 }
960 }
 961
 962 return 0;
 963}
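/*
 * Illustrative numbers for the planar math above: for a 1920x1080
 * YUV420 frame (stride 1920) and a tile at left = 960, top = 540,
 * y_off = 540 * 1920 + 960 = 1037760 and uv_off = 540 * 960 / 2 + 480 =
 * 259680, so u_off = y_size - y_off + uv_off = 1295520 and
 * v_off = u_off + uv_size = 1813920. The U/V offsets are measured from
 * the tile's own luma offset (which becomes the channel base address),
 * hence the "y_size - y_off" term; all three values are multiples of 8
 * as required.
 */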
964
 965static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
 966 struct ipu_image_convert_image *image)
 967{
968 struct ipu_image_convert_chan *chan = ctx->chan;
969 struct ipu_image_convert_priv *priv = chan->priv;
970 const struct ipu_image_pixfmt *fmt = image->fmt;
971 unsigned int row, col, tile = 0;
 972 u32 bpp, stride, offset;
 973 u32 row_off, col_off;
974
975 /* setup some convenience vars */
976 stride = image->stride;
977 bpp = fmt->bpp;
978
979 for (row = 0; row < image->num_rows; row++) {
 980 row_off = image->tile[tile].top * stride;
 981
982 for (col = 0; col < image->num_cols; col++) {
 983 col_off = (image->tile[tile].left * bpp) >> 3;
 984
 985 offset = row_off + col_off;
986
987 image->tile[tile].offset = offset;
 988 image->tile[tile].u_off = 0;
989 image->tile[tile++].v_off = 0;
990
 991 if (offset & 0x7) {
992 dev_err(priv->ipu->dev,
993 "task %u: ctx %p: %s@[%d,%d]: "
994 "phys %08x\n",
995 chan->ic_task, ctx,
996 image->type == IMAGE_CONVERT_IN ?
997 "Input" : "Output", row, col,
998 row_off + col_off);
999 return -EINVAL;
1000 }
 1001 }
1002 }
 1003
 1004 return 0;
 1005}
 1006
 1007static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
 1008 struct ipu_image_convert_image *image)
1009{
1010 if (image->fmt->planar)
 1011 return calc_tile_offsets_planar(ctx, image);
1012
1013 return calc_tile_offsets_packed(ctx, image);
 1014}
1015
1016/*
 1017 * Calculate the resizing ratio for the IC main processing section given input
1018 * size, fixed downsizing coefficient, and output size.
1019 * Either round to closest for the next tile's first pixel to minimize seams
1020 * and distortion (for all but right column / bottom row), or round down to
1021 * avoid sampling beyond the edges of the input image for this tile's last
1022 * pixel.
1023 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
1024 */
1025static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
1026 u32 output_size, bool allow_overshoot)
1027{
1028 u32 downsized = input_size >> downsize_coeff;
1029
1030 if (allow_overshoot)
1031 return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
1032 else
1033 return 8192 * (downsized - 1) / (output_size - 1);
1034}
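/*
 * Example with hypothetical numbers: upscaling a 100-pixel downsized
 * input to a 300-pixel output tile gives 2731 with rounding to closest
 * (the last output pixel samples at 299 * 2731 / 8192 = 99.7, slightly
 * past the tile edge) versus 2712 with rounding down (it samples at
 * about 99.0, staying inside the input tile).
 */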
1035
1036/*
1037 * Slightly modify resize coefficients per tile to hide the bilinear
1038 * interpolator reset at tile borders, shifting the right / bottom edge
1039 * by up to a half input pixel. This removes noticeable seams between
1040 * tiles at higher upscaling factors.
1041 */
1042static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
1043{
1044 struct ipu_image_convert_chan *chan = ctx->chan;
1045 struct ipu_image_convert_priv *priv = chan->priv;
1046 struct ipu_image_tile *in_tile, *out_tile;
1047 unsigned int col, row, tile_idx;
1048 unsigned int last_output;
1049
1050 for (col = 0; col < ctx->in.num_cols; col++) {
1051 bool closest = (col < ctx->in.num_cols - 1) &&
1052 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
1053 u32 resized_width;
1054 u32 resize_coeff_h;
1055
1056 tile_idx = col;
1057 in_tile = &ctx->in.tile[tile_idx];
1058 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1059
1060 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1061 resized_width = out_tile->height;
1062 else
1063 resized_width = out_tile->width;
1064
1065 resize_coeff_h = calc_resize_coeff(in_tile->width,
1066 ctx->downsize_coeff_h,
1067 resized_width, closest);
1068
1069 dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
1070 __func__, col, resize_coeff_h);
1071
1072
1073 for (row = 0; row < ctx->in.num_rows; row++) {
1074 tile_idx = row * ctx->in.num_cols + col;
1075 in_tile = &ctx->in.tile[tile_idx];
1076 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1077
1078 /*
1079 * With the horizontal scaling factor known, round up
1080 * resized width (output width or height) to burst size.
1081 */
1082 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1083 out_tile->height = round_up(resized_width, 8);
1084 else
1085 out_tile->width = round_up(resized_width, 8);
1086
1087 /*
1088 * Calculate input width from the last accessed input
1089 * pixel given resized width and scaling coefficients.
1090 * Round up to burst size.
1091 */
1092 last_output = round_up(resized_width, 8) - 1;
1093 if (closest)
1094 last_output++;
1095 in_tile->width = round_up(
1096 (DIV_ROUND_UP(last_output * resize_coeff_h,
1097 8192) + 1)
1098 << ctx->downsize_coeff_h, 8);
1099 }
1100
1101 ctx->resize_coeffs_h[col] = resize_coeff_h;
1102 }
1103
1104 for (row = 0; row < ctx->in.num_rows; row++) {
1105 bool closest = (row < ctx->in.num_rows - 1) &&
1106 !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
1107 u32 resized_height;
1108 u32 resize_coeff_v;
1109
1110 tile_idx = row * ctx->in.num_cols;
1111 in_tile = &ctx->in.tile[tile_idx];
1112 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1113
1114 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1115 resized_height = out_tile->width;
1116 else
1117 resized_height = out_tile->height;
1118
1119 resize_coeff_v = calc_resize_coeff(in_tile->height,
1120 ctx->downsize_coeff_v,
1121 resized_height, closest);
1122
1123 dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
1124 __func__, row, resize_coeff_v);
1125
1126 for (col = 0; col < ctx->in.num_cols; col++) {
1127 tile_idx = row * ctx->in.num_cols + col;
1128 in_tile = &ctx->in.tile[tile_idx];
1129 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1130
1131 /*
1132 * With the vertical scaling factor known, round up
1133 * resized height (output width or height) to IDMAC
1134 * limitations.
1135 */
1136 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1137 out_tile->width = round_up(resized_height, 2);
1138 else
1139 out_tile->height = round_up(resized_height, 2);
1140
1141 /*
1142 * Calculate input width from the last accessed input
1143 * pixel given resized height and scaling coefficients.
1144 * Align to IDMAC restrictions.
1145 */
1146 last_output = round_up(resized_height, 2) - 1;
1147 if (closest)
1148 last_output++;
1149 in_tile->height = round_up(
1150 (DIV_ROUND_UP(last_output * resize_coeff_v,
1151 8192) + 1)
1152 << ctx->downsize_coeff_v, 2);
1153 }
1154
1155 ctx->resize_coeffs_v[row] = resize_coeff_v;
1156 }
1157}
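/*
 * Example with illustrative numbers: if a column's output tiles are
 * 304 pixels wide and resize_coeff_h works out to 2731 (roughly 3x
 * upscaling, no downsizing), the input tile width for the interior
 * columns that round to the closest sample position is recomputed as
 * round_up(DIV_ROUND_UP(304 * 2731, 8192) + 1, 8) = 104 pixels.
 */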
1158
1159/*
 1160 * return the number of runs in given queue (pending_q or done_q)
1161 * for this context. hold irqlock when calling.
1162 */
1163static int get_run_count(struct ipu_image_convert_ctx *ctx,
1164 struct list_head *q)
1165{
1166 struct ipu_image_convert_run *run;
1167 int count = 0;
1168
1169 lockdep_assert_held(&ctx->chan->irqlock);
1170
1171 list_for_each_entry(run, q, list) {
1172 if (run->ctx == ctx)
1173 count++;
1174 }
1175
1176 return count;
1177}
1178
1179static void convert_stop(struct ipu_image_convert_run *run)
1180{
1181 struct ipu_image_convert_ctx *ctx = run->ctx;
1182 struct ipu_image_convert_chan *chan = ctx->chan;
1183 struct ipu_image_convert_priv *priv = chan->priv;
1184
1185 dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
1186 __func__, chan->ic_task, ctx, run);
1187
1188 /* disable IC tasks and the channels */
1189 ipu_ic_task_disable(chan->ic);
1190 ipu_idmac_disable_channel(chan->in_chan);
1191 ipu_idmac_disable_channel(chan->out_chan);
1192
1193 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1194 ipu_idmac_disable_channel(chan->rotation_in_chan);
1195 ipu_idmac_disable_channel(chan->rotation_out_chan);
1196 ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
1197 }
1198
1199 ipu_ic_disable(chan->ic);
1200}
1201
1202static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
1203 struct ipuv3_channel *channel,
1204 struct ipu_image_convert_image *image,
1205 enum ipu_rotate_mode rot_mode,
 1206 bool rot_swap_width_height,
 1207 unsigned int tile)
 1208{
1209 struct ipu_image_convert_chan *chan = ctx->chan;
1210 unsigned int burst_size;
1211 u32 width, height, stride;
1212 dma_addr_t addr0, addr1 = 0;
1213 struct ipu_image tile_image;
1214 unsigned int tile_idx[2];
1215
1216 if (image->type == IMAGE_CONVERT_OUT) {
 1217 tile_idx[0] = ctx->out_tile_map[tile];
 1218 tile_idx[1] = ctx->out_tile_map[1];
1219 } else {
 1220 tile_idx[0] = tile;
 1221 tile_idx[1] = 1;
1222 }
1223
1224 if (rot_swap_width_height) {
 1225 width = image->tile[tile_idx[0]].height;
 1226 height = image->tile[tile_idx[0]].width;
 1227 stride = image->tile[tile_idx[0]].rot_stride;
 1228 addr0 = ctx->rot_intermediate[0].phys;
1229 if (ctx->double_buffering)
1230 addr1 = ctx->rot_intermediate[1].phys;
1231 } else {
 1232 width = image->tile[tile_idx[0]].width;
 1233 height = image->tile[tile_idx[0]].height;
 1234 stride = image->stride;
1235 addr0 = image->base.phys0 +
1236 image->tile[tile_idx[0]].offset;
1237 if (ctx->double_buffering)
1238 addr1 = image->base.phys0 +
1239 image->tile[tile_idx[1]].offset;
1240 }
1241
1242 ipu_cpmem_zero(channel);
1243
1244 memset(&tile_image, 0, sizeof(tile_image));
1245 tile_image.pix.width = tile_image.rect.width = width;
1246 tile_image.pix.height = tile_image.rect.height = height;
1247 tile_image.pix.bytesperline = stride;
1248 tile_image.pix.pixelformat = image->fmt->fourcc;
1249 tile_image.phys0 = addr0;
1250 tile_image.phys1 = addr1;
 1251 if (image->fmt->planar && !rot_swap_width_height) {
1252 tile_image.u_offset = image->tile[tile_idx[0]].u_off;
1253 tile_image.v_offset = image->tile[tile_idx[0]].v_off;
1254 }
 1255
 1256 ipu_cpmem_set_image(channel, &tile_image);
 1257
1258 if (rot_mode)
1259 ipu_cpmem_set_rotation(channel, rot_mode);
1260
1261 if (channel == chan->rotation_in_chan ||
1262 channel == chan->rotation_out_chan) {
1263 burst_size = 8;
1264 ipu_cpmem_set_block_mode(channel);
1265 } else
1266 burst_size = (width % 16) ? 8 : 16;
1267
1268 ipu_cpmem_set_burstsize(channel, burst_size);
1269
1270 ipu_ic_task_idma_init(chan->ic, channel, width, height,
1271 burst_size, rot_mode);
1272
 1273 /*
1274 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
1275 * only do this when there is no PRG present.
1276 */
1277 if (!channel->ipu->prg_priv)
1278 ipu_cpmem_set_axi_id(channel, 1);
 1279
1280 ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
1281}
1282
 1283static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
 1284{
1285 struct ipu_image_convert_ctx *ctx = run->ctx;
1286 struct ipu_image_convert_chan *chan = ctx->chan;
1287 struct ipu_image_convert_priv *priv = chan->priv;
1288 struct ipu_image_convert_image *s_image = &ctx->in;
1289 struct ipu_image_convert_image *d_image = &ctx->out;
1290 enum ipu_color_space src_cs, dest_cs;
 1291 unsigned int dst_tile = ctx->out_tile_map[tile];
 1292 unsigned int dest_width, dest_height;
 1293 unsigned int col, row;
 1294 u32 rsc;
 1295 int ret;
1296
 1297 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
 1298 __func__, chan->ic_task, ctx, run, tile, dst_tile);
 1299
1300 src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
1301 dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
1302
1303 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1304 /* swap width/height for resizer */
 1305 dest_width = d_image->tile[dst_tile].height;
 1306 dest_height = d_image->tile[dst_tile].width;
 1307 } else {
 1308 dest_width = d_image->tile[dst_tile].width;
 1309 dest_height = d_image->tile[dst_tile].height;
 1310 }
 1311
 1312 row = tile / s_image->num_cols;
1313 col = tile % s_image->num_cols;
1314
1315 rsc = (ctx->downsize_coeff_v << 30) |
1316 (ctx->resize_coeffs_v[row] << 16) |
1317 (ctx->downsize_coeff_h << 14) |
1318 (ctx->resize_coeffs_h[col]);
1319
1320 dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
1321 __func__, s_image->tile[tile].width,
1322 s_image->tile[tile].height, dest_width, dest_height, rsc);
1323
 1324 /* setup the IC resizer and CSC */
 1325 ret = ipu_ic_task_init_rsc(chan->ic,
 1326 s_image->tile[tile].width,
 1327 s_image->tile[tile].height,
 1328 dest_width,
 1329 dest_height,
 1330 src_cs, dest_cs,
 1331 rsc);
 1332 if (ret) {
1333 dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
1334 return ret;
1335 }
1336
1337 /* init the source MEM-->IC PP IDMAC channel */
1338 init_idmac_channel(ctx, chan->in_chan, s_image,
 1339 IPU_ROTATE_NONE, false, tile);
 1340
1341 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1342 /* init the IC PP-->MEM IDMAC channel */
1343 init_idmac_channel(ctx, chan->out_chan, d_image,
 1344 IPU_ROTATE_NONE, true, tile);
 1345
1346 /* init the MEM-->IC PP ROT IDMAC channel */
1347 init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
 1348 ctx->rot_mode, true, tile);
 1349
1350 /* init the destination IC PP ROT-->MEM IDMAC channel */
1351 init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
 1352 IPU_ROTATE_NONE, false, tile);
 1353
1354 /* now link IC PP-->MEM to MEM-->IC PP ROT */
1355 ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
1356 } else {
1357 /* init the destination IC PP-->MEM IDMAC channel */
1358 init_idmac_channel(ctx, chan->out_chan, d_image,
 1359 ctx->rot_mode, false, tile);
 1360 }
1361
1362 /* enable the IC */
1363 ipu_ic_enable(chan->ic);
1364
1365 /* set buffers ready */
1366 ipu_idmac_select_buffer(chan->in_chan, 0);
1367 ipu_idmac_select_buffer(chan->out_chan, 0);
1368 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1369 ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
1370 if (ctx->double_buffering) {
1371 ipu_idmac_select_buffer(chan->in_chan, 1);
1372 ipu_idmac_select_buffer(chan->out_chan, 1);
1373 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1374 ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
1375 }
1376
1377 /* enable the channels! */
1378 ipu_idmac_enable_channel(chan->in_chan);
1379 ipu_idmac_enable_channel(chan->out_chan);
1380 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1381 ipu_idmac_enable_channel(chan->rotation_in_chan);
1382 ipu_idmac_enable_channel(chan->rotation_out_chan);
1383 }
1384
1385 ipu_ic_task_enable(chan->ic);
1386
1387 ipu_cpmem_dump(chan->in_chan);
1388 ipu_cpmem_dump(chan->out_chan);
1389 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1390 ipu_cpmem_dump(chan->rotation_in_chan);
1391 ipu_cpmem_dump(chan->rotation_out_chan);
1392 }
1393
1394 ipu_dump(priv->ipu);
1395
1396 return 0;
1397}
1398
1399/* hold irqlock when calling */
1400static int do_run(struct ipu_image_convert_run *run)
1401{
1402 struct ipu_image_convert_ctx *ctx = run->ctx;
1403 struct ipu_image_convert_chan *chan = ctx->chan;
1404
1405 lockdep_assert_held(&chan->irqlock);
1406
1407 ctx->in.base.phys0 = run->in_phys;
1408 ctx->out.base.phys0 = run->out_phys;
1409
1410 ctx->cur_buf_num = 0;
1411 ctx->next_tile = 1;
1412
1413 /* remove run from pending_q and set as current */
1414 list_del(&run->list);
1415 chan->current_run = run;
1416
 1417 return convert_start(run, 0);
 1418}
1419
1420/* hold irqlock when calling */
1421static void run_next(struct ipu_image_convert_chan *chan)
1422{
1423 struct ipu_image_convert_priv *priv = chan->priv;
1424 struct ipu_image_convert_run *run, *tmp;
1425 int ret;
1426
1427 lockdep_assert_held(&chan->irqlock);
1428
1429 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
1430 /* skip contexts that are aborting */
1431 if (run->ctx->aborting) {
1432 dev_dbg(priv->ipu->dev,
1433 "%s: task %u: skipping aborting ctx %p run %p\n",
1434 __func__, chan->ic_task, run->ctx, run);
1435 continue;
1436 }
1437
1438 ret = do_run(run);
1439 if (!ret)
1440 break;
1441
1442 /*
1443 * something went wrong with start, add the run
1444 * to done q and continue to the next run in the
1445 * pending q.
1446 */
1447 run->status = ret;
1448 list_add_tail(&run->list, &chan->done_q);
1449 chan->current_run = NULL;
1450 }
1451}
1452
1453static void empty_done_q(struct ipu_image_convert_chan *chan)
1454{
1455 struct ipu_image_convert_priv *priv = chan->priv;
1456 struct ipu_image_convert_run *run;
1457 unsigned long flags;
1458
1459 spin_lock_irqsave(&chan->irqlock, flags);
1460
1461 while (!list_empty(&chan->done_q)) {
1462 run = list_entry(chan->done_q.next,
1463 struct ipu_image_convert_run,
1464 list);
1465
1466 list_del(&run->list);
1467
1468 dev_dbg(priv->ipu->dev,
1469 "%s: task %u: completing ctx %p run %p with %d\n",
1470 __func__, chan->ic_task, run->ctx, run, run->status);
1471
1472 /* call the completion callback and free the run */
1473 spin_unlock_irqrestore(&chan->irqlock, flags);
1474 run->ctx->complete(run, run->ctx->complete_context);
1475 spin_lock_irqsave(&chan->irqlock, flags);
1476 }
1477
1478 spin_unlock_irqrestore(&chan->irqlock, flags);
1479}
1480
1481/*
1482 * the bottom half thread clears out the done_q, calling the
1483 * completion handler for each.
1484 */
1485static irqreturn_t do_bh(int irq, void *dev_id)
1486{
1487 struct ipu_image_convert_chan *chan = dev_id;
1488 struct ipu_image_convert_priv *priv = chan->priv;
1489 struct ipu_image_convert_ctx *ctx;
1490 unsigned long flags;
1491
1492 dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
1493 chan->ic_task);
1494
1495 empty_done_q(chan);
1496
1497 spin_lock_irqsave(&chan->irqlock, flags);
1498
1499 /*
1500 * the done_q is cleared out, signal any contexts
1501 * that are aborting that abort can complete.
1502 */
1503 list_for_each_entry(ctx, &chan->ctx_list, list) {
1504 if (ctx->aborting) {
1505 dev_dbg(priv->ipu->dev,
1506 "%s: task %u: signaling abort for ctx %p\n",
1507 __func__, chan->ic_task, ctx);
 1508 complete_all(&ctx->aborted);
 1509 }
1510 }
1511
1512 spin_unlock_irqrestore(&chan->irqlock, flags);
1513
1514 dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
1515 chan->ic_task);
1516
1517 return IRQ_HANDLED;
1518}
1519
 1520static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
1521{
1522 unsigned int cur_tile = ctx->next_tile - 1;
1523 unsigned int next_tile = ctx->next_tile;
1524
1525 if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
1526 ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
1527 ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
1528 ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
1529 ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
1530 ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
1531 ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
1532 ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
1533 return true;
1534
1535 return false;
1536}
1537
 1538/* hold irqlock when calling */
1539static irqreturn_t do_irq(struct ipu_image_convert_run *run)
1540{
1541 struct ipu_image_convert_ctx *ctx = run->ctx;
1542 struct ipu_image_convert_chan *chan = ctx->chan;
1543 struct ipu_image_tile *src_tile, *dst_tile;
1544 struct ipu_image_convert_image *s_image = &ctx->in;
1545 struct ipu_image_convert_image *d_image = &ctx->out;
1546 struct ipuv3_channel *outch;
1547 unsigned int dst_idx;
1548
1549 lockdep_assert_held(&chan->irqlock);
1550
1551 outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
1552 chan->rotation_out_chan : chan->out_chan;
1553
1554 /*
1555 * It is difficult to stop the channel DMA before the channels
1556 * enter the paused state. Without double-buffering the channels
1557 * are always in a paused state when the EOF irq occurs, so it
1558 * is safe to stop the channels now. For double-buffering we
1559 * just ignore the abort until the operation completes, when it
1560 * is safe to shut down.
1561 */
1562 if (ctx->aborting && !ctx->double_buffering) {
1563 convert_stop(run);
1564 run->status = -EIO;
1565 goto done;
1566 }
1567
1568 if (ctx->next_tile == ctx->num_tiles) {
1569 /*
1570 * the conversion is complete
1571 */
1572 convert_stop(run);
1573 run->status = 0;
1574 goto done;
1575 }
1576
1577 /*
1578 * not done, place the next tile buffers.
1579 */
1580 if (!ctx->double_buffering) {
 1581 if (ic_settings_changed(ctx)) {
1582 convert_stop(run);
1583 convert_start(run, ctx->next_tile);
1584 } else {
1585 src_tile = &s_image->tile[ctx->next_tile];
1586 dst_idx = ctx->out_tile_map[ctx->next_tile];
1587 dst_tile = &d_image->tile[dst_idx];
 1588
 1589 ipu_cpmem_set_buffer(chan->in_chan, 0,
1590 s_image->base.phys0 +
1591 src_tile->offset);
1592 ipu_cpmem_set_buffer(outch, 0,
1593 d_image->base.phys0 +
1594 dst_tile->offset);
1595 if (s_image->fmt->planar)
1596 ipu_cpmem_set_uv_offset(chan->in_chan,
1597 src_tile->u_off,
1598 src_tile->v_off);
1599 if (d_image->fmt->planar)
1600 ipu_cpmem_set_uv_offset(outch,
1601 dst_tile->u_off,
1602 dst_tile->v_off);
 1603
 1604 ipu_idmac_select_buffer(chan->in_chan, 0);
1605 ipu_idmac_select_buffer(outch, 0);
1606 }
 1607 } else if (ctx->next_tile < ctx->num_tiles - 1) {
1608
1609 src_tile = &s_image->tile[ctx->next_tile + 1];
1610 dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
1611 dst_tile = &d_image->tile[dst_idx];
1612
1613 ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
1614 s_image->base.phys0 + src_tile->offset);
1615 ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
1616 d_image->base.phys0 + dst_tile->offset);
1617
1618 ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
1619 ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
1620
1621 ctx->cur_buf_num ^= 1;
1622 }
1623
1624 ctx->next_tile++;
1625 return IRQ_HANDLED;
1626done:
1627 list_add_tail(&run->list, &chan->done_q);
1628 chan->current_run = NULL;
1629 run_next(chan);
1630 return IRQ_WAKE_THREAD;
1631}
1632
1633static irqreturn_t norotate_irq(int irq, void *data)
1634{
1635 struct ipu_image_convert_chan *chan = data;
1636 struct ipu_image_convert_ctx *ctx;
1637 struct ipu_image_convert_run *run;
1638 unsigned long flags;
1639 irqreturn_t ret;
1640
1641 spin_lock_irqsave(&chan->irqlock, flags);
1642
1643 /* get current run and its context */
1644 run = chan->current_run;
1645 if (!run) {
1646 ret = IRQ_NONE;
1647 goto out;
1648 }
1649
1650 ctx = run->ctx;
1651
1652 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1653 /* this is a rotation operation, just ignore */
1654 spin_unlock_irqrestore(&chan->irqlock, flags);
1655 return IRQ_HANDLED;
1656 }
1657
1658 ret = do_irq(run);
1659out:
1660 spin_unlock_irqrestore(&chan->irqlock, flags);
1661 return ret;
1662}
1663
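/*
 * EOF interrupt for the IRT output channel, i.e. a rotated tile has been
 * written to the destination; only expected for contexts using rotation.
 */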
1664static irqreturn_t rotate_irq(int irq, void *data)
1665{
1666 struct ipu_image_convert_chan *chan = data;
1667 struct ipu_image_convert_priv *priv = chan->priv;
1668 struct ipu_image_convert_ctx *ctx;
1669 struct ipu_image_convert_run *run;
1670 unsigned long flags;
1671 irqreturn_t ret;
1672
1673 spin_lock_irqsave(&chan->irqlock, flags);
1674
1675 /* get current run and its context */
1676 run = chan->current_run;
1677 if (!run) {
1678 ret = IRQ_NONE;
1679 goto out;
1680 }
1681
1682 ctx = run->ctx;
1683
1684 if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1685 /* this was NOT a rotation operation, shouldn't happen */
1686 dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
1687 spin_unlock_irqrestore(&chan->irqlock, flags);
1688 return IRQ_HANDLED;
1689 }
1690
1691 ret = do_irq(run);
1692out:
1693 spin_unlock_irqrestore(&chan->irqlock, flags);
1694 return ret;
1695}
1696
1697/*
1698 * try to force the completion of runs for this ctx. Called when the
1699 * abort wait times out in ipu_image_convert_abort().
1700 */
1701static void force_abort(struct ipu_image_convert_ctx *ctx)
1702{
1703 struct ipu_image_convert_chan *chan = ctx->chan;
1704 struct ipu_image_convert_run *run;
1705 unsigned long flags;
1706
1707 spin_lock_irqsave(&chan->irqlock, flags);
1708
1709 run = chan->current_run;
1710 if (run && run->ctx == ctx) {
1711 convert_stop(run);
1712 run->status = -EIO;
1713 list_add_tail(&run->list, &chan->done_q);
1714 chan->current_run = NULL;
1715 run_next(chan);
1716 }
1717
1718 spin_unlock_irqrestore(&chan->irqlock, flags);
1719
1720 empty_done_q(chan);
1721}
1722
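/*
 * Release everything acquired by get_ipu_resources(). Safe to call with a
 * partially acquired channel, since each handle is checked before release.
 */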
1723static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1724{
1725 if (chan->out_eof_irq >= 0)
1726 free_irq(chan->out_eof_irq, chan);
1727 if (chan->rot_out_eof_irq >= 0)
1728 free_irq(chan->rot_out_eof_irq, chan);
1729
1730 if (!IS_ERR_OR_NULL(chan->in_chan))
1731 ipu_idmac_put(chan->in_chan);
1732 if (!IS_ERR_OR_NULL(chan->out_chan))
1733 ipu_idmac_put(chan->out_chan);
1734 if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
1735 ipu_idmac_put(chan->rotation_in_chan);
1736 if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
1737 ipu_idmac_put(chan->rotation_out_chan);
1738 if (!IS_ERR_OR_NULL(chan->ic))
1739 ipu_ic_put(chan->ic);
1740
1741 chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1742 chan->rotation_out_chan = NULL;
1743 chan->out_eof_irq = chan->rot_out_eof_irq = -1;
1744}
1745
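/*
 * Acquire the IC task, the four IDMAC channels and the two EOF interrupts
 * used by this conversion channel. On failure, whatever was already
 * acquired is released again.
 */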
1746static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1747{
1748 const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
1749 struct ipu_image_convert_priv *priv = chan->priv;
1750 int ret;
1751
1752 /* get IC */
1753 chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
1754 if (IS_ERR(chan->ic)) {
1755 dev_err(priv->ipu->dev, "could not acquire IC\n");
1756 ret = PTR_ERR(chan->ic);
1757 goto err;
1758 }
1759
1760 /* get IDMAC channels */
1761 chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
1762 chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
1763 if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
1764 dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
1765 ret = -EBUSY;
1766 goto err;
1767 }
1768
1769 chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
1770 chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
1771 if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
1772 dev_err(priv->ipu->dev,
1773 "could not acquire idmac rotation channels\n");
1774 ret = -EBUSY;
1775 goto err;
1776 }
1777
1778 /* acquire the EOF interrupts */
1779 chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1780 chan->out_chan,
1781 IPU_IRQ_EOF);
1782
1783 ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
1784 0, "ipu-ic", chan);
1785 if (ret < 0) {
1786 dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1787 chan->out_eof_irq);
1788 chan->out_eof_irq = -1;
1789 goto err;
1790 }
1791
1792 chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1793 chan->rotation_out_chan,
1794 IPU_IRQ_EOF);
1795
1796 ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
1797 0, "ipu-ic", chan);
1798 if (ret < 0) {
1799 dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1800 chan->rot_out_eof_irq);
1801 chan->rot_out_eof_irq = -1;
1802 goto err;
1803 }
1804
1805 return 0;
1806err:
1807 release_ipu_resources(chan);
1808 return ret;
1809}
1810
1811static int fill_image(struct ipu_image_convert_ctx *ctx,
1812 struct ipu_image_convert_image *ic_image,
1813 struct ipu_image *image,
1814 enum ipu_image_convert_type type)
1815{
1816 struct ipu_image_convert_priv *priv = ctx->chan->priv;
1817
1818 ic_image->base = *image;
1819 ic_image->type = type;
1820
1821 ic_image->fmt = get_format(image->pix.pixelformat);
1822 if (!ic_image->fmt) {
1823 dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
1824 type == IMAGE_CONVERT_OUT ? "Output" : "Input");
1825 return -EINVAL;
1826 }
1827
1828 if (ic_image->fmt->planar)
1829 ic_image->stride = ic_image->base.pix.width;
1830 else
1831 ic_image->stride = ic_image->base.pix.bytesperline;
1832
1833 return 0;
1834}
1835
1836/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
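/*
 * clamp x to [min, max] and round it to the nearest multiple of 2^align,
 * e.g. clamp_align(100, 16, 4096, 3) returns 104 (ties round up)
 */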
1837static unsigned int clamp_align(unsigned int x, unsigned int min,
1838 unsigned int max, unsigned int align)
1839{
1840 /* Bits that must be zero to be aligned */
1841 unsigned int mask = ~((1 << align) - 1);
1842
1843 /* Clamp to aligned min and max */
1844 x = clamp(x, (min + ~mask) & mask, max & mask);
1845
1846 /* Round to nearest aligned value */
1847 if (align)
1848 x = (x + (1 << (align - 1))) & mask;
1849
1850 return x;
1851}
1852
1853/* Adjusts input/output images to IPU restrictions */
1854void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
1855 enum ipu_rotate_mode rot_mode)
1856{
1857 const struct ipu_image_pixfmt *infmt, *outfmt;
1858 unsigned int num_in_rows, num_in_cols;
1859 unsigned int num_out_rows, num_out_cols;
1860 u32 w_align, h_align;
1861
1862 infmt = get_format(in->pix.pixelformat);
1863 outfmt = get_format(out->pix.pixelformat);
1864
1865 /* set some default pixel formats if needed */
1866 if (!infmt) {
1867 in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1868 infmt = get_format(V4L2_PIX_FMT_RGB24);
1869 }
1870 if (!outfmt) {
1871 out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1872 outfmt = get_format(V4L2_PIX_FMT_RGB24);
1873 }
1874
1875 /* image converter does not handle fields */
1876 in->pix.field = out->pix.field = V4L2_FIELD_NONE;
1877
1878 /* resizer cannot downsize more than 4:1 */
1879 if (ipu_rot_mode_is_irt(rot_mode)) {
1880 out->pix.height = max_t(__u32, out->pix.height,
1881 in->pix.width / 4);
1882 out->pix.width = max_t(__u32, out->pix.width,
1883 in->pix.height / 4);
1884 } else {
1885 out->pix.width = max_t(__u32, out->pix.width,
1886 in->pix.width / 4);
1887 out->pix.height = max_t(__u32, out->pix.height,
1888 in->pix.height / 4);
1889 }
1890
1891 /* get tiling rows/cols from the output frame dimensions */
1892 num_out_rows = num_stripes(out->pix.height);
1893 num_out_cols = num_stripes(out->pix.width);
1894 if (ipu_rot_mode_is_irt(rot_mode)) {
1895 num_in_rows = num_out_cols;
1896 num_in_cols = num_out_rows;
1897 } else {
1898 num_in_rows = num_out_rows;
1899 num_in_cols = num_out_cols;
1900 }
1901
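 /*
  * aligning the full frame to (per-tile alignment * number of stripes)
  * guarantees each tile still meets its width/height alignment once the
  * frame is split into equal stripes; ilog2() turns that into the
  * power-of-two exponent that clamp_align() expects
  */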
1902 /* align input width/height */
1903 w_align = ilog2(tile_width_align(infmt) * num_in_cols);
1904 h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
1905 num_in_rows);
1906 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
1907 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
1908
1909 /* align output width/height */
1910 w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
1911 h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
1912 num_out_rows);
1913 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
1914 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
1915
1916 /* set input/output strides and image sizes */
1917 in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
1918 in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
1919 out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
1920 out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
1921}
1922EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
1923
1924/*
1925 * this is used by ipu_image_convert_prepare() to verify that the given
1926 * input and output images are valid before starting the conversion.
1927 * Clients can also call it before calling ipu_image_convert_prepare().
1928 */
1929int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
1930 enum ipu_rotate_mode rot_mode)
1931{
1932 struct ipu_image testin, testout;
1933
1934 testin = *in;
1935 testout = *out;
1936
1937 ipu_image_convert_adjust(&testin, &testout, rot_mode);
1938
1939 if (testin.pix.width != in->pix.width ||
1940 testin.pix.height != in->pix.height ||
1941 testout.pix.width != out->pix.width ||
1942 testout.pix.height != out->pix.height)
1943 return -EINVAL;
1944
1945 return 0;
1946}
1947EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
1948
1949/*
1950 * Call ipu_image_convert_prepare() to prepare for the conversion of the
1951 * given images and rotation mode. Returns a new conversion context.
1952 */
1953struct ipu_image_convert_ctx *
1954ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1955 struct ipu_image *in, struct ipu_image *out,
1956 enum ipu_rotate_mode rot_mode,
1957 ipu_image_convert_cb_t complete,
1958 void *complete_context)
1959{
1960 struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
1961 struct ipu_image_convert_image *s_image, *d_image;
1962 struct ipu_image_convert_chan *chan;
1963 struct ipu_image_convert_ctx *ctx;
1964 unsigned long flags;
1965 bool get_res;
1966 int ret;
1967
1968 if (!in || !out || !complete ||
1969 (ic_task != IC_TASK_VIEWFINDER &&
1970 ic_task != IC_TASK_POST_PROCESSOR))
1971 return ERR_PTR(-EINVAL);
1972
1973 /* verify the in/out images before continuing */
1974 ret = ipu_image_convert_verify(in, out, rot_mode);
1975 if (ret) {
1976 dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
1977 __func__);
1978 return ERR_PTR(ret);
1979 }
1980
1981 chan = &priv->chan[ic_task];
1982
1983 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1984 if (!ctx)
1985 return ERR_PTR(-ENOMEM);
1986
1987 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
1988 chan->ic_task, ctx);
1989
1990 ctx->chan = chan;
1991 init_completion(&ctx->aborted);
1992
1993 s_image = &ctx->in;
1994 d_image = &ctx->out;
1995
1996 /* set tiling and rotation */
1997 d_image->num_rows = num_stripes(out->pix.height);
1998 d_image->num_cols = num_stripes(out->pix.width);
1999 if (ipu_rot_mode_is_irt(rot_mode)) {
2000 s_image->num_rows = d_image->num_cols;
2001 s_image->num_cols = d_image->num_rows;
2002 } else {
2003 s_image->num_rows = d_image->num_rows;
2004 s_image->num_cols = d_image->num_cols;
2005 }
2006
2007 ctx->num_tiles = d_image->num_cols * d_image->num_rows;
2008 ctx->rot_mode = rot_mode;
2009
2010 ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
2011 if (ret)
2012 goto out_free;
2013 ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
2014 if (ret)
2015 goto out_free;
2016
2017 ret = calc_image_resize_coefficients(ctx, in, out);
2018 if (ret)
2019 goto out_free;
2020
2021 calc_out_tile_map(ctx);
2022
2023 find_seams(ctx, s_image, d_image);
2024
2025 calc_tile_dimensions(ctx, s_image);
2026 ret = calc_tile_offsets(ctx, s_image);
2027 if (ret)
2028 goto out_free;
2029
2030 calc_tile_dimensions(ctx, d_image);
2031 ret = calc_tile_offsets(ctx, d_image);
2032 if (ret)
2033 goto out_free;
2034
2035 calc_tile_resize_coefficients(ctx);
2036
2037 dump_format(ctx, s_image);
2038 dump_format(ctx, d_image);
2039
2040 ctx->complete = complete;
2041 ctx->complete_context = complete_context;
2042
2043 /*
2044 * Can we use double-buffering for this operation? If there is
2045 * only one tile (the whole image can be converted in a single
2046 * operation) there's no point in using double-buffering. Also,
2047 * the IPU's IDMAC channels allow only a single U and V plane
2048 * offset shared between both buffers, but these offsets change
2049 * for every tile, and therefore would have to be updated for
2050 * each buffer, which is not possible. So double-buffering is
2051 * impossible when either the source or destination image is
2052 * a planar format (YUV420, YUV422P, etc.).
2053 */
2054 ctx->double_buffering = (ctx->num_tiles > 1 &&
2055 !s_image->fmt->planar &&
2056 !d_image->fmt->planar);
2057
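 /*
  * rotation goes through an intermediate DMA buffer: the IC writes the
  * scaled/converted tile into it and the IRT reads it back to rotate
  * into the destination. Size it for the largest destination tile and
  * allocate a second buffer when double-buffering.
  */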
2058 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
2059  unsigned long intermediate_size = d_image->tile[0].size;
2060 unsigned int i;
2061
2062 for (i = 1; i < ctx->num_tiles; i++) {
2063 if (d_image->tile[i].size > intermediate_size)
2064 intermediate_size = d_image->tile[i].size;
2065 }
2066
2067  ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
2068      intermediate_size);
2069  if (ret)
2070 goto out_free;
2071 if (ctx->double_buffering) {
2072 ret = alloc_dma_buf(priv,
2073 &ctx->rot_intermediate[1],
2074       intermediate_size);
2075   if (ret)
2076 goto out_free_dmabuf0;
2077 }
2078 }
2079
2080 spin_lock_irqsave(&chan->irqlock, flags);
2081
2082 get_res = list_empty(&chan->ctx_list);
2083
2084 list_add_tail(&ctx->list, &chan->ctx_list);
2085
2086 spin_unlock_irqrestore(&chan->irqlock, flags);
2087
2088 if (get_res) {
2089 ret = get_ipu_resources(chan);
2090 if (ret)
2091 goto out_free_dmabuf1;
2092 }
2093
2094 return ctx;
2095
2096out_free_dmabuf1:
2097 free_dma_buf(priv, &ctx->rot_intermediate[1]);
2098 spin_lock_irqsave(&chan->irqlock, flags);
2099 list_del(&ctx->list);
2100 spin_unlock_irqrestore(&chan->irqlock, flags);
2101out_free_dmabuf0:
2102 free_dma_buf(priv, &ctx->rot_intermediate[0]);
2103out_free:
2104 kfree(ctx);
2105 return ERR_PTR(ret);
2106}
2107EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
2108
2109/*
2110 * Carry out a single image conversion run. Only the physical addresses of the
2111 * input and output image buffers are needed. The conversion context must have
2112 * been created previously with ipu_image_convert_prepare().
2113 */
2114int ipu_image_convert_queue(struct ipu_image_convert_run *run)
2115{
2116 struct ipu_image_convert_chan *chan;
2117 struct ipu_image_convert_priv *priv;
2118 struct ipu_image_convert_ctx *ctx;
2119 unsigned long flags;
2120 int ret = 0;
2121
2122 if (!run || !run->ctx || !run->in_phys || !run->out_phys)
2123 return -EINVAL;
2124
2125 ctx = run->ctx;
2126 chan = ctx->chan;
2127 priv = chan->priv;
2128
2129 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
2130 chan->ic_task, ctx, run);
2131
2132 INIT_LIST_HEAD(&run->list);
2133
2134 spin_lock_irqsave(&chan->irqlock, flags);
2135
2136 if (ctx->aborting) {
2137 ret = -EIO;
2138 goto unlock;
2139 }
2140
2141 list_add_tail(&run->list, &chan->pending_q);
2142
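 /*
  * start the run immediately if the channel is idle; otherwise it waits
  * on pending_q until run_next() picks it up when the current run
  * completes
  */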
2143 if (!chan->current_run) {
2144 ret = do_run(run);
2145 if (ret)
2146 chan->current_run = NULL;
2147 }
2148unlock:
2149 spin_unlock_irqrestore(&chan->irqlock, flags);
2150 return ret;
2151}
2152EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
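
/*
 * Illustrative sketch (not part of this driver) of how a client might drive
 * the asynchronous API above directly instead of using the "canned" helpers
 * further below. Image setup and error handling are omitted, and the
 * completion callback name is hypothetical:
 *
 *	static void my_done(struct ipu_image_convert_run *run, void *arg)
 *	{
 *		complete((struct completion *)arg);
 *	}
 *
 *	init_completion(&done);
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR, &in, &out,
 *					IPU_ROTATE_90_RIGHT, my_done, &done);
 *	run = kzalloc(sizeof(*run), GFP_KERNEL);
 *	run->ctx = ctx;
 *	run->in_phys = in.phys0;
 *	run->out_phys = out.phys0;
 *	ipu_image_convert_queue(run);
 *	wait_for_completion(&done);
 *	ipu_image_convert_unprepare(ctx);
 *	kfree(run);
 */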
2153
2154/* Abort any active or pending conversions for this context */
2155static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
2156{
2157 struct ipu_image_convert_chan *chan = ctx->chan;
2158 struct ipu_image_convert_priv *priv = chan->priv;
2159 struct ipu_image_convert_run *run, *active_run, *tmp;
2160 unsigned long flags;
2161 int run_count, ret;
2162
2163 spin_lock_irqsave(&chan->irqlock, flags);
2164
2165 /* move all remaining pending runs in this context to done_q */
2166 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
2167 if (run->ctx != ctx)
2168 continue;
2169 run->status = -EIO;
2170 list_move_tail(&run->list, &chan->done_q);
2171 }
2172
2173 run_count = get_run_count(ctx, &chan->done_q);
2174 active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
2175 chan->current_run : NULL;
2176
2177 if (active_run)
2178 reinit_completion(&ctx->aborted);
2179
2180 ctx->aborting = true;
2181
2182 spin_unlock_irqrestore(&chan->irqlock, flags);
2183
2184 if (!run_count && !active_run) {
2185  dev_dbg(priv->ipu->dev,
2186 "%s: task %u: no abort needed for ctx %p\n",
2187 __func__, chan->ic_task, ctx);
2188 return;
2189 }
2190
2191 if (!active_run) {
2192 empty_done_q(chan);
2193 return;
2194 }
2195
2196 dev_dbg(priv->ipu->dev,
2197  "%s: task %u: wait for completion: %d runs\n",
2198 __func__, chan->ic_task, run_count);
2199
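 /*
  * the irq bottom half completes ctx->aborted once the context's runs
  * have stopped; if that never happens, force the abort below
  */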
2200 ret = wait_for_completion_timeout(&ctx->aborted,
2201 msecs_to_jiffies(10000));
2202 if (ret == 0) {
2203 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
2204 force_abort(ctx);
2205 }
2206}
2207
2208void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
2209{
2210 __ipu_image_convert_abort(ctx);
2211 ctx->aborting = false;
2212}
2213EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
2214
2215/* Unprepare image conversion context */
2216void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
2217{
2218 struct ipu_image_convert_chan *chan = ctx->chan;
2219 struct ipu_image_convert_priv *priv = chan->priv;
2220 unsigned long flags;
2221 bool put_res;
2222
2223 /* make sure no runs are hanging around */
2224 __ipu_image_convert_abort(ctx);
2225
2226 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
2227 chan->ic_task, ctx);
2228
2229 spin_lock_irqsave(&chan->irqlock, flags);
2230
2231 list_del(&ctx->list);
2232
2233 put_res = list_empty(&chan->ctx_list);
2234
2235 spin_unlock_irqrestore(&chan->irqlock, flags);
2236
2237 if (put_res)
2238 release_ipu_resources(chan);
2239
2240 free_dma_buf(priv, &ctx->rot_intermediate[1]);
2241 free_dma_buf(priv, &ctx->rot_intermediate[0]);
2242
2243 kfree(ctx);
2244}
2245EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
2246
2247/*
2248 * "Canned" asynchronous single image conversion. Allocates and returns
2249 * a new conversion run. On successful return the caller must free the
2250 * run and call ipu_image_convert_unprepare() after conversion completes.
2251 */
2252struct ipu_image_convert_run *
2253ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2254 struct ipu_image *in, struct ipu_image *out,
2255 enum ipu_rotate_mode rot_mode,
2256 ipu_image_convert_cb_t complete,
2257 void *complete_context)
2258{
2259 struct ipu_image_convert_ctx *ctx;
2260 struct ipu_image_convert_run *run;
2261 int ret;
2262
2263 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
2264 complete, complete_context);
2265 if (IS_ERR(ctx))
2266  return ERR_CAST(ctx);
2267
2268 run = kzalloc(sizeof(*run), GFP_KERNEL);
2269 if (!run) {
2270 ipu_image_convert_unprepare(ctx);
2271 return ERR_PTR(-ENOMEM);
2272 }
2273
2274 run->ctx = ctx;
2275 run->in_phys = in->phys0;
2276 run->out_phys = out->phys0;
2277
2278 ret = ipu_image_convert_queue(run);
2279 if (ret) {
2280 ipu_image_convert_unprepare(ctx);
2281 kfree(run);
2282 return ERR_PTR(ret);
2283 }
2284
2285 return run;
2286}
2287EXPORT_SYMBOL_GPL(ipu_image_convert);
2288
2289/* "Canned" synchronous single image conversion */
2290static void image_convert_sync_complete(struct ipu_image_convert_run *run,
2291 void *data)
2292{
2293 struct completion *comp = data;
2294
2295 complete(comp);
2296}
2297
2298int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2299 struct ipu_image *in, struct ipu_image *out,
2300 enum ipu_rotate_mode rot_mode)
2301{
2302 struct ipu_image_convert_run *run;
2303 struct completion comp;
2304 int ret;
2305
2306 init_completion(&comp);
2307
2308 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
2309 image_convert_sync_complete, &comp);
2310 if (IS_ERR(run))
2311 return PTR_ERR(run);
2312
2313 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
2314 ret = (ret == 0) ? -ETIMEDOUT : 0;
2315
2316 ipu_image_convert_unprepare(run->ctx);
2317 kfree(run);
2318
2319 return ret;
2320}
2321EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
2322
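/*
 * One-time per-IPU initialization: allocate the private data and set up a
 * conversion channel (lock, queues, irq numbers) for each IC task.
 */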
2323int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
2324{
2325 struct ipu_image_convert_priv *priv;
2326 int i;
2327
2328 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2329 if (!priv)
2330 return -ENOMEM;
2331
2332 ipu->image_convert_priv = priv;
2333 priv->ipu = ipu;
2334
2335 for (i = 0; i < IC_NUM_TASKS; i++) {
2336 struct ipu_image_convert_chan *chan = &priv->chan[i];
2337
2338 chan->ic_task = i;
2339 chan->priv = priv;
2340 chan->dma_ch = &image_convert_dma_chan[i];
2341 chan->out_eof_irq = -1;
2342 chan->rot_out_eof_irq = -1;
2343
2344 spin_lock_init(&chan->irqlock);
2345 INIT_LIST_HEAD(&chan->ctx_list);
2346 INIT_LIST_HEAD(&chan->pending_q);
2347 INIT_LIST_HEAD(&chan->done_q);
2348 }
2349
2350 return 0;
2351}
2352
2353void ipu_image_convert_exit(struct ipu_soc *ipu)
2354{
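 /*
  * nothing to release here: channel resources are acquired when the first
  * context is prepared and released when the last one is unprepared, and
  * the private data is devm-allocated
  */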
2355}