unscaled = (srcW == dstW && srcH == dstH);
c->srcRange |= handle_jpeg(&c->srcFormat);
c->dstRange |= handle_jpeg(&c->dstFormat);
if(srcFormat!=c->srcFormat || dstFormat!=c->dstFormat)
av_log(c, AV_LOG_WARNING, "deprecated pixel format used, make sure you did set range correctly\n");
if (!c->contrast && !c->saturation && !c->dstFormatBpp)
sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], c->srcRange,
ff_yuv2rgb_coeffs[SWS_CS_DEFAULT],
c->dstRange, 0, 1 << 16, 1 << 16);
handle_formats(c);
srcFormat = c->srcFormat;
dstFormat = c->dstFormat;
desc_src = av_pix_fmt_desc_get(srcFormat);
desc_dst = av_pix_fmt_desc_get(dstFormat);
if (!(unscaled && sws_isSupportedEndiannessConversion(srcFormat) &&
av_pix_fmt_swap_endianness(srcFormat) == dstFormat)) {
if (!sws_isSupportedInput(srcFormat)) {
av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n",
av_get_pix_fmt_name(srcFormat));
return AVERROR(EINVAL);
}
if (!sws_isSupportedOutput(dstFormat)) {
av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n",
av_get_pix_fmt_name(dstFormat));
return AVERROR(EINVAL);
}
i = flags & (SWS_POINT |
SWS_AREA |
SWS_BILINEAR |
SWS_FAST_BILINEAR |
SWS_BICUBIC |
SWS_X |
SWS_GAUSS |
SWS_LANCZOS |
SWS_SINC |
SWS_SPLINE |
SWS_BICUBLIN);
/* provide a default scaler if not set by caller */
if (!i) {
if (dstW < srcW && dstH < srcH)
flags |= SWS_BICUBIC;
else if (dstW > srcW && dstH > srcH)
flags |= SWS_BICUBIC;
else
flags |= SWS_BICUBIC;
c->flags = flags;
} else if (i & (i - 1)) {
av_log(c, AV_LOG_ERROR,
"Exactly one scaler algorithm must be chosen, got %X\n", i);
return AVERROR(EINVAL);
}
/* sanity check */
if (srcW < 1 || srcH < 1 || dstW < 1 || dstH < 1) {
/* FIXME check if these are enough and try to lower them after
* fixing the relevant parts of the code */
av_log(c, AV_LOG_ERROR, "%dx%d -> %dx%d is invalid scaling dimension\n",
srcW, srcH, dstW, dstH);
return AVERROR(EINVAL);
}
if (!dstFilter)
dstFilter = &dummyFilter;
if (!srcFilter)
srcFilter = &dummyFilter;
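/* Scale steps in 16.16 fixed point (source step per destination pixel); adding half the divisor rounds to nearest. */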
c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;
c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH;
c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
c->vRounder = 4 * 0x0001000100010001ULL;
usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) ||
(srcFilter->chrV && srcFilter->chrV->length > 1) ||
(dstFilter->lumV && dstFilter->lumV->length > 1) ||
(dstFilter->chrV && dstFilter->chrV->length > 1);
usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) ||
(srcFilter->chrH && srcFilter->chrH->length > 1) ||
(dstFilter->lumH && dstFilter->lumH->length > 1) ||
(dstFilter->chrH && dstFilter->chrH->length > 1);
av_pix_fmt_get_chroma_sub_sample(srcFormat, &c->chrSrcHSubSample, &c->chrSrcVSubSample);
av_pix_fmt_get_chroma_sub_sample(dstFormat, &c->chrDstHSubSample, &c->chrDstVSubSample);
if (isAnyRGB(dstFormat) && !(flags&SWS_FULL_CHR_H_INT)) {
if (dstW&1) {
av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to odd output size\n");
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
if ( c->chrSrcHSubSample == 0
&& c->chrSrcVSubSample == 0
&& c->dither != SWS_DITHER_BAYER //SWS_FULL_CHR_H_INT is currently not supported with SWS_DITHER_BAYER
&& !(c->flags & SWS_FAST_BILINEAR)
) {
av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to input having non subsampled chroma\n");
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
if (c->dither == SWS_DITHER_AUTO) {
if (flags & SWS_ERROR_DIFFUSION)
c->dither = SWS_DITHER_ED;
}
if(dstFormat == AV_PIX_FMT_BGR4_BYTE ||
dstFormat == AV_PIX_FMT_RGB4_BYTE ||
dstFormat == AV_PIX_FMT_BGR8 ||
dstFormat == AV_PIX_FMT_RGB8) {
if (c->dither == SWS_DITHER_AUTO)
c->dither = (flags & SWS_FULL_CHR_H_INT) ? SWS_DITHER_ED : SWS_DITHER_BAYER;
if (!(flags & SWS_FULL_CHR_H_INT)) {
if (c->dither == SWS_DITHER_ED || c->dither == SWS_DITHER_A_DITHER || c->dither == SWS_DITHER_X_DITHER) {
av_log(c, AV_LOG_DEBUG,
"Desired dithering only supported in full chroma interpolation for destination format '%s'\n",
av_get_pix_fmt_name(dstFormat));
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
if (flags & SWS_FULL_CHR_H_INT) {
if (c->dither == SWS_DITHER_BAYER) {
av_log(c, AV_LOG_DEBUG,
"Ordered dither is not supported in full chroma interpolation for destination format '%s'\n",
av_get_pix_fmt_name(dstFormat));
c->dither = SWS_DITHER_ED;
}
}
}
if (isPlanarRGB(dstFormat)) {
if (!(flags & SWS_FULL_CHR_H_INT)) {
av_log(c, AV_LOG_DEBUG,
"%s output is not supported with half chroma resolution, switching to full\n",
av_get_pix_fmt_name(dstFormat));
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
/* reuse chroma for 2 pixels RGB/BGR unless user wants full
* chroma interpolation */
if (flags & SWS_FULL_CHR_H_INT &&
!isPlanarRGB(dstFormat) &&
dstFormat != AV_PIX_FMT_RGBA &&
dstFormat != AV_PIX_FMT_ARGB &&
dstFormat != AV_PIX_FMT_BGRA &&
dstFormat != AV_PIX_FMT_ABGR &&
dstFormat != AV_PIX_FMT_RGB24 &&
dstFormat != AV_PIX_FMT_BGR24 &&
dstFormat != AV_PIX_FMT_BGR4_BYTE &&
dstFormat != AV_PIX_FMT_RGB4_BYTE &&
dstFormat != AV_PIX_FMT_BGR8 &&
dstFormat != AV_PIX_FMT_RGB8
) {
av_log(c, AV_LOG_WARNING,
"full chroma interpolation for destination format '%s' not yet implemented\n",
av_get_pix_fmt_name(dstFormat));
flags &= ~SWS_FULL_CHR_H_INT;
c->flags = flags;
}
if (isAnyRGB(dstFormat) && !(flags & SWS_FULL_CHR_H_INT))
c->chrDstHSubSample = 1;
// drop some chroma lines if the user wants it
c->vChrDrop = (flags & SWS_SRC_V_CHR_DROP_MASK) >>
SWS_SRC_V_CHR_DROP_SHIFT;
c->chrSrcVSubSample += c->vChrDrop;
/* drop every other pixel for chroma calculation unless user
* wants full chroma */
if (isAnyRGB(srcFormat) && !(flags & SWS_FULL_CHR_H_INP) &&
srcFormat != AV_PIX_FMT_RGB8 && srcFormat != AV_PIX_FMT_BGR8 &&
srcFormat != AV_PIX_FMT_RGB4 && srcFormat != AV_PIX_FMT_BGR4 &&
srcFormat != AV_PIX_FMT_RGB4_BYTE && srcFormat != AV_PIX_FMT_BGR4_BYTE &&
srcFormat != AV_PIX_FMT_GBRP9BE && srcFormat != AV_PIX_FMT_GBRP9LE &&
srcFormat != AV_PIX_FMT_GBRP10BE && srcFormat != AV_PIX_FMT_GBRP10LE &&
srcFormat != AV_PIX_FMT_GBRP12BE && srcFormat != AV_PIX_FMT_GBRP12LE &&
srcFormat != AV_PIX_FMT_GBRP14BE && srcFormat != AV_PIX_FMT_GBRP14LE &&
srcFormat != AV_PIX_FMT_GBRP16BE && srcFormat != AV_PIX_FMT_GBRP16LE &&
((dstW >> c->chrDstHSubSample) <= (srcW >> 1) ||
(flags & SWS_FAST_BILINEAR)))
c->chrSrcHSubSample = 1;
// Note the FF_CEIL_RSHIFT is so that we always round toward +inf.
c->chrSrcW = FF_CEIL_RSHIFT(srcW, c->chrSrcHSubSample);
c->chrSrcH = FF_CEIL_RSHIFT(srcH, c->chrSrcVSubSample);
c->chrDstW = FF_CEIL_RSHIFT(dstW, c->chrDstHSubSample);
c->chrDstH = FF_CEIL_RSHIFT(dstH, c->chrDstVSubSample);
FF_ALLOCZ_OR_GOTO(c, c->formatConvBuffer, FFALIGN(srcW*2+78, 16) * 2, fail);
c->srcBpc = 1 + desc_src->comp[0].depth_minus1;
if (c->srcBpc < 8)
c->srcBpc = 8;
c->dstBpc = 1 + desc_dst->comp[0].depth_minus1;
if (c->dstBpc < 8)
c->dstBpc = 8;
if (isAnyRGB(srcFormat) || srcFormat == AV_PIX_FMT_PAL8)
c->srcBpc = 16;
if (c->dstBpc == 16)
dst_stride <<= 1;
if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 14) {
c->canMMXEXTBeUsed = dstW >= srcW && (dstW & 31) == 0 &&
c->chrDstW >= c->chrSrcW &&
(srcW & 15) == 0;
if (!c->canMMXEXTBeUsed && dstW >= srcW && c->chrDstW >= c->chrSrcW && (srcW & 15) == 0
&& (flags & SWS_FAST_BILINEAR)) {
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"output width is not a multiple of 32 -> no MMXEXT scaler\n");
}
if (usesHFilter || isNBPS(c->srcFormat) || is16BPS(c->srcFormat) || isAnyRGB(c->srcFormat))
c->canMMXEXTBeUsed = 0;
} else
c->canMMXEXTBeUsed = 0;
c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW;
c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH;
/* Match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src
* to pixel n-2 of dst, but only for the FAST_BILINEAR mode otherwise do
* correct scaling.
* n-2 is the last chrominance sample available.
* This is not perfect, but no one should notice the difference, the more
* correct variant would be like the vertical one, but that would require
* some special code for the first and last pixel */
if (flags & SWS_FAST_BILINEAR) {
if (c->canMMXEXTBeUsed) {
c->lumXInc += 20;
c->chrXInc += 20;
}
// we don't use the x86 asm scaler if MMX is available
else if (INLINE_MMX(cpu_flags) && c->dstBpc <= 14) {
c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20;
c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20;
}
}
// hardcoded for now
c->gamma_value = 2.2;
tmpFmt = AV_PIX_FMT_RGBA64LE;
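/* Gamma-corrected scaling runs as a cascade: convert the input to RGBA64LE, scale that with SWS_GAMMA_CORRECT set, then convert to the destination format if it differs. */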
if (!unscaled && c->gamma_flag && (srcFormat != tmpFmt || dstFormat != tmpFmt)) {
c->cascaded_context[0] = NULL;
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
srcW, srcH, tmpFmt, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
srcW, srcH, tmpFmt,
flags, NULL, NULL, c->param);
if (!c->cascaded_context[0]) {
return -1;
}
c->cascaded_context[1] = sws_getContext(srcW, srcH, tmpFmt,
dstW, dstH, tmpFmt,
flags | SWS_GAMMA_CORRECT, srcFilter, dstFilter, c->param);
if (!c->cascaded_context[1])
return -1;
c->cascaded_context[2] = NULL;
if (dstFormat != tmpFmt) {
ret = av_image_alloc(c->cascaded1_tmp, c->cascaded1_tmpStride,
dstW, dstH, tmpFmt, 64);
if (ret < 0)
return ret;
c->cascaded_context[2] = sws_getContext(dstW, dstH, tmpFmt,
dstW, dstH, dstFormat,
flags, NULL, NULL, c->param);
if (!c->cascaded_context[2])
return -1;
}
return 0;
}
c->gamma = NULL;
c->inv_gamma = NULL;
if (c->flags & SWS_GAMMA_CORRECT) {
c->gamma = alloc_gamma_tbl(c->gamma_value);
if (!c->gamma)
return AVERROR(ENOMEM);
c->inv_gamma = alloc_gamma_tbl(1.f/c->gamma_value);
if (!c->inv_gamma) {
return AVERROR(ENOMEM);
}
}
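/* Bayer inputs are also handled as a cascade: debayer to RGB24 at source size first, then scale/convert that to the destination. */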
if (isBayer(srcFormat)) {
if (!unscaled ||
(dstFormat != AV_PIX_FMT_RGB24 && dstFormat != AV_PIX_FMT_YUV420P)) {
enum AVPixelFormat tmpFormat = AV_PIX_FMT_RGB24;
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
srcW, srcH, tmpFormat, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
srcW, srcH, tmpFormat,
flags, srcFilter, NULL, c->param);
if (!c->cascaded_context[0])
return -1;
c->cascaded_context[1] = sws_getContext(srcW, srcH, tmpFormat,
dstW, dstH, dstFormat,
flags, NULL, dstFilter, c->param);
if (!c->cascaded_context[1])
return -1;
return 0;
}
}
#define USE_MMAP (HAVE_MMAP && HAVE_MPROTECT && defined MAP_ANONYMOUS)
/* precalculate horizontal scaler filter coefficients */
{
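/* The fast-bilinear MMXEXT path emits its horizontal scaler as code at runtime; the code buffers below are allocated writable and, on the mmap path, made executable with mprotect() once filled. */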
#if HAVE_MMXEXT_INLINE
// can't downscale !!!
if (c->canMMXEXTBeUsed && (flags & SWS_FAST_BILINEAR)) {
c->lumMmxextFilterCodeSize = ff_init_hscaler_mmxext(dstW, c->lumXInc, NULL,
NULL, NULL, 8);
c->chrMmxextFilterCodeSize = ff_init_hscaler_mmxext(c->chrDstW, c->chrXInc,
NULL, NULL, NULL, 4);
#if USE_MMAP
c->lumMmxextFilterCode = mmap(NULL, c->lumMmxextFilterCodeSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
c->chrMmxextFilterCode = mmap(NULL, c->chrMmxextFilterCodeSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
#elif HAVE_VIRTUALALLOC
c->lumMmxextFilterCode = VirtualAlloc(NULL,
c->lumMmxextFilterCodeSize,
MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
c->chrMmxextFilterCode = VirtualAlloc(NULL,
c->chrMmxextFilterCodeSize,
MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
#else
c->lumMmxextFilterCode = av_malloc(c->lumMmxextFilterCodeSize);
c->chrMmxextFilterCode = av_malloc(c->chrMmxextFilterCodeSize);
#endif
#ifdef MAP_ANONYMOUS
if (c->lumMmxextFilterCode == MAP_FAILED || c->chrMmxextFilterCode == MAP_FAILED)
#else
if (!c->lumMmxextFilterCode || !c->chrMmxextFilterCode)
#endif
{
av_log(c, AV_LOG_ERROR, "Failed to allocate MMX2FilterCode\n");
return AVERROR(ENOMEM);
}
FF_ALLOCZ_OR_GOTO(c, c->hLumFilter, (dstW / 8 + 8) * sizeof(int16_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hChrFilter, (c->chrDstW / 4 + 8) * sizeof(int16_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW / 2 / 8 + 8) * sizeof(int32_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW / 2 / 4 + 8) * sizeof(int32_t), fail);
ff_init_hscaler_mmxext( dstW, c->lumXInc, c->lumMmxextFilterCode,
c->hLumFilter, (uint32_t*)c->hLumFilterPos, 8);
ff_init_hscaler_mmxext(c->chrDstW, c->chrXInc, c->chrMmxextFilterCode,
c->hChrFilter, (uint32_t*)c->hChrFilterPos, 4);
#if USE_MMAP
if ( mprotect(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1
|| mprotect(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1) {
av_log(c, AV_LOG_ERROR, "mprotect failed, cannot use fast bilinear scaler\n");
goto fail;
}
#endif
} else
#endif /* HAVE_MMXEXT_INLINE */
{
const int filterAlign = X86_MMX(cpu_flags) ? 4 :
PPC_ALTIVEC(cpu_flags) ? 8 : 1;
if ((ret = initFilter(&c->hLumFilter, &c->hLumFilterPos,
&c->hLumFilterSize, c->lumXInc,
srcW, dstW, filterAlign, 1 << 14,
(flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
cpu_flags, srcFilter->lumH, dstFilter->lumH,
c->param,
get_local_pos(c, 0, 0, 0),
get_local_pos(c, 0, 0, 0))) < 0)
goto fail;
if ((ret = initFilter(&c->hChrFilter, &c->hChrFilterPos,
&c->hChrFilterSize, c->chrXInc,
c->chrSrcW, c->chrDstW, filterAlign, 1 << 14,
(flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
cpu_flags, srcFilter->chrH, dstFilter->chrH,
c->param,
get_local_pos(c, c->chrSrcHSubSample, c->src_h_chr_pos, 0),
get_local_pos(c, c->chrDstHSubSample, c->dst_h_chr_pos, 0))) < 0)
goto fail;
}
} // initialize horizontal stuff
/* precalculate vertical scaler filter coefficients */
{
const int filterAlign = X86_MMX(cpu_flags) ? 2 :
PPC_ALTIVEC(cpu_flags) ? 8 : 1;
if ((ret = initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize,
c->lumYInc, srcH, dstH, filterAlign, (1 << 12),
(flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
cpu_flags, srcFilter->lumV, dstFilter->lumV,
c->param,
get_local_pos(c, 0, 0, 1),
get_local_pos(c, 0, 0, 1))) < 0)
goto fail;
if ((ret = initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize,
c->chrYInc, c->chrSrcH, c->chrDstH,
filterAlign, (1 << 12),
(flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
cpu_flags, srcFilter->chrV, dstFilter->chrV,
c->param,
get_local_pos(c, c->chrSrcVSubSample, c->src_v_chr_pos, 1),
get_local_pos(c, c->chrDstVSubSample, c->dst_v_chr_pos, 1))) < 0)
goto fail;
#if HAVE_ALTIVEC
FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof(vector signed short) * c->vLumFilterSize * c->dstH, fail);
FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof(vector signed short) * c->vChrFilterSize * c->chrDstH, fail);
for (i = 0; i < c->vLumFilterSize * c->dstH; i++) {
int j;
short *p = (short *)&c->vYCoeffsBank[i];
for (j = 0; j < 8; j++)
p[j] = c->vLumFilter[i];
}
for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) {
int j;
short *p = (short *)&c->vCCoeffsBank[i];
for (j = 0; j < 8; j++)
p[j] = c->vChrFilter[i];
}
#endif
}
// calculate buffer sizes so that they won't run out while handling these damn slices
c->vLumBufSize = c->vLumFilterSize;
c->vChrBufSize = c->vChrFilterSize;
for (i = 0; i < dstH; i++) {
int chrI = (int64_t)i * c->chrDstH / dstH;
int nextSlice = FFMAX(c->vLumFilterPos[i] + c->vLumFilterSize - 1,
((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)
<< c->chrSrcVSubSample));
nextSlice >>= c->chrSrcVSubSample;
nextSlice <<= c->chrSrcVSubSample;
if (c->vLumFilterPos[i] + c->vLumBufSize < nextSlice)
c->vLumBufSize = nextSlice - c->vLumFilterPos[i];
if (c->vChrFilterPos[chrI] + c->vChrBufSize <
(nextSlice >> c->chrSrcVSubSample))
c->vChrBufSize = (nextSlice >> c->chrSrcVSubSample) -
c->vChrFilterPos[chrI];
}
for (i = 0; i < 4; i++)
FF_ALLOCZ_OR_GOTO(c, c->dither_error[i], (c->dstW+2) * sizeof(int), fail);
/* Allocate pixbufs (we use dynamic allocation because otherwise we would
* need to allocate several megabytes to handle all possible cases) */
FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
FF_ALLOC_OR_GOTO(c, c->chrUPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
FF_ALLOC_OR_GOTO(c, c->chrVPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
/* Note we need at least one pixel more at the end because of the MMX code
* (just in case someone wants to replace the 4000/8000). */
/* align at 16 bytes for AltiVec */
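/* Each scanline buffer is registered twice (at i and i + vLumBufSize) so the sliding window of slices can be addressed without wrapping the index. */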
for (i = 0; i < c->vLumBufSize; i++) {
FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i + c->vLumBufSize],
dst_stride + 16, fail);
c->lumPixBuf[i] = c->lumPixBuf[i + c->vLumBufSize];
}
// 64 / c->scalingBpp is the same as 16 / sizeof(scaling_intermediate)
c->uv_off = (dst_stride>>1) + 64 / (c->dstBpc &~ 7);
c->uv_offx2 = dst_stride + 16;
for (i = 0; i < c->vChrBufSize; i++) {
FF_ALLOC_OR_GOTO(c, c->chrUPixBuf[i + c->vChrBufSize],
dst_stride * 2 + 32, fail);
c->chrUPixBuf[i] = c->chrUPixBuf[i + c->vChrBufSize];
c->chrVPixBuf[i] = c->chrVPixBuf[i + c->vChrBufSize]
= c->chrUPixBuf[i] + (dst_stride >> 1) + 8;
}
if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
for (i = 0; i < c->vLumBufSize; i++) {
FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i + c->vLumBufSize],
dst_stride + 16, fail);
c->alpPixBuf[i] = c->alpPixBuf[i + c->vLumBufSize];
}
// try to avoid drawing green stuff between the right end and the stride end
for (i = 0; i < c->vChrBufSize; i++)
if(desc_dst->comp[0].depth_minus1 == 15){
av_assert0(c->dstBpc > 14);
for(j=0; j<dst_stride/2+1; j++)
((int32_t*)(c->chrUPixBuf[i]))[j] = 1<<18;
} else
for(j=0; j<dst_stride+1; j++)
((int16_t*)(c->chrUPixBuf[i]))[j] = 1<<14;
if (flags & SWS_PRINT_INFO) {
const char *scaler = NULL, *cpucaps;
for (i = 0; i < FF_ARRAY_ELEMS(scale_algorithms); i++) {
if (flags & scale_algorithms[i].flag) {
scaler = scale_algorithms[i].description;
break;
}
}
if (!scaler)
scaler = "ehh flags invalid?!";
av_log(c, AV_LOG_INFO, "%s scaler, from %s to %s%s ",
#ifdef DITHER1XBPP
dstFormat == AV_PIX_FMT_BGR555 || dstFormat == AV_PIX_FMT_BGR565 ||
dstFormat == AV_PIX_FMT_RGB444BE || dstFormat == AV_PIX_FMT_RGB444LE ||
dstFormat == AV_PIX_FMT_BGR444BE || dstFormat == AV_PIX_FMT_BGR444LE ?
#else
"",
#endif
if (INLINE_MMXEXT(cpu_flags))
cpucaps = "MMXEXT";
else if (INLINE_AMD3DNOW(cpu_flags))
cpucaps = "3DNOW";
else if (INLINE_MMX(cpu_flags))
else if (PPC_ALTIVEC(cpu_flags))
cpucaps = "AltiVec";
cpucaps = "C";
av_log(c, AV_LOG_INFO, "using %s\n", cpucaps);
av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
av_log(c, AV_LOG_DEBUG,
"lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
av_log(c, AV_LOG_DEBUG,
"chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH,
c->chrXInc, c->chrYInc);
}
/* unscaled special cases */
if (unscaled && !usesHFilter && !usesVFilter &&
(c->srcRange == c->dstRange || isAnyRGB(dstFormat))) {
ff_get_unscaled_swscale(c);
if (c->swscale) {
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"using unscaled %s -> %s special converter\n",
av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
return 0;
}
}
c->swscale = ff_getSwsFunc(c);
return 0;
fail: // FIXME replace things by appropriate error codes
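/* If a single pass cannot handle the requested ratio, split the job into two cascaded contexts through an intermediate YUV420P image sized at the geometric mean of source and destination. */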
if (ret == RETCODE_USE_CASCADE) {
int tmpW = sqrt(srcW * (int64_t)dstW);
int tmpH = sqrt(srcH * (int64_t)dstH);
enum AVPixelFormat tmpFormat = AV_PIX_FMT_YUV420P;
if (srcW*(int64_t)srcH <= 4LL*dstW*dstH)
return AVERROR(EINVAL);
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
tmpW, tmpH, tmpFormat, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
tmpW, tmpH, tmpFormat,
flags, srcFilter, NULL, c->param);
if (!c->cascaded_context[0])
return -1;
c->cascaded_context[1] = sws_getContext(tmpW, tmpH, tmpFormat,
dstW, dstH, dstFormat,
flags, NULL, dstFilter, c->param);
if (!c->cascaded_context[1])
return -1;
return 0;
}
return -1;
}
SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param)
{
SwsContext *c;
if (!(c = sws_alloc_context()))
return NULL;
c->flags = flags;
c->srcW = srcW;
c->srcH = srcH;
c->dstW = dstW;
c->dstH = dstH;
c->srcFormat = srcFormat;
c->dstFormat = dstFormat;
if (param) {
c->param[0] = param[0];
c->param[1] = param[1];
}
if (sws_init_context(c, srcFilter, dstFilter) < 0) {
sws_freeContext(c);
return NULL;
}
return c;
}
SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
float lumaSharpen, float chromaSharpen,
float chromaHShift, float chromaVShift,
int verbose)
{
SwsFilter *filter = av_malloc(sizeof(SwsFilter));
if (!filter)
return NULL;
if (lumaGBlur != 0.0) {
filter->lumH = sws_getGaussianVec(lumaGBlur, 3.0);
filter->lumV = sws_getGaussianVec(lumaGBlur, 3.0);
} else {
filter->lumH = sws_getIdentityVec();
filter->lumV = sws_getIdentityVec();
}
if (chromaGBlur != 0.0) {
filter->chrH = sws_getGaussianVec(chromaGBlur, 3.0);
filter->chrV = sws_getGaussianVec(chromaGBlur, 3.0);
} else {
filter->chrH = sws_getIdentityVec();
filter->chrV = sws_getIdentityVec();
}
if (!filter->lumH || !filter->lumV || !filter->chrH || !filter->chrV)
goto fail;
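/* Sharpening below is "identity minus scaled kernel": the (possibly blurred) vector is scaled by -sharpen and an identity vector is added. */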
if (chromaSharpen != 0.0) {
SwsVector *id = sws_getIdentityVec();
sws_scaleVec(filter->chrH, -chromaSharpen);
sws_scaleVec(filter->chrV, -chromaSharpen);
sws_addVec(filter->chrH, id);
sws_addVec(filter->chrV, id);
sws_freeVec(id);
}
if (lumaSharpen != 0.0) {
SwsVector *id = sws_getIdentityVec();
sws_scaleVec(filter->lumH, -lumaSharpen);
sws_scaleVec(filter->lumV, -lumaSharpen);
sws_addVec(filter->lumH, id);
sws_addVec(filter->lumV, id);
sws_freeVec(id);
}
if (chromaHShift != 0.0)
sws_shiftVec(filter->chrH, (int)(chromaHShift + 0.5));
if (chromaVShift != 0.0)
sws_shiftVec(filter->chrV, (int)(chromaVShift + 0.5));
sws_normalizeVec(filter->chrH, 1.0);
sws_normalizeVec(filter->chrV, 1.0);
sws_normalizeVec(filter->lumH, 1.0);
sws_normalizeVec(filter->lumV, 1.0);
if (verbose)
sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG);
if (verbose)
sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG);
return filter;
fail:
sws_freeVec(filter->lumH);
sws_freeVec(filter->lumV);
sws_freeVec(filter->chrH);
sws_freeVec(filter->chrV);
av_freep(&filter);
return NULL;
}
SwsVector *sws_allocVec(int length)
{
SwsVector *vec;
if(length <= 0 || length > INT_MAX/ sizeof(double))
return NULL;
vec = av_malloc(sizeof(SwsVector));
if (!vec)
return NULL;
vec->length = length;
vec->coeff = av_malloc(sizeof(double) * length);
if (!vec->coeff)
av_freep(&vec);
return vec;
}
SwsVector *sws_getGaussianVec(double variance, double quality)
{
const int length = (int)(variance * quality + 0.5) | 1;
int i;
double middle = (length - 1) * 0.5;
SwsVector *vec;
if(variance < 0 || quality < 0)
return NULL;
vec = sws_allocVec(length);
if (!vec)
return NULL;
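/* Fill with samples of a Gaussian centred on the middle tap; sws_normalizeVec() below rescales the result to unit sum. */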
for (i = 0; i < length; i++) {
double dist = i - middle;
vec->coeff[i] = exp(-dist * dist / (2 * variance * variance)) /
sqrt(2 * variance * M_PI);
}
sws_normalizeVec(vec, 1.0);
return vec;
}
SwsVector *sws_getConstVec(double c, int length)
{
int i;
SwsVector *vec = sws_allocVec(length);
if (!vec)
return NULL;
for (i = 0; i < length; i++)
vec->coeff[i] = c;
return vec;
}
SwsVector *sws_getIdentityVec(void)
{
return sws_getConstVec(1.0, 1);
}
static double sws_dcVec(SwsVector *a)
{
int i;
double sum = 0;
for (i = 0; i < a->length; i++)
sum += a->coeff[i];
return sum;
}
void sws_scaleVec(SwsVector *a, double scalar)
{
int i;
for (i = 0; i < a->length; i++)
a->coeff[i] *= scalar;
}
void sws_normalizeVec(SwsVector *a, double height)
{
sws_scaleVec(a, height / sws_dcVec(a));
}
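/* Discrete convolution of two coefficient vectors; the result has length a->length + b->length - 1. */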
static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b)
{
int length = a->length + b->length - 1;
int i, j;
SwsVector *vec = sws_getConstVec(0.0, length);
if (!vec)
return NULL;
for (i = 0; i < a->length; i++) {
for (j = 0; j < b->length; j++) {
vec->coeff[i + j] += a->coeff[i] * b->coeff[j];
}
}
return vec;
}
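/* Centre-aligned element-wise sum: the result takes the larger length and each input is added centred within it. */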
static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b)
{
int length = FFMAX(a->length, b->length);
int i;
SwsVector *vec = sws_getConstVec(0.0, length);
if (!vec)
return NULL;
for (i = 0; i < a->length; i++)
vec->coeff[i + (length - 1) / 2 - (a->length - 1) / 2] += a->coeff[i];
for (i = 0; i < b->length; i++)
vec->coeff[i + (length - 1) / 2 - (b->length - 1) / 2] += b->coeff[i];
return vec;
}
static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b)
{
int length = FFMAX(a->length, b->length);
int i;
SwsVector *vec = sws_getConstVec(0.0, length);
if (!vec)
return NULL;
for (i = 0; i < a->length; i++)
vec->coeff[i + (length - 1) / 2 - (a->length - 1) / 2] += a->coeff[i];
for (i = 0; i < b->length; i++)
vec->coeff[i + (length - 1) / 2 - (b->length - 1) / 2] -= b->coeff[i];
return vec;
}
/* shift left / or right if "shift" is negative */
static SwsVector *sws_getShiftedVec(SwsVector *a, int shift)
{
int length = a->length + FFABS(shift) * 2;
int i;
SwsVector *vec = sws_getConstVec(0.0, length);
if (!vec)
return NULL;
for (i = 0; i < a->length; i++) {
vec->coeff[i + (length - 1) / 2 -
(a->length - 1) / 2 - shift] = a->coeff[i];
}
return vec;
}
void sws_shiftVec(SwsVector *a, int shift)
{
SwsVector *shifted = sws_getShiftedVec(a, shift);
av_free(a->coeff);
a->coeff = shifted->coeff;
a->length = shifted->length;
av_free(shifted);
}
void sws_addVec(SwsVector *a, SwsVector *b)
{
SwsVector *sum = sws_sumVec(a, b);
av_free(a->coeff);
a->coeff = sum->coeff;
a->length = sum->length;
av_free(sum);
}
void sws_subVec(SwsVector *a, SwsVector *b)
{
SwsVector *diff = sws_diffVec(a, b);
av_free(a->coeff);
a->coeff = diff->coeff;
a->length = diff->length;
av_free(diff);
}
void sws_convVec(SwsVector *a, SwsVector *b)
{
SwsVector *conv = sws_getConvVec(a, b);
av_free(a->coeff);
a->coeff = conv->coeff;
a->length = conv->length;
av_free(conv);
}
SwsVector *sws_cloneVec(SwsVector *a)
{
SwsVector *vec = sws_allocVec(a->length);
if (!vec)
return NULL;
memcpy(vec->coeff, a->coeff, a->length * sizeof(*a->coeff));
return vec;
}
void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level)
{
int i;
double max = 0;
double min = 0;
double range;
for (i = 0; i < a->length; i++)
if (a->coeff[i] > max)
max = a->coeff[i];
for (i = 0; i < a->length; i++)
if (a->coeff[i] < min)
min = a->coeff[i];
range = max - min;
for (i = 0; i < a->length; i++) {
int x = (int)((a->coeff[i] - min) * 60.0 / range + 0.5);
av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]);
for (; x > 0; x--)
av_log(log_ctx, log_level, " ");
av_log(log_ctx, log_level, "|\n");
}
}
void sws_freeVec(SwsVector *a)
{
av_freep(&a->coeff);
av_free(a);
}
void sws_freeFilter(SwsFilter *filter)
{
if (!filter)
return;
sws_freeVec(filter->lumH);
sws_freeVec(filter->lumV);
sws_freeVec(filter->chrH);
sws_freeVec(filter->chrV);
av_free(filter);
}
void sws_freeContext(SwsContext *c)
{
int i;
if (!c)
return;
if (c->lumPixBuf) {
for (i = 0; i < c->vLumBufSize; i++)
av_freep(&c->lumPixBuf[i]);
av_freep(&c->lumPixBuf);
}
if (c->chrUPixBuf) {
for (i = 0; i < c->vChrBufSize; i++)
av_freep(&c->chrUPixBuf[i]);
av_freep(&c->chrUPixBuf);
av_freep(&c->chrVPixBuf);