}
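/* Replace the deprecated full-range "J" (JPEG) YUV formats with their
 * generic counterparts. Returns 1 for formats that imply full range
 * (including the grayscale formats), 0 otherwise; the caller ORs the result
 * into srcRange/dstRange. */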
static int handle_jpeg(enum AVPixelFormat *format)
{
switch (*format) {
case AV_PIX_FMT_YUVJ420P:
*format = AV_PIX_FMT_YUV420P;
return 1;
case AV_PIX_FMT_YUVJ411P:
*format = AV_PIX_FMT_YUV411P;
return 1;
case AV_PIX_FMT_YUVJ422P:
*format = AV_PIX_FMT_YUV422P;
return 1;
case AV_PIX_FMT_YUVJ444P:
*format = AV_PIX_FMT_YUV444P;
return 1;
case AV_PIX_FMT_YUVJ440P:
*format = AV_PIX_FMT_YUV440P;
return 1;
case AV_PIX_FMT_GRAY8:
case AV_PIX_FMT_YA8:
case AV_PIX_FMT_GRAY9LE:
case AV_PIX_FMT_GRAY9BE:
case AV_PIX_FMT_GRAY10LE:
case AV_PIX_FMT_GRAY10BE:
case AV_PIX_FMT_GRAY12LE:
case AV_PIX_FMT_GRAY12BE:
case AV_PIX_FMT_GRAY14LE:
case AV_PIX_FMT_GRAY14BE:
case AV_PIX_FMT_GRAY16LE:
case AV_PIX_FMT_GRAY16BE:
case AV_PIX_FMT_YA16BE:
case AV_PIX_FMT_YA16LE:
return 1;
default:
return 0;
}
}
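/* Treat packed RGB formats with an unused ("0") alpha byte as their
 * alpha-carrying counterparts; the non-zero return value is ORed into
 * src0Alpha/dst0Alpha by handle_formats(). */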
static int handle_0alpha(enum AVPixelFormat *format)
{
switch (*format) {
case AV_PIX_FMT_0BGR : *format = AV_PIX_FMT_ABGR ; return 1;
case AV_PIX_FMT_BGR0 : *format = AV_PIX_FMT_BGRA ; return 4;
case AV_PIX_FMT_0RGB : *format = AV_PIX_FMT_ARGB ; return 1;
case AV_PIX_FMT_RGB0 : *format = AV_PIX_FMT_RGBA ; return 4;
default: return 0;
}
}
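/* Map the 12-bit XYZ formats onto RGB48 of the same endianness; the actual
 * XYZ<->RGB conversion uses the tables built by fill_xyztables(). */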
static int handle_xyz(enum AVPixelFormat *format)
{
switch (*format) {
case AV_PIX_FMT_XYZ12BE : *format = AV_PIX_FMT_RGB48BE; return 1;
case AV_PIX_FMT_XYZ12LE : *format = AV_PIX_FMT_RGB48LE; return 1;
default: return 0;
}
}
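/* Normalize the user-visible formats once per context and remember which
 * substitutions (0-alpha, XYZ) were made so the scaler can compensate. */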
static void handle_formats(SwsContext *c)
{
c->src0Alpha |= handle_0alpha(&c->srcFormat);
c->dst0Alpha |= handle_0alpha(&c->dstFormat);
c->srcXYZ |= handle_xyz(&c->srcFormat);
c->dstXYZ |= handle_xyz(&c->dstFormat);
if (c->srcXYZ || c->dstXYZ)
fill_xyztables(c);
}
SwsContext *sws_alloc_context(void)
{
SwsContext *c = av_mallocz(sizeof(SwsContext));
av_assert0(offsetof(SwsContext, redDither) + DITHER32_INT == offsetof(SwsContext, dither32));
if (c) {
c->av_class = &ff_sws_context_class;
av_opt_set_defaults(c);
}
return c;
}
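/* Build a 65536-entry table mapping x to pow(x/65535, e) scaled back to
 * 16 bits; used for the gamma-correction cascade in sws_init_context(). */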
static uint16_t * alloc_gamma_tbl(double e)
{
int i = 0;
uint16_t * tbl;
tbl = (uint16_t*)av_malloc(sizeof(uint16_t) * 1 << 16);
if (!tbl)
return NULL;
for (i = 0; i < 65536; ++i) {
tbl[i] = pow(i / 65535.0, e) * 65535.0;
}
return tbl;
}
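/* Return the matching pixel format without an alpha channel, or
 * AV_PIX_FMT_NONE if none is defined here; used when alpha is blended away
 * through a cascaded context. */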
static enum AVPixelFormat alphaless_fmt(enum AVPixelFormat fmt)
{
switch(fmt) {
case AV_PIX_FMT_ARGB: return AV_PIX_FMT_RGB24;
case AV_PIX_FMT_RGBA: return AV_PIX_FMT_RGB24;
case AV_PIX_FMT_ABGR: return AV_PIX_FMT_BGR24;
case AV_PIX_FMT_BGRA: return AV_PIX_FMT_BGR24;
case AV_PIX_FMT_YA8: return AV_PIX_FMT_GRAY8;
case AV_PIX_FMT_YUVA420P: return AV_PIX_FMT_YUV420P;
case AV_PIX_FMT_YUVA422P: return AV_PIX_FMT_YUV422P;
case AV_PIX_FMT_YUVA444P: return AV_PIX_FMT_YUV444P;
case AV_PIX_FMT_GBRAP: return AV_PIX_FMT_GBRP;
case AV_PIX_FMT_GBRAP10LE: return AV_PIX_FMT_GBRP10;
case AV_PIX_FMT_GBRAP10BE: return AV_PIX_FMT_GBRP10;
case AV_PIX_FMT_GBRAP12LE: return AV_PIX_FMT_GBRP12;
case AV_PIX_FMT_GBRAP12BE: return AV_PIX_FMT_GBRP12;
case AV_PIX_FMT_GBRAP16LE: return AV_PIX_FMT_GBRP16;
case AV_PIX_FMT_GBRAP16BE: return AV_PIX_FMT_GBRP16;
case AV_PIX_FMT_RGBA64LE: return AV_PIX_FMT_RGB48;
case AV_PIX_FMT_RGBA64BE: return AV_PIX_FMT_RGB48;
case AV_PIX_FMT_BGRA64LE: return AV_PIX_FMT_BGR48;
case AV_PIX_FMT_BGRA64BE: return AV_PIX_FMT_BGR48;
case AV_PIX_FMT_YA16BE: return AV_PIX_FMT_GRAY16;
case AV_PIX_FMT_YA16LE: return AV_PIX_FMT_GRAY16;
case AV_PIX_FMT_YUVA420P9BE: return AV_PIX_FMT_YUV420P9;
case AV_PIX_FMT_YUVA422P9BE: return AV_PIX_FMT_YUV422P9;
case AV_PIX_FMT_YUVA444P9BE: return AV_PIX_FMT_YUV444P9;
case AV_PIX_FMT_YUVA420P9LE: return AV_PIX_FMT_YUV420P9;
case AV_PIX_FMT_YUVA422P9LE: return AV_PIX_FMT_YUV422P9;
case AV_PIX_FMT_YUVA444P9LE: return AV_PIX_FMT_YUV444P9;
case AV_PIX_FMT_YUVA420P10BE: return AV_PIX_FMT_YUV420P10;
case AV_PIX_FMT_YUVA422P10BE: return AV_PIX_FMT_YUV422P10;
case AV_PIX_FMT_YUVA444P10BE: return AV_PIX_FMT_YUV444P10;
case AV_PIX_FMT_YUVA420P10LE: return AV_PIX_FMT_YUV420P10;
case AV_PIX_FMT_YUVA422P10LE: return AV_PIX_FMT_YUV422P10;
case AV_PIX_FMT_YUVA444P10LE: return AV_PIX_FMT_YUV444P10;
case AV_PIX_FMT_YUVA420P16BE: return AV_PIX_FMT_YUV420P16;
case AV_PIX_FMT_YUVA422P16BE: return AV_PIX_FMT_YUV422P16;
case AV_PIX_FMT_YUVA444P16BE: return AV_PIX_FMT_YUV444P16;
case AV_PIX_FMT_YUVA420P16LE: return AV_PIX_FMT_YUV420P16;
case AV_PIX_FMT_YUVA422P16LE: return AV_PIX_FMT_YUV422P16;
case AV_PIX_FMT_YUVA444P16LE: return AV_PIX_FMT_YUV444P16;
// case AV_PIX_FMT_AYUV64LE:
// case AV_PIX_FMT_AYUV64BE:
// case AV_PIX_FMT_PAL8:
default: return AV_PIX_FMT_NONE;
}
}
av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
SwsFilter *dstFilter)
{
int i;
int ret = 0;
int usesVFilter, usesHFilter;
int unscaled;
SwsFilter dummyFilter = { NULL, NULL, NULL, NULL };
int srcW = c->srcW;
int srcH = c->srcH;
int dstW = c->dstW;
int dstH = c->dstH;
int dst_stride = FFALIGN(dstW * sizeof(int16_t) + 66, 16);
int flags, cpu_flags;
enum AVPixelFormat srcFormat = c->srcFormat;
enum AVPixelFormat dstFormat = c->dstFormat;
const AVPixFmtDescriptor *desc_src;
const AVPixFmtDescriptor *desc_dst;
enum AVPixelFormat tmpFmt;
cpu_flags = av_get_cpu_flags();
flags = c->flags;
ff_sws_rgb2rgb_init();
unscaled = (srcW == dstW && srcH == dstH);
c->srcRange |= handle_jpeg(&c->srcFormat);
c->dstRange |= handle_jpeg(&c->dstFormat);
if(srcFormat!=c->srcFormat || dstFormat!=c->dstFormat)
av_log(c, AV_LOG_WARNING, "deprecated pixel format used, make sure you did set range correctly\n");
if (!c->contrast && !c->saturation && !c->dstFormatBpp)
sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], c->srcRange,
ff_yuv2rgb_coeffs[SWS_CS_DEFAULT],
c->dstRange, 0, 1 << 16, 1 << 16);
handle_formats(c);
srcFormat = c->srcFormat;
dstFormat = c->dstFormat;
desc_src = av_pix_fmt_desc_get(srcFormat);
desc_dst = av_pix_fmt_desc_get(dstFormat);
// If the source has no alpha then disable alpha blendaway
if (c->src0Alpha)
c->alphablend = SWS_ALPHA_BLEND_NONE;
if (!(unscaled && sws_isSupportedEndiannessConversion(srcFormat) &&
av_pix_fmt_swap_endianness(srcFormat) == dstFormat)) {
if (!sws_isSupportedInput(srcFormat)) {
av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n",
av_get_pix_fmt_name(srcFormat));
return AVERROR(EINVAL);
}
if (!sws_isSupportedOutput(dstFormat)) {
av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n",
av_get_pix_fmt_name(dstFormat));
return AVERROR(EINVAL);
}
av_assert2(desc_src && desc_dst);
i = flags & (SWS_POINT |
SWS_AREA |
SWS_BILINEAR |
SWS_FAST_BILINEAR |
SWS_BICUBIC |
SWS_X |
SWS_GAUSS |
SWS_LANCZOS |
SWS_SINC |
SWS_SPLINE |
SWS_BICUBLIN);
/* provide a default scaler if not set by caller */
if (!i) {
if (dstW < srcW && dstH < srcH)
flags |= SWS_BICUBIC;
else if (dstW > srcW && dstH > srcH)
flags |= SWS_BICUBIC;
else
flags |= SWS_BICUBIC;
c->flags = flags;
} else if (i & (i - 1)) {
"Exactly one scaler algorithm must be chosen, got %X\n", i);
return AVERROR(EINVAL);
}
/* sanity check */
if (srcW < 1 || srcH < 1 || dstW < 1 || dstH < 1) {
/* FIXME check if these are enough and try to lower them after
* fixing the relevant parts of the code */
av_log(c, AV_LOG_ERROR, "%dx%d -> %dx%d is invalid scaling dimension\n",
srcW, srcH, dstW, dstH);
return AVERROR(EINVAL);
}
if (flags & SWS_FAST_BILINEAR) {
if (srcW < 8 || dstW < 8) {
flags ^= SWS_FAST_BILINEAR | SWS_BILINEAR;
c->flags = flags;
}
}
if (!dstFilter)
dstFilter = &dummyFilter;
if (!srcFilter)
srcFilter = &dummyFilter;
c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;
c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH;
c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
c->vRounder = 4 * 0x0001000100010001ULL;
usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) ||
(srcFilter->chrV && srcFilter->chrV->length > 1) ||
(dstFilter->lumV && dstFilter->lumV->length > 1) ||
(dstFilter->chrV && dstFilter->chrV->length > 1);
usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) ||
(srcFilter->chrH && srcFilter->chrH->length > 1) ||
(dstFilter->lumH && dstFilter->lumH->length > 1) ||
(dstFilter->chrH && dstFilter->chrH->length > 1);
av_pix_fmt_get_chroma_sub_sample(srcFormat, &c->chrSrcHSubSample, &c->chrSrcVSubSample);
av_pix_fmt_get_chroma_sub_sample(dstFormat, &c->chrDstHSubSample, &c->chrDstVSubSample);
if (isAnyRGB(dstFormat) && !(flags&SWS_FULL_CHR_H_INT)) {
if (dstW&1) {
av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to odd output size\n");
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
if ( c->chrSrcHSubSample == 0
&& c->chrSrcVSubSample == 0
&& c->dither != SWS_DITHER_BAYER //SWS_FULL_CHR_H_INT is currently not supported with SWS_DITHER_BAYER
&& !(c->flags & SWS_FAST_BILINEAR)
) {
av_log(c, AV_LOG_DEBUG, "Forcing full internal H chroma due to input having non subsampled chroma\n");
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
if (c->dither == SWS_DITHER_AUTO) {
if (flags & SWS_ERROR_DIFFUSION)
c->dither = SWS_DITHER_ED;
}
if(dstFormat == AV_PIX_FMT_BGR4_BYTE ||
dstFormat == AV_PIX_FMT_RGB4_BYTE ||
dstFormat == AV_PIX_FMT_BGR8 ||
dstFormat == AV_PIX_FMT_RGB8) {
if (c->dither == SWS_DITHER_AUTO)
c->dither = (flags & SWS_FULL_CHR_H_INT) ? SWS_DITHER_ED : SWS_DITHER_BAYER;
if (!(flags & SWS_FULL_CHR_H_INT)) {
if (c->dither == SWS_DITHER_ED || c->dither == SWS_DITHER_A_DITHER || c->dither == SWS_DITHER_X_DITHER) {
av_log(c, AV_LOG_DEBUG,
"Desired dithering only supported in full chroma interpolation for destination format '%s'\n",
av_get_pix_fmt_name(dstFormat));
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
if (flags & SWS_FULL_CHR_H_INT) {
if (c->dither == SWS_DITHER_BAYER) {
av_log(c, AV_LOG_DEBUG,
"Ordered dither is not supported in full chroma interpolation for destination format '%s'\n",
av_get_pix_fmt_name(dstFormat));
c->dither = SWS_DITHER_ED;
}
}
}
if (isPlanarRGB(dstFormat)) {
if (!(flags & SWS_FULL_CHR_H_INT)) {
av_log(c, AV_LOG_DEBUG,
"%s output is not supported with half chroma resolution, switching to full\n",
av_get_pix_fmt_name(dstFormat));
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
/* reuse chroma for 2 pixels RGB/BGR unless user wants full
* chroma interpolation */
if (flags & SWS_FULL_CHR_H_INT &&
isAnyRGB(dstFormat) &&
!isPlanarRGB(dstFormat) &&
dstFormat != AV_PIX_FMT_RGBA64LE &&
dstFormat != AV_PIX_FMT_RGBA64BE &&
dstFormat != AV_PIX_FMT_BGRA64LE &&
dstFormat != AV_PIX_FMT_BGRA64BE &&
dstFormat != AV_PIX_FMT_RGB48LE &&
dstFormat != AV_PIX_FMT_RGB48BE &&
dstFormat != AV_PIX_FMT_BGR48LE &&
dstFormat != AV_PIX_FMT_BGR48BE &&
dstFormat != AV_PIX_FMT_RGBA &&
dstFormat != AV_PIX_FMT_ARGB &&
dstFormat != AV_PIX_FMT_BGRA &&
dstFormat != AV_PIX_FMT_ABGR &&
dstFormat != AV_PIX_FMT_RGB24 &&
dstFormat != AV_PIX_FMT_BGR24 &&
dstFormat != AV_PIX_FMT_BGR4_BYTE &&
dstFormat != AV_PIX_FMT_RGB4_BYTE &&
dstFormat != AV_PIX_FMT_BGR8 &&
dstFormat != AV_PIX_FMT_RGB8
) {
av_log(c, AV_LOG_WARNING,
"full chroma interpolation for destination format '%s' not yet implemented\n",
av_get_pix_fmt_name(dstFormat));
flags &= ~SWS_FULL_CHR_H_INT;
c->flags = flags;
}
if (isAnyRGB(dstFormat) && !(flags & SWS_FULL_CHR_H_INT))
c->chrDstHSubSample = 1;
// drop some chroma lines if the user wants it
c->vChrDrop = (flags & SWS_SRC_V_CHR_DROP_MASK) >>
SWS_SRC_V_CHR_DROP_SHIFT;
c->chrSrcVSubSample += c->vChrDrop;
/* drop every other pixel for chroma calculation unless user
* wants full chroma */
if (isAnyRGB(srcFormat) && !(flags & SWS_FULL_CHR_H_INP) &&
srcFormat != AV_PIX_FMT_RGB8 && srcFormat != AV_PIX_FMT_BGR8 &&
srcFormat != AV_PIX_FMT_RGB4 && srcFormat != AV_PIX_FMT_BGR4 &&
srcFormat != AV_PIX_FMT_RGB4_BYTE && srcFormat != AV_PIX_FMT_BGR4_BYTE &&
srcFormat != AV_PIX_FMT_GBRP9BE && srcFormat != AV_PIX_FMT_GBRP9LE &&
srcFormat != AV_PIX_FMT_GBRP10BE && srcFormat != AV_PIX_FMT_GBRP10LE &&
srcFormat != AV_PIX_FMT_GBRAP10BE && srcFormat != AV_PIX_FMT_GBRAP10LE &&
srcFormat != AV_PIX_FMT_GBRP12BE && srcFormat != AV_PIX_FMT_GBRP12LE &&
srcFormat != AV_PIX_FMT_GBRAP12BE && srcFormat != AV_PIX_FMT_GBRAP12LE &&
srcFormat != AV_PIX_FMT_GBRP14BE && srcFormat != AV_PIX_FMT_GBRP14LE &&
srcFormat != AV_PIX_FMT_GBRP16BE && srcFormat != AV_PIX_FMT_GBRP16LE &&
srcFormat != AV_PIX_FMT_GBRAP16BE && srcFormat != AV_PIX_FMT_GBRAP16LE &&
((dstW >> c->chrDstHSubSample) <= (srcW >> 1) ||
(flags & SWS_FAST_BILINEAR)))
c->chrSrcHSubSample = 1;
// Note the AV_CEIL_RSHIFT is so that we always round toward +inf.
c->chrSrcW = AV_CEIL_RSHIFT(srcW, c->chrSrcHSubSample);
c->chrSrcH = AV_CEIL_RSHIFT(srcH, c->chrSrcVSubSample);
c->chrDstW = AV_CEIL_RSHIFT(dstW, c->chrDstHSubSample);
c->chrDstH = AV_CEIL_RSHIFT(dstH, c->chrDstVSubSample);
FF_ALLOCZ_OR_GOTO(c, c->formatConvBuffer, FFALIGN(srcW*2+78, 16) * 2, fail);
c->srcBpc = desc_src->comp[0].depth;
if (c->srcBpc < 8)
c->srcBpc = 8;
c->dstBpc = desc_dst->comp[0].depth;
if (c->dstBpc < 8)
c->dstBpc = 8;
if (isAnyRGB(srcFormat) || srcFormat == AV_PIX_FMT_PAL8)
c->srcBpc = 16;
if (c->dstBpc == 16)
dst_stride <<= 1;
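/* The runtime-generated MMXEXT fast-bilinear scaler is only used for 8-bit
 * input, at most 14-bit output, and when neither luma nor chroma is
 * downscaled; the width alignment checks below are additional requirements
 * of the generated code. */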
if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 14) {
c->canMMXEXTBeUsed = dstW >= srcW && (dstW & 31) == 0 &&
c->chrDstW >= c->chrSrcW &&
(srcW & 15) == 0;
if (!c->canMMXEXTBeUsed && dstW >= srcW && c->chrDstW >= c->chrSrcW && (srcW & 15) == 0
&& (flags & SWS_FAST_BILINEAR)) {
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"output width is not a multiple of 32 -> no MMXEXT scaler\n");
}
if (usesHFilter || isNBPS(c->srcFormat) || is16BPS(c->srcFormat) || isAnyRGB(c->srcFormat))
c->canMMXEXTBeUsed = 0;
} else
c->canMMXEXTBeUsed = 0;
c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW;
c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH;
/* Match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src
* to pixel n-2 of dst, but only for the FAST_BILINEAR mode otherwise do
* correct scaling.
* n-2 is the last chrominance sample available.
* This is not perfect, but no one should notice the difference, the more
* correct variant would be like the vertical one, but that would require
* some special code for the first and last pixel */
if (flags & SWS_FAST_BILINEAR) {
if (c->canMMXEXTBeUsed) {
c->lumXInc += 20;
c->chrXInc += 20;
}
// we don't use the x86 asm scaler if MMX is available
else if (INLINE_MMX(cpu_flags) && c->dstBpc <= 14) {
c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20;
c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20;
}
}
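/* Gamma-correct scaling is done through a cascade of contexts: convert the
 * source to 16-bit RGBA, run the scaling pass with the gamma/inv_gamma
 * lookup tables attached, and, if needed, convert from 16-bit RGBA to the
 * destination format. */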
// hardcoded for now
c->gamma_value = 2.2;
tmpFmt = AV_PIX_FMT_RGBA64LE;
if (!unscaled && c->gamma_flag && (srcFormat != tmpFmt || dstFormat != tmpFmt)) {
SwsContext *c2;
c->cascaded_context[0] = NULL;
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
srcW, srcH, tmpFmt, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
srcW, srcH, tmpFmt,
flags, NULL, NULL, c->param);
if (!c->cascaded_context[0]) {
return -1;
}
c->cascaded_context[1] = sws_getContext(srcW, srcH, tmpFmt,
dstW, dstH, tmpFmt,
flags, srcFilter, dstFilter, c->param);
if (!c->cascaded_context[1])
return -1;
c2 = c->cascaded_context[1];
c2->is_internal_gamma = 1;
c2->gamma = alloc_gamma_tbl( c->gamma_value);
c2->inv_gamma = alloc_gamma_tbl(1.f/c->gamma_value);
if (!c2->gamma || !c2->inv_gamma)
return AVERROR(ENOMEM);
// is_internal_gamma is set after creating the context,
// so to properly create the gamma-convert FilterDescriptor
// we have to re-initialize the filters
ff_free_filters(c2);
if (ff_init_filters(c2) < 0) {
sws_freeContext(c2);
return -1;
}
c->cascaded_context[2] = NULL;
if (dstFormat != tmpFmt) {
ret = av_image_alloc(c->cascaded1_tmp, c->cascaded1_tmpStride,
dstW, dstH, tmpFmt, 64);
if (ret < 0)
return ret;
c->cascaded_context[2] = sws_getContext(dstW, dstH, tmpFmt,
dstW, dstH, dstFormat,
flags, NULL, NULL, c->param);
if (!c->cascaded_context[2])
return -1;
}
return 0;
}
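/* Bayer (CFA) sources are first debayered to RGB24 through a cascaded
 * context unless this is an unscaled conversion straight to RGB24 or
 * YUV420P. */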
if (isBayer(srcFormat)) {
if (!unscaled ||
(dstFormat != AV_PIX_FMT_RGB24 && dstFormat != AV_PIX_FMT_YUV420P)) {
enum AVPixelFormat tmpFormat = AV_PIX_FMT_RGB24;
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
srcW, srcH, tmpFormat, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
srcW, srcH, tmpFormat,
flags, srcFilter, NULL, c->param);
if (!c->cascaded_context[0])
return -1;
c->cascaded_context[1] = sws_getContext(srcW, srcH, tmpFormat,
dstW, dstH, dstFormat,
flags, NULL, dstFilter, c->param);
if (!c->cascaded_context[1])
return -1;
return 0;
}
}
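/* When alpha must be blended away and the job also scales, filters or
 * changes range, split it into two contexts: blend into the alpha-less
 * intermediate format first, then scale/convert. */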
if (CONFIG_SWSCALE_ALPHA && isALPHA(srcFormat) && !isALPHA(dstFormat)) {
enum AVPixelFormat tmpFormat = alphaless_fmt(srcFormat);
if (tmpFormat != AV_PIX_FMT_NONE && c->alphablend != SWS_ALPHA_BLEND_NONE)
if (!unscaled ||
dstFormat != tmpFormat ||
usesHFilter || usesVFilter ||
c->srcRange != c->dstRange
) {
c->cascaded_mainindex = 1;
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
srcW, srcH, tmpFormat, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_alloc_set_opts(srcW, srcH, srcFormat,
srcW, srcH, tmpFormat,
flags, c->param);
if (!c->cascaded_context[0])
return -1;
c->cascaded_context[0]->alphablend = c->alphablend;
ret = sws_init_context(c->cascaded_context[0], NULL , NULL);
if (ret < 0)
return ret;
c->cascaded_context[1] = sws_alloc_set_opts(srcW, srcH, tmpFormat,
dstW, dstH, dstFormat,
flags, c->param);
if (!c->cascaded_context[1])
return -1;
c->cascaded_context[1]->srcRange = c->srcRange;
c->cascaded_context[1]->dstRange = c->dstRange;
ret = sws_init_context(c->cascaded_context[1], srcFilter , dstFilter);
if (ret < 0)
return ret;
return 0;
}
}
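/* The MMXEXT fast-bilinear path generates scaler code at runtime, which has
 * to live in executable memory: mmap()+mprotect() where available, then
 * VirtualAlloc(), with plain av_malloc() as a last resort. */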
#if HAVE_MMAP && HAVE_MPROTECT && defined(MAP_ANONYMOUS)
#define USE_MMAP 1
#else
#define USE_MMAP 0
#endif
/* precalculate horizontal scaler filter coefficients */
{
#if HAVE_MMXEXT_INLINE
// can't downscale !!!
if (c->canMMXEXTBeUsed && (flags & SWS_FAST_BILINEAR)) {
c->lumMmxextFilterCodeSize = ff_init_hscaler_mmxext(dstW, c->lumXInc, NULL, NULL, NULL, 8);
c->chrMmxextFilterCodeSize = ff_init_hscaler_mmxext(c->chrDstW, c->chrXInc, NULL, NULL, NULL, 4);
#if USE_MMAP
c->lumMmxextFilterCode = mmap(NULL, c->lumMmxextFilterCodeSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
c->chrMmxextFilterCode = mmap(NULL, c->chrMmxextFilterCodeSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
#elif HAVE_VIRTUALALLOC
c->lumMmxextFilterCode = VirtualAlloc(NULL,
c->lumMmxextFilterCodeSize,
MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
c->chrMmxextFilterCode = VirtualAlloc(NULL,
c->chrMmxextFilterCodeSize,
MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
#else
c->lumMmxextFilterCode = av_malloc(c->lumMmxextFilterCodeSize);
c->chrMmxextFilterCode = av_malloc(c->chrMmxextFilterCodeSize);
#endif
#ifdef MAP_ANONYMOUS
if (c->lumMmxextFilterCode == MAP_FAILED || c->chrMmxextFilterCode == MAP_FAILED)
#else
if (!c->lumMmxextFilterCode || !c->chrMmxextFilterCode)
#endif
{
av_log(c, AV_LOG_ERROR, "Failed to allocate MMX2FilterCode\n");
return AVERROR(ENOMEM);
}
FF_ALLOCZ_OR_GOTO(c, c->hLumFilter, (dstW / 8 + 8) * sizeof(int16_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hChrFilter, (c->chrDstW / 4 + 8) * sizeof(int16_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW / 2 / 8 + 8) * sizeof(int32_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW / 2 / 4 + 8) * sizeof(int32_t), fail);
ff_init_hscaler_mmxext( dstW, c->lumXInc, c->lumMmxextFilterCode,
c->hLumFilter, (uint32_t*)c->hLumFilterPos, 8);
ff_init_hscaler_mmxext(c->chrDstW, c->chrXInc, c->chrMmxextFilterCode,
c->hChrFilter, (uint32_t*)c->hChrFilterPos, 4);
#if USE_MMAP
if ( mprotect(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1
|| mprotect(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize, PROT_EXEC | PROT_READ) == -1) {
av_log(c, AV_LOG_ERROR, "mprotect failed, cannot use fast bilinear scaler\n");
goto fail;
}
#endif
} else
#endif /* HAVE_MMXEXT_INLINE */
{
const int filterAlign = X86_MMX(cpu_flags) ? 4 :
PPC_ALTIVEC(cpu_flags) ? 8 :
have_neon(cpu_flags) ? 8 : 1;
if ((ret = initFilter(&c->hLumFilter, &c->hLumFilterPos,
&c->hLumFilterSize, c->lumXInc,
srcW, dstW, filterAlign, 1 << 14,
(flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
cpu_flags, srcFilter->lumH, dstFilter->lumH,
c->param,
get_local_pos(c, 0, 0, 0),
get_local_pos(c, 0, 0, 0))) < 0)
goto fail;
if ((ret = initFilter(&c->hChrFilter, &c->hChrFilterPos,
&c->hChrFilterSize, c->chrXInc,
c->chrSrcW, c->chrDstW, filterAlign, 1 << 14,
(flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
cpu_flags, srcFilter->chrH, dstFilter->chrH,
c->param,
get_local_pos(c, c->chrSrcHSubSample, c->src_h_chr_pos, 0),
get_local_pos(c, c->chrDstHSubSample, c->dst_h_chr_pos, 0))) < 0)
goto fail;
}
} // initialize horizontal stuff
/* precalculate vertical scaler filter coefficients */
{
const int filterAlign = X86_MMX(cpu_flags) ? 2 :
PPC_ALTIVEC(cpu_flags) ? 8 :
have_neon(cpu_flags) ? 2 : 1;
if ((ret = initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize,
c->lumYInc, srcH, dstH, filterAlign, (1 << 12),
(flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
cpu_flags, srcFilter->lumV, dstFilter->lumV,
c->param,
get_local_pos(c, 0, 0, 1),
get_local_pos(c, 0, 0, 1))) < 0)
goto fail;
if ((ret = initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize,
c->chrYInc, c->chrSrcH, c->chrDstH,
filterAlign, (1 << 12),
(flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
cpu_flags, srcFilter->chrV, dstFilter->chrV,
c->param,
get_local_pos(c, c->chrSrcVSubSample, c->src_v_chr_pos, 1),
get_local_pos(c, c->chrDstVSubSample, c->dst_v_chr_pos, 1))) < 0)
goto fail;
#if HAVE_ALTIVEC
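/* Replicate each vertical filter coefficient across all 8 lanes of a
 * "vector signed short" so the AltiVec code can load it directly. */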
FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof(vector signed short) * c->vLumFilterSize * c->dstH, fail);
FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof(vector signed short) * c->vChrFilterSize * c->chrDstH, fail);
for (i = 0; i < c->vLumFilterSize * c->dstH; i++) {
int j;
short *p = (short *)&c->vYCoeffsBank[i];
for (j = 0; j < 8; j++)
p[j] = c->vLumFilter[i];
}
for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) {
int j;
short *p = (short *)&c->vCCoeffsBank[i];
for (j = 0; j < 8; j++)
p[j] = c->vChrFilter[i];
}
#endif
}
for (i = 0; i < 4; i++)
FF_ALLOCZ_OR_GOTO(c, c->dither_error[i], (c->dstW+2) * sizeof(int), fail);
c->needAlpha = (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) ? 1 : 0;
// 64 / c->scalingBpp is the same as 16 / sizeof(scaling_intermediate)
c->uv_off = (dst_stride>>1) + 64 / (c->dstBpc &~ 7);
c->uv_offx2 = dst_stride + 16;
if (flags & SWS_PRINT_INFO) {
const char *scaler = NULL, *cpucaps;
for (i = 0; i < FF_ARRAY_ELEMS(scale_algorithms); i++) {
if (flags & scale_algorithms[i].flag) {
scaler = scale_algorithms[i].description;
break;
}
}
if (!scaler)
scaler = "ehh flags invalid?!";
av_log(c, AV_LOG_INFO, "%s scaler, from %s to %s%s ",
#ifdef DITHER1XBPP
dstFormat == AV_PIX_FMT_BGR555 || dstFormat == AV_PIX_FMT_BGR565 ||
dstFormat == AV_PIX_FMT_RGB444BE || dstFormat == AV_PIX_FMT_RGB444LE ||
dstFormat == AV_PIX_FMT_BGR444BE || dstFormat == AV_PIX_FMT_BGR444LE ?
#else
"",
#endif
if (INLINE_MMXEXT(cpu_flags))
cpucaps = "MMXEXT";
else if (INLINE_AMD3DNOW(cpu_flags))
cpucaps = "3DNOW";
else if (INLINE_MMX(cpu_flags))
cpucaps = "MMX";
else if (PPC_ALTIVEC(cpu_flags))
cpucaps = "AltiVec";
else
cpucaps = "C";
av_log(c, AV_LOG_INFO, "using %s\n", cpucaps);
av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
av_log(c, AV_LOG_DEBUG,
"lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
av_log(c, AV_LOG_DEBUG,
"chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH,
c->chrXInc, c->chrYInc);
}
/* alpha blend special case, note this has been split via cascaded contexts if it's scaled */
if (unscaled && !usesHFilter && !usesVFilter &&
c->alphablend != SWS_ALPHA_BLEND_NONE &&
isALPHA(srcFormat) &&
(c->srcRange == c->dstRange || isAnyRGB(dstFormat)) &&
alphaless_fmt(srcFormat) == dstFormat
) {
c->swscale = ff_sws_alphablendaway;
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"using alpha blendaway %s -> %s special converter\n",
av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
return 0;
}
/* unscaled special cases */
if (unscaled && !usesHFilter && !usesVFilter &&
(c->srcRange == c->dstRange || isAnyRGB(dstFormat))) {
ff_get_unscaled_swscale(c);
if (c->swscale) {
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"using unscaled %s -> %s special converter\n",
av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
return 0;
}
}
c->swscale = ff_getSwsFunc(c);
return 0;
fail: // FIXME replace things by appropriate error codes
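/* RETCODE_USE_CASCADE means the requested resampling ratio cannot be handled
 * in a single pass; retry via an intermediate YUV(A)420P image of roughly
 * the geometric mean of the source and destination sizes. */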
if (ret == RETCODE_USE_CASCADE) {
int tmpW = sqrt(srcW * (int64_t)dstW);
int tmpH = sqrt(srcH * (int64_t)dstH);
enum AVPixelFormat tmpFormat = AV_PIX_FMT_YUV420P;
if (isALPHA(srcFormat))
tmpFormat = AV_PIX_FMT_YUVA420P;
if (srcW*(int64_t)srcH <= 4LL*dstW*dstH)
return AVERROR(EINVAL);
ret = av_image_alloc(c->cascaded_tmp, c->cascaded_tmpStride,
tmpW, tmpH, tmpFormat, 64);
if (ret < 0)
return ret;
c->cascaded_context[0] = sws_getContext(srcW, srcH, srcFormat,
tmpW, tmpH, tmpFormat,
flags, srcFilter, NULL, c->param);
if (!c->cascaded_context[0])
return -1;
c->cascaded_context[1] = sws_getContext(tmpW, tmpH, tmpFormat,
dstW, dstH, dstFormat,
flags, NULL, dstFilter, c->param);
if (!c->cascaded_context[1])
return -1;
return 0;
}
return -1;
}
SwsContext *sws_alloc_set_opts(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, const double *param)
{
SwsContext *c;
if (!(c = sws_alloc_context()))
return NULL;
c->flags = flags;
c->srcW = srcW;
c->srcH = srcH;
c->dstW = dstW;
c->dstH = dstH;
c->srcFormat = srcFormat;
c->dstFormat = dstFormat;
if (param) {
c->param[0] = param[0];
c->param[1] = param[1];
}
return c;
}
SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param)
{
SwsContext *c;
c = sws_alloc_set_opts(srcW, srcH, srcFormat,
dstW, dstH, dstFormat,
flags, param);
if (!c)
return NULL;
if (sws_init_context(c, srcFilter, dstFilter) < 0) {
sws_freeContext(c);
return NULL;
}
return c;
}
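/* Vector helpers: isnan_vec() reports whether any coefficient is NaN
 * (sws_getDefaultFilter() uses it to reject bad filters); makenan_vec()
 * poisons a vector with NaNs. */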
static int isnan_vec(SwsVector *a)
{
int i;
for (i=0; i<a->length; i++)
if (isnan(a->coeff[i]))
return 1;
return 0;
}
static void makenan_vec(SwsVector *a)
{
int i;
for (i=0; i<a->length; i++)
a->coeff[i] = NAN;
}
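/* Build the default filter set: Gaussian blur and/or sharpening for luma and
 * chroma, optional chroma shift, then normalize every vector and bail out if
 * any allocation fails or a NaN shows up. */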
SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
float lumaSharpen, float chromaSharpen,
float chromaHShift, float chromaVShift,
int verbose)
{
SwsFilter *filter = av_malloc(sizeof(SwsFilter));
if (!filter)
return NULL;
if (lumaGBlur != 0.0) {
filter->lumH = sws_getGaussianVec(lumaGBlur, 3.0);
filter->lumV = sws_getGaussianVec(lumaGBlur, 3.0);
} else {
filter->lumH = sws_getIdentityVec();
filter->lumV = sws_getIdentityVec();
}
if (chromaGBlur != 0.0) {
filter->chrH = sws_getGaussianVec(chromaGBlur, 3.0);
filter->chrV = sws_getGaussianVec(chromaGBlur, 3.0);
} else {
filter->chrH = sws_getIdentityVec();
filter->chrV = sws_getIdentityVec();
}
if (!filter->lumH || !filter->lumV || !filter->chrH || !filter->chrV)
goto fail;
if (chromaSharpen != 0.0) {
SwsVector *id = sws_getIdentityVec();
sws_scaleVec(filter->chrH, -chromaSharpen);
sws_scaleVec(filter->chrV, -chromaSharpen);
sws_addVec(filter->chrH, id);
sws_addVec(filter->chrV, id);
sws_freeVec(id);
}
if (lumaSharpen != 0.0) {
SwsVector *id = sws_getIdentityVec();
sws_scaleVec(filter->lumH, -lumaSharpen);
sws_scaleVec(filter->lumV, -lumaSharpen);
sws_addVec(filter->lumH, id);
sws_addVec(filter->lumV, id);
sws_freeVec(id);
}
if (chromaHShift != 0.0)
sws_shiftVec(filter->chrH, (int)(chromaHShift + 0.5));
if (chromaVShift != 0.0)
sws_shiftVec(filter->chrV, (int)(chromaVShift + 0.5));
sws_normalizeVec(filter->chrH, 1.0);
sws_normalizeVec(filter->chrV, 1.0);
sws_normalizeVec(filter->lumH, 1.0);
sws_normalizeVec(filter->lumV, 1.0);
if (isnan_vec(filter->chrH) ||
isnan_vec(filter->chrV) ||
isnan_vec(filter->lumH) ||
isnan_vec(filter->lumV))
goto fail;
if (verbose)
sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG);
if (verbose)
sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG);
return filter;
fail:
sws_freeVec(filter->lumH);
sws_freeVec(filter->lumV);
sws_freeVec(filter->chrH);
sws_freeVec(filter->chrV);
av_freep(&filter);
return NULL;
}
SwsVector *sws_allocVec(int length)
{
SwsVector *vec;
if(length <= 0 || length > INT_MAX/ sizeof(double))
return NULL;
vec = av_malloc(sizeof(SwsVector));
if (!vec)
return NULL;
vec->length = length;
vec->coeff = av_malloc(sizeof(double) * length);
if (!vec->coeff)