author    Philip Langdale <philipl@overt.org>    2019-05-09 21:02:09 -0700
committer Philip Langdale <philipl@overt.org>    2019-05-12 07:51:02 -0700
commit    cd483180356c8f206f32393acc52a85c5b76758b (patch)
tree      06d6ff23172861b896fe3de72b4a431192ed4522 /libswscale/swscale_unscaled.c
parent    5de4f1d871d60886b9630531fa8c34cad13cc9dd (diff)
download  ffmpeg-cd483180356c8f206f32393acc52a85c5b76758b.tar.gz
swscale: Add support for NV24 and NV42
The implementation is straightforward. Most of the existing NV12 codepaths work regardless of subsampling and are reused as-is. Where necessary I wrote the slightly different NV24 versions. Finally, the one thing that confused me for a long time was the asm-specific x86 path that did an explicit exclusion check for NV12. I replaced that with a semi-planar check and also updated the equivalent PPC code, which Lauri kindly checked.
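As a usage-level illustration (not part of this commit): the sketch below, assuming the public libswscale/libavutil API and a hypothetical helper named convert_yuv444p_to_nv24, requests a same-size conversion from YUV444P to NV24. Because source and destination dimensions match, swscale can take the unscaled path and pick the planarToNv24Wrapper added by this patch.

    #include <libavutil/error.h>
    #include <libavutil/imgutils.h>
    #include <libavutil/pixfmt.h>
    #include <libswscale/swscale.h>

    /* Hypothetical helper: convert one YUV444P frame to a freshly
     * allocated NV24 frame of the same size. */
    static int convert_yuv444p_to_nv24(const uint8_t *const src_data[4],
                                       const int src_linesize[4],
                                       int w, int h)
    {
        uint8_t *dst_data[4];
        int dst_linesize[4];
        int ret;

        /* Same input and output size; the scaling algorithm flag is
         * irrelevant for an unscaled format conversion. */
        struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUV444P,
                                                w, h, AV_PIX_FMT_NV24,
                                                SWS_POINT, NULL, NULL, NULL);
        if (!sws)
            return AVERROR(EINVAL);

        ret = av_image_alloc(dst_data, dst_linesize, w, h, AV_PIX_FMT_NV24, 32);
        if (ret < 0) {
            sws_freeContext(sws);
            return ret;
        }

        /* Y is copied; U and V are interleaved into the single chroma plane. */
        sws_scale(sws, src_data, src_linesize, 0, h, dst_data, dst_linesize);

        av_freep(&dst_data[0]);
        sws_freeContext(sws);
        return 0;
    }

Swapping AV_PIX_FMT_NV24 for AV_PIX_FMT_NV42 (or reversing the direction with nv24ToPlanarWrapper as the expected path) works the same way; only the chroma interleaving order differs.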
Diffstat (limited to 'libswscale/swscale_unscaled.c')
-rw-r--r--  libswscale/swscale_unscaled.c  51
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c
index be04a236d8..e0b9e99373 100644
--- a/libswscale/swscale_unscaled.c
+++ b/libswscale/swscale_unscaled.c
@@ -180,6 +180,47 @@ static int nv12ToPlanarWrapper(SwsContext *c, const uint8_t *src[],
return srcSliceH;
}
+static int planarToNv24Wrapper(SwsContext *c, const uint8_t *src[],
+ int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t *dstParam[],
+ int dstStride[])
+{
+ uint8_t *dst = dstParam[1] + dstStride[1] * srcSliceY;
+
+ copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
+ dstParam[0], dstStride[0]);
+
+ if (c->dstFormat == AV_PIX_FMT_NV24)
+ interleaveBytes(src[1], src[2], dst, c->chrSrcW, srcSliceH,
+ srcStride[1], srcStride[2], dstStride[1]);
+ else
+ interleaveBytes(src[2], src[1], dst, c->chrSrcW, srcSliceH,
+ srcStride[2], srcStride[1], dstStride[1]);
+
+ return srcSliceH;
+}
+
+static int nv24ToPlanarWrapper(SwsContext *c, const uint8_t *src[],
+ int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t *dstParam[],
+ int dstStride[])
+{
+ uint8_t *dst1 = dstParam[1] + dstStride[1] * srcSliceY;
+ uint8_t *dst2 = dstParam[2] + dstStride[2] * srcSliceY;
+
+ copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
+ dstParam[0], dstStride[0]);
+
+ if (c->srcFormat == AV_PIX_FMT_NV24)
+ deinterleaveBytes(src[1], dst1, dst2, c->chrSrcW, srcSliceH,
+ srcStride[1], dstStride[1], dstStride[2]);
+ else
+ deinterleaveBytes(src[1], dst2, dst1, c->chrSrcW, srcSliceH,
+ srcStride[1], dstStride[2], dstStride[1]);
+
+ return srcSliceH;
+}
+
static int planarToP01xWrapper(SwsContext *c, const uint8_t *src8[],
int srcStride[], int srcSliceY,
int srcSliceH, uint8_t *dstParam8[],
@@ -1872,11 +1913,21 @@ void ff_get_unscaled_swscale(SwsContext *c)
(dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)) {
c->swscale = planarToNv12Wrapper;
}
+ /* yv24_to_nv24 */
+ if ((srcFormat == AV_PIX_FMT_YUV444P || srcFormat == AV_PIX_FMT_YUVA444P) &&
+ (dstFormat == AV_PIX_FMT_NV24 || dstFormat == AV_PIX_FMT_NV42)) {
+ c->swscale = planarToNv24Wrapper;
+ }
/* nv12_to_yv12 */
if (dstFormat == AV_PIX_FMT_YUV420P &&
(srcFormat == AV_PIX_FMT_NV12 || srcFormat == AV_PIX_FMT_NV21)) {
c->swscale = nv12ToPlanarWrapper;
}
+ /* nv24_to_yv24 */
+ if (dstFormat == AV_PIX_FMT_YUV444P &&
+ (srcFormat == AV_PIX_FMT_NV24 || srcFormat == AV_PIX_FMT_NV42)) {
+ c->swscale = nv24ToPlanarWrapper;
+ }
/* yuv2bgr */
if ((srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUV422P ||
srcFormat == AV_PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) &&