From: Martin T. H. Sandsmark <martin.sandsmark@kde.org>
Date: Fri, 27 May 2016 20:29:11 +0000
Subject: Port to libavfilter for deinterlacing.
X-Git-Tag: v16.07.80
X-Git-Url: http://quickgit.kde.org/?p=ffmpegthumbs.git&a=commitdiff&h=3978c762072b7bc16b2096819b7cfa2052deaf5e
---
Port to libavfilter for deinterlacing.

Based on a patch from Andreas Cadhalpun
<andreas.cadhalpun@googlemail.com>.
---
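
For context: the new code builds a three-filter libavfilter graph, "buffer -> yadif -> buffersink", from a string and pushes each decoded frame through it instead of calling the deprecated avpicture_deinterlace(). Because the graph is created with avfilter_graph_parse2(), the filter instances get auto-generated names of the form "Parsed_<filter>_<index>", which is why the patch looks them up as "Parsed_buffer_0" and "Parsed_buffersink_2". The stand-alone sketch below shows the same flow in one-shot form; it is illustrative only, not part of the patch: the function name, the EOF flush and the assumption of a reference-counted input frame are added here for the sketch.

// Sketch only: a one-shot "buffer -> yadif -> buffersink" graph, assuming an
// FFmpeg release of the same era as this patch and a reference-counted frame.
extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
}
#include <cstdio>

static bool deinterlaceOnce(AVFrame *frame)
{
    avfilter_register_all();   // needed on FFmpeg of this era; gone in newer releases

    char args[256];
    snprintf(args, sizeof(args),
             "buffer=video_size=%dx%d:pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];"
             "[in]yadif[out];[out]buffersink",
             frame->width, frame->height, frame->format);

    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterInOut *inputs = NULL, *outputs = NULL;

    // avfilter_graph_parse2() names parsed instances "Parsed_<filter>_<index>",
    // which is where "Parsed_buffer_0" and "Parsed_buffersink_2" come from.
    if (avfilter_graph_parse2(graph, args, &inputs, &outputs) < 0 ||
        inputs || outputs || avfilter_graph_config(graph, NULL) < 0) {
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        avfilter_graph_free(&graph);
        return false;
    }

    AVFilterContext *src  = avfilter_graph_get_filter(graph, "Parsed_buffer_0");
    AVFilterContext *sink = avfilter_graph_get_filter(graph, "Parsed_buffersink_2");

    // Push the frame, signal EOF so yadif flushes its buffered frame, then read
    // the deinterlaced result back into the same (now reset) AVFrame.
    bool ok = src && sink &&
              av_buffersrc_add_frame(src, frame) >= 0 &&
              av_buffersrc_add_frame(src, NULL) >= 0 &&
              av_buffersink_get_frame(sink, frame) >= 0;

    avfilter_graph_free(&graph);
    return ok;
}

Unlike this one-shot sketch, the patch keeps the graph in member variables and only rebuilds it when the frame dimensions or pixel format change.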

--- CMakeLists.txt
+++ CMakeLists.txt
@@ -36,7 +36,7 @@

kde4_add_plugin(ffmpegthumbs ${ffmpegthumbs_PART_SRCS})

-target_link_libraries(ffmpegthumbs ${KDE4_KIO_LIBS} ${AVUTIL_LIBRARIES} ${AVFORMAT_LIBRARIES} ${AVCODEC_LIBRARIES} ${SWSCALE_LIBRARIES} )
+target_link_libraries(ffmpegthumbs ${KDE4_KIO_LIBS} ${AVUTIL_LIBRARIES} ${AVFILTER_LIBRARIES} ${AVFORMAT_LIBRARIES} ${AVCODEC_LIBRARIES} ${SWSCALE_LIBRARIES} )

install(TARGETS ffmpegthumbs DESTINATION ${PLUGIN_INSTALL_DIR})

--- cmake/FindFFmpeg.cmake
+++ cmake/FindFFmpeg.cmake
@@ -99,6 +99,7 @@

  # Check for all possible component.
  find_component(AVCODEC libavcodec avcodec libavcodec/avcodec.h)
+  find_component(AVFILTER libavfilter avfilter libavfilter/avfilter.h)
  find_component(AVFORMAT libavformat avformat libavformat/avformat.h)
  find_component(AVDEVICE libavdevice avdevice libavdevice/avdevice.h)
  find_component(AVUTIL libavutil avutil libavutil/avutil.h)

--- ffmpegthumbnailer/moviedecoder.cpp
+++ ffmpegthumbnailer/moviedecoder.cpp
@@ -40,6 +40,10 @@
    , m_FormatContextWasGiven(pavContext != NULL)
    , m_AllowSeek(true)
    , m_initialized(false)
+    , m_bufferSinkContext(NULL)
+    , m_bufferSourceContext(NULL)
+    , m_filterGraph(NULL)
+    , m_filterFrame(NULL)
{
    initialize(filename);
}
@@ -51,6 +55,9 @@

void MovieDecoder::initialize(const QString& filename)
{
+    m_lastWidth = -1;
+    m_lastHeight = -1;
+    m_lastPixfmt = AV_PIX_FMT_NONE;
    av_register_all();
    avcodec_register_all();

@@ -67,7 +74,7 @@
    }

    initializeVideo();
-    m_pFrame = avcodec_alloc_frame();
+    m_pFrame = av_frame_alloc();

    if (m_pFrame) {
        m_initialized=true;
@@ -82,6 +89,7 @@

void MovieDecoder::destroy()
{
+    deleteFilterGraph();
    if (m_pVideoCodecContext) {
        avcodec_close(m_pVideoCodecContext);
        m_pVideoCodecContext = NULL;
@@ -93,13 +101,13 @@
    }

    if (m_pPacket) {
-        av_free_packet(m_pPacket);
+        av_packet_unref(m_pPacket);
        delete m_pPacket;
        m_pPacket = NULL;
    }

    if (m_pFrame) {
-        av_free(m_pFrame);
+        av_frame_free(&m_pFrame);
        m_pFrame = NULL;
    }

@@ -239,7 +247,7 @@
        return false;
    }

-    avcodec_get_frame_defaults(m_pFrame);
+    av_frame_unref(m_pFrame);

    int frameFinished = 0;

@@ -264,7 +272,7 @@
    int attempts = 0;

    if (m_pPacket) {
-        av_free_packet(m_pPacket);
+        av_packet_unref(m_pPacket);
        delete m_pPacket;
    }

@@ -275,7 +283,7 @@
        if (framesAvailable) {
            frameDecoded = m_pPacket->stream_index == m_VideoStream;
            if (!frameDecoded) {
-                av_free_packet(m_pPacket);
+                av_packet_unref(m_pPacket);
            }
        }
    }
@@ -283,15 +291,100 @@
    return frameDecoded;
}

+void MovieDecoder::deleteFilterGraph()
+{
+    if (m_filterGraph) {
+        av_frame_free(&m_filterFrame);
+        avfilter_graph_free(&m_filterGraph);
+        m_filterGraph = NULL;
+    }
+}
+
+bool MovieDecoder::initFilterGraph(enum AVPixelFormat pixfmt, int width, int height)
+{
+    AVFilterInOut *inputs = NULL, *outputs = NULL;
+
+    deleteFilterGraph();
+    m_filterGraph = avfilter_graph_alloc();
+
+    QByteArray arguments("buffer=");
+    arguments += "video_size=" + QByteArray::number(width) + "x" + QByteArray::number(height) + ":";
+    arguments += "pix_fmt=" + QByteArray::number(pixfmt) + ":";
+    arguments += "time_base=1/1:pixel_aspect=0/1[in];";
+    arguments += "[in]yadif[out];";
+    arguments += "[out]buffersink";
+
+    int ret = avfilter_graph_parse2(m_filterGraph, arguments.constData(), &inputs, &outputs);
+    if (ret < 0) {
+        qWarning() << "Unable to parse filter graph";
+        return false;
+    }
+
+    if (inputs || outputs)
+        return false;
+
+    ret = avfilter_graph_config(m_filterGraph, NULL);
+    if (ret < 0) {
+        qWarning() << "Unable to validate filter graph";
+        return false;
+    }
+
+    m_bufferSourceContext = avfilter_graph_get_filter(m_filterGraph, "Parsed_buffer_0");
+    m_bufferSinkContext = avfilter_graph_get_filter(m_filterGraph, "Parsed_buffersink_2");
+    if (!m_bufferSourceContext || !m_bufferSinkContext) {
+        qWarning() << "Unable to get source or sink";
+        return false;
+    }
+    m_filterFrame = av_frame_alloc();
+    m_lastWidth = width;
+    m_lastHeight = height;
+    m_lastPixfmt = pixfmt;
+
+    return true;
+}
+
+bool MovieDecoder::processFilterGraph(AVPicture *dst, const AVPicture *src,
+                                      enum AVPixelFormat pixfmt, int width, int height)
+{
+    if (!m_filterGraph || width != m_lastWidth ||
+        height != m_lastHeight || pixfmt != m_lastPixfmt) {
+
+        if (!initFilterGraph(pixfmt, width, height)) {
+            return false;
+        }
+    }
+
+    memcpy(m_filterFrame->data, src->data, sizeof(src->data));
+    memcpy(m_filterFrame->linesize, src->linesize, sizeof(src->linesize));
+    m_filterFrame->width = width;
+    m_filterFrame->height = height;
+    m_filterFrame->format = pixfmt;
+
+    int ret = av_buffersrc_add_frame(m_bufferSourceContext, m_filterFrame);
+    if (ret < 0) {
+        return false;
+    }
+
+    ret = av_buffersink_get_frame(m_bufferSinkContext, m_filterFrame);
+    if (ret < 0) {
+        return false;
+    }
+
+    av_picture_copy(dst, (const AVPicture *) m_filterFrame, pixfmt, width, height);
+    av_frame_unref(m_filterFrame);
+
+    return true;
+}
+
void MovieDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, VideoFrame& videoFrame)
{
    if (m_pFrame->interlaced_frame) {
-        avpicture_deinterlace((AVPicture*) m_pFrame, (AVPicture*) m_pFrame, m_pVideoCodecContext->pix_fmt,
+        processFilterGraph((AVPicture*) m_pFrame, (AVPicture*) m_pFrame, m_pVideoCodecContext->pix_fmt,
                              m_pVideoCodecContext->width, m_pVideoCodecContext->height);
    }

    int scaledWidth, scaledHeight;
-    convertAndScaleFrame(PIX_FMT_RGB24, scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);
+    convertAndScaleFrame(AV_PIX_FMT_RGB24, scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);

    videoFrame.width = scaledWidth;
    videoFrame.height = scaledHeight;
@@ -302,7 +395,7 @@
    memcpy((&(videoFrame.frameData.front())), m_pFrame->data[0], videoFrame.lineSize * videoFrame.height);
}

-void MovieDecoder::convertAndScaleFrame(PixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight)
+void MovieDecoder::convertAndScaleFrame(AVPixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight)
{
    calculateDimensions(scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);
    SwsContext* scaleContext = sws_getContext(m_pVideoCodecContext->width, m_pVideoCodecContext->height,
@@ -323,7 +416,7 @@
              convertedFrame->data, convertedFrame->linesize);
    sws_freeContext(scaleContext);

-    av_free(m_pFrame);
+    av_frame_free(&m_pFrame);
    av_free(m_pFrameBuffer);

    m_pFrame = convertedFrame;
@@ -355,9 +448,9 @@
    }
}

-void MovieDecoder::createAVFrame(AVFrame** avFrame, quint8** frameBuffer, int width, int height, PixelFormat format)
-{
-    *avFrame = avcodec_alloc_frame();
+void MovieDecoder::createAVFrame(AVFrame** avFrame, quint8** frameBuffer, int width, int height, AVPixelFormat format)
+{
+    *avFrame = av_frame_alloc();

    int numBytes = avpicture_get_size(format, width, height);
    *frameBuffer = reinterpret_cast<quint8*>(av_malloc(numBytes));

--- ffmpegthumbnailer/moviedecoder.h
+++ ffmpegthumbnailer/moviedecoder.h
@@ -23,6 +23,9 @@
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
+#include <libavfilter/avfilter.h>
+#include <libavfilter/buffersrc.h>
+#include <libavfilter/buffersink.h>
}

namespace ffmpegthumbnailer
@@ -52,9 +55,13 @@

    bool decodeVideoPacket();
    bool getVideoPacket();
-    void convertAndScaleFrame(PixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight);
-    void createAVFrame(AVFrame** avFrame, quint8** frameBuffer, int width, int height, PixelFormat format);
+    void convertAndScaleFrame(AVPixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight);
+    void createAVFrame(AVFrame** avFrame, quint8** frameBuffer, int width, int height, AVPixelFormat format);
    void calculateDimensions(int squareSize, bool maintainAspectRatio, int& destWidth, int& destHeight);
+
+    void deleteFilterGraph();
+    bool initFilterGraph(enum AVPixelFormat pixfmt, int width, int height);
+    bool processFilterGraph(AVPicture *dst, const AVPicture *src, enum AVPixelFormat pixfmt, int width, int height);

private:
    int m_VideoStream;
@@ -68,6 +75,13 @@
    bool m_FormatContextWasGiven;
    bool m_AllowSeek;
    bool m_initialized;
+    AVFilterContext* m_bufferSinkContext;
+    AVFilterContext* m_bufferSourceContext;
+    AVFilterGraph* m_filterGraph;
+    AVFrame* m_filterFrame;
+    int m_lastWidth;
+    int m_lastHeight;
+    enum AVPixelFormat m_lastPixfmt;
};

}

--- tests/CMakeLists.txt
+++ tests/CMakeLists.txt
@@ -19,7 +19,7 @@

kde4_add_executable(ffmpegthumbtest ${ffmpegthumbtest_SRCS} )

-target_link_libraries(ffmpegthumbtest ${KDE4_KDECORE_LIBS} ${KDE4_KIO_LIBS} ${AVUTIL_LIBRARIES} ${AVFORMAT_LIBRARIES} ${AVCODEC_LIBRARIES} ${SWSCALE_LIBRARIES})
+target_link_libraries(ffmpegthumbtest ${KDE4_KDECORE_LIBS} ${KDE4_KIO_LIBS} ${AVUTIL_LIBRARIES} ${AVFILTER_LIBRARIES} ${AVFORMAT_LIBRARIES} ${AVCODEC_LIBRARIES} ${SWSCALE_LIBRARIES})
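
Besides the deinterlacing change itself, the diff also drops several APIs that were deprecated at the time: avcodec_alloc_frame() and avcodec_get_frame_defaults() become av_frame_alloc() and av_frame_unref(), av_free_packet() becomes av_packet_unref(), frames are released with av_frame_free() instead of av_free(), and the PixelFormat/PIX_FMT_* names move to AVPixelFormat/AV_PIX_FMT_*.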