// KPipewire — encoder.cpp
/*
    SPDX-FileCopyrightText: 2023 Aleix Pol Gonzalez <aleixpol@kde.org>
    SPDX-FileCopyrightText: 2023 Marco Martin <mart@kde.org>
    SPDX-FileCopyrightText: 2023 Arjen Hiemstra <ahiemstra@heimr.nl>

    SPDX-License-Identifier: LGPL-2.1-only OR LGPL-3.0-only OR LicenseRef-KDE-Accepted-LGPL
*/
8
9#include "encoder_p.h"
10
11#include <mutex>
12
13extern "C" {
14#include <libavcodec/avcodec.h>
15#include <libavfilter/avfilter.h>
16#include <libavfilter/buffersink.h>
17#include <libavfilter/buffersrc.h>
18#include <libavutil/avutil.h>
19#include <libavutil/hwcontext.h>
20#include <libavutil/hwcontext_drm.h>
21#include <libavutil/imgutils.h>
22}
23
24#include <libdrm/drm_fourcc.h>
25
26#include "vaapiutils_p.h"
27
28#include "logging_record.h"
29
30#undef av_err2str
31// The one provided by libav fails to compile on GCC due to passing data from the function scope outside
32char str[AV_ERROR_MAX_STRING_SIZE];
33char *av_err2str(int errnum)
34{
35 return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
36}
37
38static AVPixelFormat convertQImageFormatToAVPixelFormat(QImage::Format format)
39{
40 // Listing those handed by SpaToQImageFormat
41 switch (format) {
43 return AV_PIX_FMT_RGB24;
45 return AV_PIX_FMT_BGR24;
48 return AV_PIX_FMT_RGBA;
51 return AV_PIX_FMT_RGB32;
52 default:
53 qDebug() << "Unexpected pixel format" << format;
54 return AV_PIX_FMT_RGB32;
55 }
56}
57
58static int percentageToFrameQuality(quint8 quality)
59{
60 return std::max(1, int(FF_LAMBDA_MAX - (quality / 100.0) * FF_LAMBDA_MAX));
61}
62
// Base encoder. Keeps a back-pointer to the PipeWireProduce that feeds it
// frames and consumes its packets. The QObject parent is left null; lifetime
// is presumably managed by the produce object — confirm at the call site.
Encoder::Encoder(PipeWireProduce *produce)
    : QObject(nullptr)
    , m_produce(produce)
{
}

69Encoder::~Encoder()
70{
71 if (m_avFilterGraph) {
72 avfilter_graph_free(&m_avFilterGraph);
73 }
74
75 if (m_avCodecContext) {
76 avcodec_close(m_avCodecContext);
77 av_free(m_avCodecContext);
78 }
79}
80
/**
 * Drain filtered frames from the filter graph and hand them to the codec.
 *
 * @param maximumFrames bound on how many frames may be sent to the codec in
 *        this call; excess filtered frames are dropped (with a warning)
 *        rather than queued.
 * @return {frames pulled from the filter sink, frames actually sent to the codec}
 */
std::pair<int, int> Encoder::encodeFrame(int maximumFrames)
{
    auto frame = av_frame_alloc();
    if (!frame) {
        qFatal("Failed to allocate memory");
    }

    int filtered = 0;
    int queued = 0;

    for (;;) {
        // Pull the next fully-filtered frame. EAGAIN/EOF simply mean "nothing
        // more right now" and are expected, so they are not logged.
        if (auto result = av_buffersink_get_frame(m_outputFilter, frame); result < 0) {
            if (result != AVERROR_EOF && result != AVERROR(EAGAIN)) {
                qCWarning(PIPEWIRERECORD_LOGGING) << "Failed receiving filtered frame:" << av_err2str(result);
            }
            break;
        }

        filtered++;

        // NOTE(review): "queued + 1 < maximumFrames" admits at most
        // maximumFrames - 1 sends per call — confirm this off-by-one is the
        // intended back-pressure threshold.
        if (queued + 1 < maximumFrames) {
            auto ret = -1;
            {
                // Sends are serialized through m_avCodecMutex against
                // receivePacket()/finish(), which lock the same mutex.
                std::lock_guard guard(m_avCodecMutex);
                ret = avcodec_send_frame(m_avCodecContext, frame);
            }
            if (ret < 0) {
                if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)) {
                    qCWarning(PIPEWIRERECORD_LOGGING) << "Error sending a frame for encoding:" << av_err2str(ret);
                }
                break;
            }
            queued++;
        } else {
            // Encoder back-pressure: drop the frame instead of blocking.
            qCWarning(PIPEWIRERECORD_LOGGING) << "Encode queue is full, discarding filtered frame" << frame->pts;
        }
        // Release this frame's data references so the AVFrame can be reused
        // on the next loop iteration.
        av_frame_unref(frame);
    }

    av_frame_free(&frame);

    return std::make_pair(filtered, queued);
}

125int Encoder::receivePacket()
126{
127 auto packet = av_packet_alloc();
128 if (!packet) {
129 qFatal("Failed to allocate memory");
130 }
131
132 int received = 0;
133
134 for (;;) {
135 auto ret = -1;
136 {
137 std::lock_guard guard(m_avCodecMutex);
138 ret = avcodec_receive_packet(m_avCodecContext, packet);
139 }
140 if (ret < 0) {
141 if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)) {
142 qCWarning(PIPEWIRERECORD_LOGGING) << "Error encoding a frame: " << av_err2str(ret);
143 }
144 av_packet_unref(packet);
145 break;
146 }
147
148 received++;
149
150 m_produce->processPacket(packet);
151 av_packet_unref(packet);
152 }
153
154 av_packet_free(&packet);
155
156 return received;
157}
158
// Signal end-of-stream: sending a null frame switches the codec into drain
// mode so the remaining buffered packets can be collected via receivePacket().
void Encoder::finish()
{
    std::lock_guard guard(m_avCodecMutex);
    avcodec_send_frame(m_avCodecContext, nullptr);
}

// Accessor for the codec context owned by this encoder. May be null before
// the codec has been set up (other members guard against that explicitly).
AVCodecContext *Encoder::avCodecContext() const
{
    return m_avCodecContext;
}

170void Encoder::setQuality(std::optional<quint8> quality)
171{
172 m_quality = quality;
173 if (m_avCodecContext) {
174 m_avCodecContext->global_quality = percentageToAbsoluteQuality(quality);
175 }
176}
177
178bool Encoder::supportsHardwareEncoding()
179{
180 return !VaapiUtils::instance()->devicePath().isEmpty();
181}
182
// Record the caller's speed/quality/size trade-off; it is translated into
// codec options later by applyEncodingPreference().
void Encoder::setEncodingPreference(PipeWireBaseEncodedStream::EncodingPreference preference)
{
    m_encodingPreference = preference;
}

188void Encoder::applyEncodingPreference(AVDictionary *options)
189{
190 switch (m_encodingPreference) {
191 case PipeWireBaseEncodedStream::EncodingPreference::NoPreference:
192 av_dict_set(&options, "preset", "veryfast", 0);
193 break;
194 case PipeWireBaseEncodedStream::EncodingPreference::Quality:
195 av_dict_set(&options, "preset", "medium", 0);
196 break;
197 case PipeWireBaseEncodedStream::EncodingPreference::Speed:
198 av_dict_set(&options, "preset", "ultrafast", 0);
199 av_dict_set(&options, "tune", "zerolatency", 0);
200 break;
201 case PipeWireBaseEncodedStream::EncodingPreference::Size:
202 av_dict_set(&options, "preset", "slow", 0);
203 break;
204 default: // Same as NoPreference
205 av_dict_set(&options, "preset", "veryfast", 0);
206 break;
207 }
208}
209
// CPU-side encoder: frames are converted to QImage and pushed through a
// libavfilter graph before encoding (see filterFrame/createFilterGraph).
SoftwareEncoder::SoftwareEncoder(PipeWireProduce *produce)
    : Encoder(produce)
{
}

215bool SoftwareEncoder::filterFrame(const PipeWireFrame &frame)
216{
217 auto size = m_produce->m_stream->size();
218
219 QImage image;
220 if (frame.dmabuf) {
221 image = QImage(m_produce->m_stream->size(), QImage::Format_RGBA8888_Premultiplied);
222 if (!m_dmaBufHandler.downloadFrame(image, frame)) {
223 m_produce->m_stream->renegotiateModifierFailed(frame.format, frame.dmabuf->modifier);
224 return false;
225 }
226 } else if (frame.dataFrame) {
227 image = frame.dataFrame->toImage();
228 } else {
229 return false;
230 }
231
232 AVFrame *avFrame = av_frame_alloc();
233 if (!avFrame) {
234 qFatal("Failed to allocate memory");
235 }
236 avFrame->format = convertQImageFormatToAVPixelFormat(image.format());
237 avFrame->width = size.width();
238 avFrame->height = size.height();
239 if (m_quality) {
240 avFrame->quality = percentageToFrameQuality(m_quality.value());
241 }
242
243 av_frame_get_buffer(avFrame, 32);
244
245 const std::uint8_t *buffers[] = {image.constBits(), nullptr};
246 const int strides[] = {static_cast<int>(image.bytesPerLine()), 0, 0, 0};
247
248 av_image_copy(avFrame->data, avFrame->linesize, buffers, strides, static_cast<AVPixelFormat>(avFrame->format), size.width(), size.height());
249
250 if (frame.presentationTimestamp) {
251 avFrame->pts = m_produce->framePts(frame.presentationTimestamp);
252 }
253
254 if (auto result = av_buffersrc_add_frame(m_inputFilter, avFrame); result < 0) {
255 qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to submit frame for filtering";
256 }
257
258 return true;
259}
260
/**
 * Build the software filter graph: a "buffer" source fed RGBA frames, the
 * chain described by m_filterGraphToParse, and a "buffersink" output.
 *
 * @param size the stream size, applied to the buffer source parameters.
 * @return true when the graph was parsed and configured successfully.
 */
bool SoftwareEncoder::createFilterGraph(const QSize &size)
{
    m_avFilterGraph = avfilter_graph_alloc();
    if (!m_avFilterGraph) {
        qFatal("Failed to allocate memory");
    }

    // The string arguments are placeholders; the real width/height/format
    // are applied just below via av_buffersrc_parameters_set().
    int ret = avfilter_graph_create_filter(&m_inputFilter,
                                           avfilter_get_by_name("buffer"),
                                           "in",
                                           "width=1:height=1:pix_fmt=rgba:time_base=1/1",
                                           nullptr,
                                           m_avFilterGraph);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to create the buffer filter";
        return false;
    }

    auto parameters = av_buffersrc_parameters_alloc();
    if (!parameters) {
        qFatal("Failed to allocate memory");
    }

    parameters->format = AV_PIX_FMT_RGBA;
    parameters->width = size.width();
    parameters->height = size.height();
    // 1/1000 time base: pts presumably expressed in milliseconds — confirm
    // against PipeWireProduce::framePts().
    parameters->time_base = {1, 1000};

    av_buffersrc_parameters_set(m_inputFilter, parameters);
    av_free(parameters);
    parameters = nullptr;

    ret = avfilter_graph_create_filter(&m_outputFilter, avfilter_get_by_name("buffersink"), "out", nullptr, nullptr, m_avFilterGraph);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Could not create buffer output filter";
        return false;
    }

    // Endpoint descriptors for avfilter_graph_parse(); that call takes
    // ownership of both lists and frees them, so no cleanup is needed here.
    auto inputs = avfilter_inout_alloc();
    if (!inputs) {
        qFatal("Failed to allocate memory");
    }
    inputs->name = av_strdup("in");
    inputs->filter_ctx = m_inputFilter;
    inputs->pad_idx = 0;
    inputs->next = nullptr;

    auto outputs = avfilter_inout_alloc();
    if (!outputs) {
        qFatal("Failed to allocate memory");
    }
    outputs->name = av_strdup("out");
    outputs->filter_ctx = m_outputFilter;
    outputs->pad_idx = 0;
    outputs->next = nullptr;

    ret = avfilter_graph_parse(m_avFilterGraph, m_filterGraphToParse.toUtf8().data(), outputs, inputs, NULL);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed creating filter graph";
        return false;
    }

    ret = avfilter_graph_config(m_avFilterGraph, nullptr);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed configuring filter graph";
        return false;
    }

    return true;
}

// GPU-side encoder: DMA-BUF frames are wrapped as DRM PRIME frames and fed
// to the filter graph without copying pixel data (see filterFrame).
HardwareEncoder::HardwareEncoder(PipeWireProduce *produce)
    : Encoder(produce)
{
}

337HardwareEncoder::~HardwareEncoder()
338{
339 if (m_drmFramesContext) {
340 av_free(m_drmFramesContext);
341 }
342
343 if (m_drmContext) {
344 av_free(m_drmContext);
345 }
346}
347
348bool HardwareEncoder::filterFrame(const PipeWireFrame &frame)
349{
350 if (!frame.dmabuf) {
351 return false;
352 }
353
354 auto attribs = frame.dmabuf.value();
355
356 auto drmFrame = av_frame_alloc();
357 if (!drmFrame) {
358 qFatal("Failed to allocate memory");
359 }
360 drmFrame->format = AV_PIX_FMT_DRM_PRIME;
361 drmFrame->width = attribs.width;
362 drmFrame->height = attribs.height;
363 if (m_quality) {
364 drmFrame->quality = percentageToFrameQuality(m_quality.value());
365 }
366
367 auto frameDesc = new AVDRMFrameDescriptor;
368 frameDesc->nb_layers = 1;
369 frameDesc->layers[0].nb_planes = attribs.planes.count();
370 frameDesc->layers[0].format = attribs.format;
371 for (int i = 0; i < attribs.planes.count(); ++i) {
372 const auto &plane = attribs.planes[i];
373 frameDesc->layers[0].planes[i].object_index = 0;
374 frameDesc->layers[0].planes[i].offset = plane.offset;
375 frameDesc->layers[0].planes[i].pitch = plane.stride;
376 }
377 frameDesc->nb_objects = 1;
378 frameDesc->objects[0].fd = attribs.planes[0].fd;
379 frameDesc->objects[0].format_modifier = attribs.modifier;
380 frameDesc->objects[0].size = attribs.width * attribs.height * 4;
381
382 drmFrame->data[0] = reinterpret_cast<uint8_t *>(frameDesc);
383 drmFrame->buf[0] = av_buffer_create(reinterpret_cast<uint8_t *>(frameDesc), sizeof(*frameDesc), av_buffer_default_free, nullptr, 0);
384 if (frame.presentationTimestamp) {
385 drmFrame->pts = m_produce->framePts(frame.presentationTimestamp);
386 }
387
388 if (auto result = av_buffersrc_add_frame(m_inputFilter, drmFrame); result < 0) {
389 qCDebug(PIPEWIRERECORD_LOGGING) << "Failed sending frame for encoding" << av_err2str(result);
390 av_frame_unref(drmFrame);
391 return false;
392 }
393
394 av_frame_free(&drmFrame);
395 return true;
396}
397
398QByteArray HardwareEncoder::checkVaapi(const QSize &size)
399{
400 auto utils = VaapiUtils::instance();
401 if (utils->devicePath().isEmpty()) {
402 qCWarning(PIPEWIRERECORD_LOGGING) << "Hardware encoding is not supported on this device.";
403 return QByteArray{};
404 }
405
406 auto minSize = utils->minimumSize();
407 if (size.width() < minSize.width() || size.height() < minSize.height()) {
408 qCWarning(PIPEWIRERECORD_LOGGING) << "Requested size" << size << "less than minimum supported hardware size" << minSize;
409 return QByteArray{};
410 }
411
412 auto maxSize = utils->maximumSize();
413 if (size.width() > maxSize.width() || size.height() > maxSize.height()) {
414 qCWarning(PIPEWIRERECORD_LOGGING) << "Requested size" << size << "exceeds maximum supported hardware size" << maxSize;
415 return QByteArray{};
416 }
417
418 return utils->devicePath();
419}
420
/**
 * Create the libav DRM device context and frames context used to import
 * DMA-BUF frames for hardware encoding.
 *
 * @param size the stream size, validated against hardware limits first.
 * @return true on success; on failure the partially-built state is released.
 */
bool HardwareEncoder::createDrmContext(const QSize &size)
{
    auto path = checkVaapi(size);
    if (path.isEmpty()) {
        return false;
    }

    // NOTE(review): AV_HWFRAME_MAP_READ is a hwframe *mapping* flag, but the
    // last parameter of av_hwdevice_ctx_create() expects device-creation
    // flags — confirm this value is intentional and not a mixed-up constant.
    int err = av_hwdevice_ctx_create(&m_drmContext, AV_HWDEVICE_TYPE_DRM, path.data(), NULL, AV_HWFRAME_MAP_READ);
    if (err < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to create DRM device. Error" << av_err2str(err);
        return false;
    }

    m_drmFramesContext = av_hwframe_ctx_alloc(m_drmContext);
    if (!m_drmFramesContext) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to create DRM frames context";
        return false;
    }

    auto framesContext = reinterpret_cast<AVHWFramesContext *>(m_drmFramesContext->data);
    framesContext->format = AV_PIX_FMT_DRM_PRIME; // frames arrive as DRM PRIME dmabufs
    framesContext->sw_format = AV_PIX_FMT_0BGR; // pixel layout when the dmabuf is mapped
    framesContext->width = size.width();
    framesContext->height = size.height();

    if (auto result = av_hwframe_ctx_init(m_drmFramesContext); result < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed initializing DRM frames context" << av_err2str(result);
        av_buffer_unref(&m_drmFramesContext);
        return false;
    }

    return true;
}

#include "moc_encoder_p.cpp"
// NOTE(review): a Doxygen-generated cross-reference footer (Qt method
// signatures and site copyright lines) was accidentally captured here when
// this file was extracted from the documentation pages; it is not part of
// the source and has been removed.