diff -Nru forked-daapd-25.0/debian/changelog forked-daapd-25.0/debian/changelog --- forked-daapd-25.0/debian/changelog 2017-10-22 22:46:13.000000000 +0000 +++ forked-daapd-25.0/debian/changelog 2018-07-26 13:42:48.000000000 +0000 @@ -1,3 +1,10 @@ +forked-daapd (25.0-2+rpi1) buster-staging; urgency=medium + + * Backport upstream patches for ffmpeg 4.0 + * Fix clean target. + + -- Peter Michael Green Thu, 26 Jul 2018 13:42:48 +0000 + forked-daapd (25.0-2) unstable; urgency=medium * Remove upstart support diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-10.patch forked-daapd-25.0/debian/patches/ffmpeg4-10.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-10.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-10.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,21 @@ +commit 548d9ada2e54321f1e3841412fcb3e7fc8ceebe0 +Author: ejurgensen +Date: Sat Mar 4 18:30:19 2017 +0100 + + [transcode] Fixup dead assignment + +diff --git a/src/transcode.c b/src/transcode.c +index 4df09e5a..e1369ee4 100644 +--- a/src/transcode.c ++++ b/src/transcode.c +@@ -1467,9 +1467,7 @@ transcode(struct evbuffer *evbuf, int *icy_timer, struct transcode_ctx *ctx, int + if (icy_timer && ctx->encode_ctx->icy_interval) + *icy_timer = (ctx->encode_ctx->total_bytes % ctx->encode_ctx->icy_interval < processed); + +- if (ret == AVERROR_EOF) +- ret = 0; +- else if (ret < 0) ++ if ((ret < 0) && (ret != AVERROR_EOF)) + return ret; + + return processed; diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-11.patch forked-daapd-25.0/debian/patches/ffmpeg4-11.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-11.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-11.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,122 @@ +Modified by Peter Michael Green to fix hunks that did not apply and to remove buildopts change +(buildopts seems to have been largely redesigned and doesn't seem terribly important) + +commit eec98e3b7e889e5976e610451f199a716bb22aaa +Author: ejurgensen +Date: Sat Sep 16 23:01:42 2017 +0200 + + [main/transcode] Fix some libav compability + log what ffmpeg/libav we are using + +Index: forked-daapd-25.0/configure.ac +=================================================================== +--- forked-daapd-25.0.orig/configure.ac ++++ forked-daapd-25.0/configure.ac +@@ -243,15 +243,24 @@ FORK_MODULES_CHECK([FORKED], [LIBAV], + [libavfilter/buffersink.h]) + FORK_CHECK_DECLS([avfilter_graph_parse_ptr], + [libavfilter/avfilter.h]) +- FORK_CHECK_DECLS([av_packet_unref], [libavcodec/avcodec.h]) +- FORK_CHECK_DECLS([av_packet_rescale_ts], [libavcodec/avcodec.h]) ++ FORK_CHECK_DECLS([av_packet_unref], ++ [libavcodec/avcodec.h]) ++ FORK_CHECK_DECLS([av_packet_rescale_ts], ++ [libavcodec/avcodec.h]) + FORK_CHECK_DECLS([avformat_alloc_output_context2], + [libavformat/avformat.h]) +- FORK_CHECK_DECLS([av_frame_alloc], [libavutil/frame.h]) ++ FORK_CHECK_DECLS([avformat_network_init], ++ [libavformat/avformat.h]) ++ FORK_CHECK_DECLS([av_version_info], ++ [libavutil/avutil.h]) ++ FORK_CHECK_DECLS([av_frame_alloc], ++ [libavutil/frame.h]) + FORK_CHECK_DECLS([av_frame_get_best_effort_timestamp], + [libavutil/frame.h]) +- FORK_CHECK_DECLS([av_image_fill_arrays], [libavutil/imgutils.h]) +- FORK_CHECK_DECLS([av_image_get_buffer_size], [libavutil/imgutils.h]) ++ FORK_CHECK_DECLS([av_image_fill_arrays], ++ [libavutil/imgutils.h]) ++ FORK_CHECK_DECLS([av_image_get_buffer_size], ++ [libavutil/imgutils.h]) + AC_CHECK_HEADERS([libavutil/channel_layout.h 
libavutil/mathematics.h]) + ]) + +Index: forked-daapd-25.0/src/main.c +=================================================================== +--- forked-daapd-25.0.orig/src/main.c ++++ forked-daapd-25.0/src/main.c +@@ -49,6 +49,7 @@ + #ifdef HAVE_LIBEVENT_PTHREADS + # include + #endif ++#include + #include + #include + #include +@@ -466,6 +467,7 @@ main(int argc, char **argv) + char *ffid; + char *pidfile; + char buildopts[256]; ++ const char *av_version; + const char *gcry_version; + sigset_t sigs; + int sigfd; +@@ -620,6 +622,18 @@ main(int argc, char **argv) + + DPRINTF(E_LOG, L_MAIN, "Built %s with:%s\n", __DATE__, buildopts); + ++#if HAVE_DECL_AV_VERSION_INFO ++ av_version = av_version_info(); ++#else ++ av_version = "(unknown version)"; ++#endif ++ ++#ifdef HAVE_FFMPEG ++ DPRINTF(E_INFO, L_MAIN, "Initialized with ffmpeg %s\n", av_version); ++#else ++ DPRINTF(E_INFO, L_MAIN, "Initialized with libav %s\n", av_version); ++#endif ++ + ret = av_lockmgr_register(ffmpeg_lockmgr); + if (ret < 0) + { +@@ -631,7 +645,7 @@ main(int argc, char **argv) + + av_register_all(); + avfilter_register_all(); +-#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 13) ++#if HAVE_DECL_AVFORMAT_NETWORK_INIT + avformat_network_init(); + #endif + av_log_set_callback(logger_ffmpeg); +@@ -937,7 +951,7 @@ main(int argc, char **argv) + #ifdef HAVE_LIBCURL + curl_global_cleanup(); + #endif +-#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 13) ++#if HAVE_DECL_AVFORMAT_NETWORK_INIT + avformat_network_deinit(); + #endif + av_lockmgr_register(NULL); +Index: forked-daapd-25.0/src/transcode.c +=================================================================== +--- forked-daapd-25.0.orig/src/transcode.c ++++ forked-daapd-25.0/src/transcode.c +@@ -27,7 +27,7 @@ + + #include + #include +-#include ++#include + #include + #include + #include +@@ -370,7 +370,7 @@ stream_add(struct encode_ctx *ctx, struc + } + + if (ctx->ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) +- s->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; ++ s->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + + ret = avcodec_open2(s->codec, NULL, NULL); + if (ret < 0) diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-1.patch forked-daapd-25.0/debian/patches/ffmpeg4-1.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-1.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-1.patch 2018-07-26 11:37:29.000000000 +0000 @@ -0,0 +1,1882 @@ +commit 25c1795af27afb41708a2be94e473db49ba3f6b5 +Author: ejurgensen +Date: Sun Feb 26 15:32:37 2017 +0100 + + [transcode] Update to new ffmpeg api - part 1 + - no more use of AVStream.codec + - ditch some backwards compability + - move closer to being able do video, at least for artwork + +diff --git a/src/httpd.c b/src/httpd.c +index af45ecc4..5b01681a 100644 +--- a/src/httpd.c ++++ b/src/httpd.c +@@ -552,7 +552,7 @@ httpd_stream_file(struct evhttp_request *req, int id) + + stream_cb = stream_chunk_xcode_cb; + +- st->xcode = transcode_setup(mfi->data_kind, mfi->path, mfi->song_length, XCODE_PCM16_HEADER, &st->size); ++ st->xcode = transcode_setup(XCODE_PCM16_HEADER, mfi->data_kind, mfi->path, mfi->song_length, &st->size); + if (!st->xcode) + { + DPRINTF(E_WARN, L_HTTPD, "Transcoding setup failed, aborting streaming\n"); +diff --git a/src/httpd_streaming.c b/src/httpd_streaming.c +index b9340fca..115f4021 100644 +--- a/src/httpd_streaming.c ++++ b/src/httpd_streaming.c +@@ -299,7 +299,7 @@ 
streaming_init(void) + return -1; + } + +- streaming_encode_ctx = transcode_encode_setup(decode_ctx, XCODE_MP3, NULL); ++ streaming_encode_ctx = transcode_encode_setup(XCODE_MP3, decode_ctx, NULL); + transcode_decode_cleanup(decode_ctx); + if (!streaming_encode_ctx) + { +diff --git a/src/inputs/file_http.c b/src/inputs/file_http.c +index 220c7a94..6335a5b3 100644 +--- a/src/inputs/file_http.c ++++ b/src/inputs/file_http.c +@@ -31,7 +31,7 @@ + static int + setup(struct player_source *ps) + { +- ps->input_ctx = transcode_setup(ps->data_kind, ps->path, ps->len_ms, XCODE_PCM16_NOHEADER, NULL); ++ ps->input_ctx = transcode_setup(XCODE_PCM16_NOHEADER, ps->data_kind, ps->path, ps->len_ms, NULL); + if (!ps->input_ctx) + return -1; + +diff --git a/src/transcode.c b/src/transcode.c +index 3d1de189..29d4b9a4 100644 +--- a/src/transcode.c ++++ b/src/transcode.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (C) 2015 Espen Jurgensen ++ * Copyright (C) 2015-17 Espen Jurgensen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -21,6 +21,7 @@ + #endif + + #include ++#include + #include + #include + +@@ -57,20 +58,54 @@ static const char *itunes_codecs = "mpeg,mp4a,mp4v,alac,wav"; + // Used for passing errors to DPRINTF (can't count on av_err2str being present) + static char errbuf[64]; + +-struct filter_ctx { ++// The settings struct will be filled out based on the profile enum ++struct settings_ctx ++{ ++ bool encode_video; ++ bool encode_audio; ++ ++ // Output format (for the muxer) ++ const char *format; ++ ++ // Audio settings ++ enum AVCodecID audio_codec; ++ const char *audio_codec_name; ++ int sample_rate; ++ uint64_t channel_layout; ++ int channels; ++ enum AVSampleFormat sample_format; ++ int byte_depth; ++ bool wavheader; ++ ++ // Video settings ++ enum AVCodecID video_codec; ++ const char *video_codec_name; ++ enum AVPixelFormat pix_fmt; ++ int height; ++ int width; ++}; ++ ++struct stream_ctx ++{ ++ AVStream *stream; ++ AVCodecContext *codec; ++ + AVFilterContext *buffersink_ctx; + AVFilterContext *buffersrc_ctx; + AVFilterGraph *filter_graph; + }; + +-struct decode_ctx { ++struct decode_ctx ++{ ++ // Settings derived from the profile ++ struct settings_ctx settings; ++ + // Input format context + AVFormatContext *ifmt_ctx; + +- // Will point to the max 3 streams that we will transcode +- AVStream *audio_stream; +- AVStream *video_stream; +- AVStream *subtitle_stream; ++ // Stream and decoder data ++ struct stream_ctx audio_stream; ++ struct stream_ctx video_stream; + + // Duration (used to make wav header) + uint32_t duration; +@@ -89,45 +124,25 @@ struct decode_ctx { + int64_t timestamp; + }; + +-struct encode_ctx { ++struct encode_ctx ++{ ++ // Settings derived from the profile ++ struct settings_ctx settings; ++ + // Output format context + AVFormatContext *ofmt_ctx; + +- // We use filters to resample +- struct filter_ctx *filter_ctx; ++ // Stream, filter and decoder data ++ struct stream_ctx audio_stream; ++ struct stream_ctx video_stream; + + // The ffmpeg muxer writes to this buffer using the avio_evbuffer interface + struct evbuffer *obuf; + +- // Maps input stream number -> output stream number +- // So if we are decoding audio stream 3 and encoding it to 0, then +- // out_stream_map[3] is 0. A value of -1 means the stream is ignored. 
+- int out_stream_map[MAX_STREAMS]; +- +- // Maps output stream number -> input stream number +- unsigned int in_stream_map[MAX_STREAMS]; +- + // Used for seeking + int64_t prev_pts[MAX_STREAMS]; + int64_t offset_pts[MAX_STREAMS]; + +- // Settings for encoding and muxing +- const char *format; +- int encode_video; +- +- // Audio settings +- enum AVCodecID audio_codec; +- int sample_rate; +- uint64_t channel_layout; +- int channels; +- enum AVSampleFormat sample_format; +- int byte_depth; +- +- // Video settings +- enum AVCodecID video_codec; +- int video_height; +- int video_width; +- + // How many output bytes we have processed in total + off_t total_bytes; + +@@ -136,11 +151,11 @@ struct encode_ctx { + uint32_t icy_hash; + + // WAV header +- int wavhdr; + uint8_t header[44]; + }; + +-struct transcode_ctx { ++struct transcode_ctx ++{ + struct decode_ctx *decode_ctx; + struct encode_ctx *encode_ctx; + }; +@@ -148,48 +163,95 @@ struct transcode_ctx { + struct decoded_frame + { + AVFrame *frame; +- unsigned int stream_index; ++ enum AVMediaType type; + }; + + + /* -------------------------- PROFILE CONFIGURATION ------------------------ */ + + static int +-init_profile(struct encode_ctx *ctx, enum transcode_profile profile) ++init_settings(struct settings_ctx *settings, enum transcode_profile profile) + { ++ const AVCodecDescriptor *codec_desc; ++ ++ memset(settings, 0, sizeof(struct settings_ctx)); ++ + switch (profile) + { +- case XCODE_PCM16_NOHEADER: + case XCODE_PCM16_HEADER: +- ctx->encode_video = 0; +- ctx->format = "s16le"; +- ctx->audio_codec = AV_CODEC_ID_PCM_S16LE; +- ctx->sample_rate = 44100; +- ctx->channel_layout = AV_CH_LAYOUT_STEREO; +- ctx->channels = 2; +- ctx->sample_format = AV_SAMPLE_FMT_S16; +- ctx->byte_depth = 2; // Bytes per sample = 16/8 +- return 0; ++ settings->wavheader = 1; ++ case XCODE_PCM16_NOHEADER: ++ settings->encode_audio = 1; ++ settings->format = "s16le"; ++ settings->audio_codec = AV_CODEC_ID_PCM_S16LE; ++ settings->sample_rate = 44100; ++ settings->channel_layout = AV_CH_LAYOUT_STEREO; ++ settings->channels = 2; ++ settings->sample_format = AV_SAMPLE_FMT_S16; ++ settings->byte_depth = 2; // Bytes per sample = 16/8 ++ break; + + case XCODE_MP3: +- ctx->encode_video = 0; +- ctx->format = "mp3"; +- ctx->audio_codec = AV_CODEC_ID_MP3; +- ctx->sample_rate = 44100; +- ctx->channel_layout = AV_CH_LAYOUT_STEREO; +- ctx->channels = 2; +- ctx->sample_format = AV_SAMPLE_FMT_S16P; +- ctx->byte_depth = 2; // Bytes per sample = 16/8 +- return 0; +- +- case XCODE_H264_AAC: +- ctx->encode_video = 1; +- return 0; ++ settings->encode_audio = 1; ++ settings->format = "mp3"; ++ settings->audio_codec = AV_CODEC_ID_MP3; ++ settings->sample_rate = 44100; ++ settings->channel_layout = AV_CH_LAYOUT_STEREO; ++ settings->channels = 2; ++ settings->sample_format = AV_SAMPLE_FMT_S16P; ++ settings->byte_depth = 2; // Bytes per sample = 16/8 ++ break; ++ ++ case XCODE_JPEG: ++ settings->encode_video = 1; ++ settings->format = "image2"; ++ settings->video_codec = AV_CODEC_ID_MJPEG; ++ break; ++ ++ case XCODE_PNG: ++ settings->encode_video = 1; ++ settings->format = "image2"; ++ settings->video_codec = AV_CODEC_ID_PNG; ++ break; + + default: + DPRINTF(E_LOG, L_XCODE, "Bug! 
Unknown transcoding profile\n"); + return -1; + } ++ ++ if (settings->audio_codec) ++ { ++ codec_desc = avcodec_descriptor_get(settings->audio_codec); ++ settings->audio_codec_name = codec_desc->name; ++ } ++ ++ if (settings->video_codec) ++ { ++ codec_desc = avcodec_descriptor_get(settings->video_codec); ++ settings->video_codec_name = codec_desc->name; ++ } ++ ++ return 0; ++} ++ ++static void ++stream_settings_set(struct stream_ctx *s, struct settings_ctx *settings, enum AVMediaType type) ++{ ++ if (type == AVMEDIA_TYPE_AUDIO) ++ { ++ s->codec->sample_rate = settings->sample_rate; ++ s->codec->channel_layout = settings->channel_layout; ++ s->codec->channels = settings->channels; ++ s->codec->sample_fmt = settings->sample_format; ++ s->codec->time_base = (AVRational){1, settings->sample_rate}; ++ } ++ else if (type == AVMEDIA_TYPE_AUDIO) ++ { ++ s->codec->height = settings->height; ++ s->codec->width = settings->width; ++ s->codec->pix_fmt = settings->pix_fmt; ++ s->codec->time_base = (AVRational){1, 25}; ++ } + } + + +@@ -229,7 +291,7 @@ make_wav_header(struct encode_ctx *ctx, struct decode_ctx *src_ctx, off_t *est_s + else + duration = 3 * 60 * 1000; /* 3 minutes, in ms */ + +- wav_len = ctx->channels * ctx->byte_depth * ctx->sample_rate * (duration / 1000); ++ wav_len = ctx->settings.channels * ctx->settings.byte_depth * ctx->settings.sample_rate * (duration / 1000); + + *est_size = wav_len + sizeof(ctx->header); + +@@ -238,28 +300,82 @@ make_wav_header(struct encode_ctx *ctx, struct decode_ctx *src_ctx, off_t *est_s + memcpy(ctx->header + 8, "WAVEfmt ", 8); + add_le32(ctx->header + 16, 16); + add_le16(ctx->header + 20, 1); +- add_le16(ctx->header + 22, ctx->channels); /* channels */ +- add_le32(ctx->header + 24, ctx->sample_rate); /* samplerate */ +- add_le32(ctx->header + 28, ctx->sample_rate * ctx->channels * ctx->byte_depth); /* byte rate */ +- add_le16(ctx->header + 32, ctx->channels * ctx->byte_depth); /* block align */ +- add_le16(ctx->header + 34, ctx->byte_depth * 8); /* bits per sample */ ++ add_le16(ctx->header + 22, ctx->settings.channels); /* channels */ ++ add_le32(ctx->header + 24, ctx->settings.sample_rate); /* samplerate */ ++ add_le32(ctx->header + 28, ctx->settings.sample_rate * ctx->settings.channels * ctx->settings.byte_depth); /* byte rate */ ++ add_le16(ctx->header + 32, ctx->settings.channels * ctx->settings.byte_depth); /* block align */ ++ add_le16(ctx->header + 34, ctx->settings.byte_depth * 8); /* bits per sample */ + memcpy(ctx->header + 36, "data", 4); + add_le32(ctx->header + 40, wav_len); + } + + /* +- * Returns true if in_stream is a stream we should decode, otherwise false ++ * Checks if this stream index is one that we are decoding + * + * @in ctx Decode context +- * @in in_stream Pointer to AVStream +- * @return True if stream should be decoded, otherwise false ++ * @in stream_index Index of stream to check ++ * @return Type of stream, unknown if we are not decoding the stream ++ */ ++static enum AVMediaType ++stream_find(struct decode_ctx *ctx, unsigned int stream_index) ++{ ++ if (ctx->audio_stream.stream && (stream_index == ctx->audio_stream.stream->index)) ++ return AVMEDIA_TYPE_AUDIO; ++ ++ if (ctx->video_stream.stream && (stream_index == ctx->video_stream.stream->index)) ++ return AVMEDIA_TYPE_VIDEO; ++ ++ return AVMEDIA_TYPE_UNKNOWN; ++} ++ ++/* ++ * Adds a stream to an output ++ * ++ * @out ctx A pre-allocated stream ctx where we save stream and codec info ++ * @in output Output to add the stream to ++ * @in codec_id What kind of codec 
should we use ++ * @in codec_name Name of codec (only used for logging) ++ * @return Negative on failure, otherwise zero + */ + static int +-decode_stream(struct decode_ctx *ctx, AVStream *in_stream) ++stream_add(struct encode_ctx *ctx, struct stream_ctx *s, enum AVCodecID codec_id, const char *codec_name) + { +- return ((in_stream == ctx->audio_stream) || +- (in_stream == ctx->video_stream) || +- (in_stream == ctx->subtitle_stream)); ++ AVCodec *encoder; ++ int ret; ++ ++ encoder = avcodec_find_encoder(codec_id); ++ if (!encoder) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Necessary encoder (%s) not found\n", codec_name); ++ return -1; ++ } ++ ++ CHECK_NULL(L_XCODE, s->stream = avformat_new_stream(ctx->ofmt_ctx, NULL)); ++ CHECK_NULL(L_XCODE, s->codec = avcodec_alloc_context3(encoder)); ++ ++ stream_settings_set(s, &ctx->settings, encoder->type); ++ ++ if (ctx->ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) ++ s->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; ++ ++ ret = avcodec_open2(s->codec, NULL, NULL); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Cannot open encoder (%s): %s\n", codec_name, err2str(ret)); ++ avcodec_free_context(&s->codec); ++ return -1; ++ } ++ ++ // Copy the codec parameters we just set to the stream, so the muxer knows them ++ ret = avcodec_parameters_from_context(s->stream->codecpar, s->codec); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Cannot copy stream parameters (%s): %s\n", codec_name, err2str(ret)); ++ avcodec_free_context(&s->codec); ++ return -1; ++ } ++ ++ return 0; + } + + /* +@@ -295,16 +411,13 @@ static int decode_interrupt_cb(void *arg) + * returned by av_read_frame(). The packet struct is owned by the + * caller, but *not* packet->data, so don't free the packet with + * av_free_packet()/av_packet_unref() +- * @out stream Set to the input AVStream corresponding to the packet +- * @out stream_index +- * Set to the input stream index corresponding to the packet ++ * @out type Media type of packet + * @in ctx Decode context + * @return 0 if OK, < 0 on error or end of file + */ + static int +-read_packet(AVPacket *packet, AVStream **stream, unsigned int *stream_index, struct decode_ctx *ctx) ++read_packet(AVPacket *packet, enum AVMediaType *type, struct decode_ctx *ctx) + { +- AVStream *in_stream; + int ret; + + do +@@ -337,40 +450,35 @@ read_packet(AVPacket *packet, AVStream **stream, unsigned int *stream_index, str + *packet = ctx->packet; + } + +- in_stream = ctx->ifmt_ctx->streams[packet->stream_index]; ++ *type = stream_find(ctx, packet->stream_index); + } +- while (!decode_stream(ctx, in_stream)); +- +- av_packet_rescale_ts(packet, in_stream->time_base, in_stream->codec->time_base); +- +- *stream = in_stream; +- *stream_index = packet->stream_index; ++ while (*type == AVMEDIA_TYPE_UNKNOWN); + + return 0; + } + + static int +-encode_write_frame(struct encode_ctx *ctx, AVFrame *filt_frame, unsigned int stream_index, int *got_frame) ++encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *filt_frame, int *got_frame) + { +- AVStream *out_stream; + AVPacket enc_pkt; ++ unsigned int stream_index; + int ret; + int got_frame_local; + + if (!got_frame) + got_frame = &got_frame_local; + +- out_stream = ctx->ofmt_ctx->streams[stream_index]; ++ stream_index = s->stream->index; + + // Encode filtered frame + enc_pkt.data = NULL; + enc_pkt.size = 0; + av_init_packet(&enc_pkt); + +- if (out_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) +- ret = avcodec_encode_audio2(out_stream->codec, &enc_pkt, filt_frame, got_frame); +- else if 
(out_stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) +- ret = avcodec_encode_video2(out_stream->codec, &enc_pkt, filt_frame, got_frame); ++ if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) ++ ret = avcodec_encode_audio2(s->codec, &enc_pkt, filt_frame, got_frame); ++ else if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO) ++ ret = avcodec_encode_video2(s->codec, &enc_pkt, filt_frame, got_frame); + else + return -1; + +@@ -394,7 +502,7 @@ encode_write_frame(struct encode_ctx *ctx, AVFrame *filt_frame, unsigned int str + ctx->prev_pts[stream_index] = enc_pkt.pts; + enc_pkt.dts = enc_pkt.pts; //FIXME + +- av_packet_rescale_ts(&enc_pkt, out_stream->codec->time_base, out_stream->time_base); ++ av_packet_rescale_ts(&enc_pkt, s->codec->time_base, s->stream->time_base); + + // Mux encoded frame + ret = av_interleaved_write_frame(ctx->ofmt_ctx, &enc_pkt); +@@ -403,7 +511,7 @@ encode_write_frame(struct encode_ctx *ctx, AVFrame *filt_frame, unsigned int str + + #if HAVE_DECL_AV_BUFFERSRC_ADD_FRAME_FLAGS && HAVE_DECL_AV_BUFFERSINK_GET_FRAME + static int +-filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int stream_index) ++filter_encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *frame) + { + AVFrame *filt_frame; + int ret; +@@ -411,7 +519,7 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + // Push the decoded frame into the filtergraph + if (frame) + { +- ret = av_buffersrc_add_frame_flags(ctx->filter_ctx[stream_index].buffersrc_ctx, frame, 0); ++ ret = av_buffersrc_add_frame_flags(s->buffersrc_ctx, frame, 0); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error while feeding the filtergraph: %s\n", err2str(ret)); +@@ -429,7 +537,7 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + return -1; + } + +- ret = av_buffersink_get_frame(ctx->filter_ctx[stream_index].buffersink_ctx, filt_frame); ++ ret = av_buffersink_get_frame(s->buffersink_ctx, filt_frame); + if (ret < 0) + { + /* if no more frames for output - returns AVERROR(EAGAIN) +@@ -443,7 +551,7 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + } + + filt_frame->pict_type = AV_PICTURE_TYPE_NONE; +- ret = encode_write_frame(ctx, filt_frame, stream_index, NULL); ++ ret = encode_write_frame(ctx, s, filt_frame, NULL); + av_frame_free(&filt_frame); + if (ret < 0) + break; +@@ -453,19 +561,16 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + } + #else + static int +-filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int stream_index) ++filter_encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *frame) + { + AVFilterBufferRef *picref; +- AVCodecContext *enc_ctx; + AVFrame *filt_frame; + int ret; + +- enc_ctx = ctx->ofmt_ctx->streams[stream_index]->codec; +- + // Push the decoded frame into the filtergraph + if (frame) + { +- ret = av_buffersrc_write_frame(ctx->filter_ctx[stream_index].buffersrc_ctx, frame); ++ ret = av_buffersrc_write_frame(s->buffersrc_ctx, frame); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error while feeding the filtergraph: %s\n", err2str(ret)); +@@ -483,10 +588,10 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + return -1; + } + +- if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO && !(enc_ctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) +- ret = av_buffersink_read_samples(ctx->filter_ctx[stream_index].buffersink_ctx, &picref, enc_ctx->frame_size); ++ if 
(s->codec->codec_type == AVMEDIA_TYPE_AUDIO && !(s->codec->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) ++ ret = av_buffersink_read_samples(s->buffersink_ctx, &picref, s->codec->frame_size); + else +- ret = av_buffersink_read(ctx->filter_ctx[stream_index].buffersink_ctx, &picref); ++ ret = av_buffersink_read(s->buffersink_ctx, &picref); + + if (ret < 0) + { +@@ -501,7 +606,7 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + } + + avfilter_copy_buf_props(filt_frame, picref); +- ret = encode_write_frame(ctx, filt_frame, stream_index, NULL); ++ ret = encode_write_frame(ctx, s, filt_frame, NULL); + av_frame_free(&filt_frame); + avfilter_unref_buffer(picref); + if (ret < 0) +@@ -518,59 +623,44 @@ filter_encode_write_frame(struct encode_ctx *ctx, AVFrame *frame, unsigned int s + * + * @out frame AVFrame if there was anything to flush, otherwise undefined + * @out stream Set to the AVStream where a decoder returned a frame +- * @out stream_index +- * Set to the stream index of the stream returning a frame + * @in ctx Decode context + * @return Non-zero (true) if frame found, otherwise 0 (false) + */ + static int +-flush_decoder(AVFrame *frame, AVStream **stream, unsigned int *stream_index, struct decode_ctx *ctx) ++flush_decoder(AVFrame *frame, enum AVMediaType *type, struct decode_ctx *ctx) + { +- AVStream *in_stream; +- AVPacket dummypacket; +- int got_frame; +- int i; +- +- memset(&dummypacket, 0, sizeof(AVPacket)); ++ AVPacket dummypacket = { 0 }; ++ int got_frame = 0; + +- for (i = 0; i < ctx->ifmt_ctx->nb_streams; i++) ++ if (ctx->audio_stream.codec) + { +- in_stream = ctx->ifmt_ctx->streams[i]; +- if (!decode_stream(ctx, in_stream)) +- continue; +- +- if (in_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) +- avcodec_decode_audio4(in_stream->codec, frame, &got_frame, &dummypacket); +- else +- avcodec_decode_video2(in_stream->codec, frame, &got_frame, &dummypacket); +- +- if (!got_frame) +- continue; +- +- DPRINTF(E_DBG, L_XCODE, "Flushing decoders produced a frame from stream %d\n", i); ++ *type = AVMEDIA_TYPE_AUDIO; ++ avcodec_decode_audio4(ctx->audio_stream.codec, frame, &got_frame, &dummypacket); ++ } + +- *stream = in_stream; +- *stream_index = i; +- return got_frame; ++ if (!got_frame && ctx->video_stream.codec) ++ { ++ *type = AVMEDIA_TYPE_VIDEO; ++ avcodec_decode_video2(ctx->video_stream.codec, frame, &got_frame, &dummypacket); + } + +- return 0; ++ return got_frame; + } + + static void +-flush_encoder(struct encode_ctx *ctx, unsigned int stream_index) ++flush_encoder(struct encode_ctx *ctx, struct stream_ctx *s) + { + int ret; + int got_frame; + +- DPRINTF(E_DBG, L_XCODE, "Flushing output stream #%u encoder\n", stream_index); ++ DPRINTF(E_DBG, L_XCODE, "Flushing output stream #%u encoder\n", s->stream->index); + +- if (!(ctx->ofmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY)) ++ if (!(s->codec->codec->capabilities & CODEC_CAP_DELAY)) + return; + + do + { +- ret = encode_write_frame(ctx, NULL, stream_index, &got_frame); ++ ret = encode_write_frame(ctx, s, NULL, &got_frame); + } + while ((ret == 0) && got_frame); + } +@@ -579,28 +669,24 @@ flush_encoder(struct encode_ctx *ctx, unsigned int stream_index) + /* --------------------------- INPUT/OUTPUT INIT --------------------------- */ + + static int +-open_input(struct decode_ctx *ctx, const char *path, int decode_video) ++open_input(struct decode_ctx *ctx, const char *path) + { +- AVDictionary *options; ++ AVDictionary *options = NULL; + AVCodec *decoder; ++ 
AVCodecContext *dec_ctx; + int stream_index; + int ret; + +- options = NULL; +- ctx->ifmt_ctx = avformat_alloc_context();; +- if (!ctx->ifmt_ctx) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for input format context\n"); +- return -1; +- } ++ CHECK_NULL(L_XCODE, ctx->ifmt_ctx = avformat_alloc_context()); + +-# ifndef HAVE_FFMPEG +- // Without this, libav is slow to probe some internet streams, which leads to RAOP timeouts + if (ctx->data_kind == DATA_KIND_HTTP) +- ctx->ifmt_ctx->probesize = 64000; ++ { ++# ifndef HAVE_FFMPEG ++ // Without this, libav is slow to probe some internet streams, which leads to RAOP timeouts ++ ctx->ifmt_ctx->probesize = 64000; + # endif +- if (ctx->data_kind == DATA_KIND_HTTP) +- av_dict_set(&options, "icy", "1", 0); ++ av_dict_set(&options, "icy", "1", 0); ++ } + + // TODO Newest versions of ffmpeg have timeout and reconnect options we should use + ctx->ifmt_ctx->interrupt_callback.callback = decode_interrupt_cb; +@@ -631,61 +717,70 @@ open_input(struct decode_ctx *ctx, const char *path, int decode_video) + goto out_fail; + } + +- // Find audio stream and open decoder +- stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &decoder, 0); +- if ((stream_index < 0) || (!decoder)) ++ if (ctx->settings.encode_audio) + { +- DPRINTF(E_LOG, L_XCODE, "Did not find audio stream or suitable decoder for %s\n", path); +- goto out_fail; +- } +- +- ctx->ifmt_ctx->streams[stream_index]->codec->request_sample_fmt = AV_SAMPLE_FMT_S16; +- ctx->ifmt_ctx->streams[stream_index]->codec->request_channel_layout = AV_CH_LAYOUT_STEREO; ++ // Find audio stream and open decoder ++ stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &decoder, 0); ++ if ((stream_index < 0) || (!decoder)) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Did not find audio stream or suitable decoder for %s\n", path); ++ goto out_fail; ++ } + +-// Disabled to see if it is still required +-// if (decoder->capabilities & CODEC_CAP_TRUNCATED) +-// ctx->ifmt_ctx->streams[stream_index]->codec->flags |= CODEC_FLAG_TRUNCATED; ++ CHECK_NULL(L_XCODE, dec_ctx = avcodec_alloc_context3(decoder)); + +- ret = avcodec_open2(ctx->ifmt_ctx->streams[stream_index]->codec, decoder, NULL); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", stream_index, err2str(ret)); +- goto out_fail; +- } ++ // In open_filter() we need to tell the sample rate and format that the decoder ++ // is giving us - however sample rate of dec_ctx will be 0 if we don't prime it ++ // with the streams codecpar data. 
++ ret = avcodec_parameters_to_context(dec_ctx, ctx->ifmt_ctx->streams[stream_index]->codecpar); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Failed to copy codecpar for stream #%d: %s\n", stream_index, err2str(ret)); ++ goto out_fail; ++ } + +- ctx->audio_stream = ctx->ifmt_ctx->streams[stream_index]; ++ dec_ctx->request_sample_fmt = ctx->settings.sample_format; ++ dec_ctx->request_channel_layout = ctx->settings.channel_layout; + +- // If no video then we are all done +- if (!decode_video) +- return 0; ++ ret = avcodec_open2(dec_ctx, NULL, NULL); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", stream_index, err2str(ret)); ++ goto out_fail; ++ } + +- // Find video stream and open decoder +- stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0); +- if ((stream_index < 0) || (!decoder)) +- { +- DPRINTF(E_LOG, L_XCODE, "Did not find video stream or suitable decoder for '%s': %s\n", path, err2str(ret)); +- return 0; ++ ctx->audio_stream.codec = dec_ctx; ++ ctx->audio_stream.stream = ctx->ifmt_ctx->streams[stream_index]; + } + +- ret = avcodec_open2(ctx->ifmt_ctx->streams[stream_index]->codec, decoder, NULL); +- if (ret < 0) ++ if (ctx->settings.encode_video) + { +- DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", stream_index, err2str(ret)); +- return 0; +- } ++ // Find video stream and open decoder ++ stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0); ++ if ((stream_index < 0) || (!decoder)) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Did not find video stream or suitable decoder for '%s': %s\n", path, err2str(ret)); ++ goto out_fail; ++ } + +- ctx->video_stream = ctx->ifmt_ctx->streams[stream_index]; ++ CHECK_NULL(L_XCODE, dec_ctx = avcodec_alloc_context3(decoder)); + +- // Find a (random) subtitle stream which will be remuxed +- stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0); +- if (stream_index >= 0) +- { +- ctx->subtitle_stream = ctx->ifmt_ctx->streams[stream_index]; ++ ret = avcodec_open2(dec_ctx, NULL, NULL); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", stream_index, err2str(ret)); ++ goto out_fail; ++ } ++ ++ ctx->video_stream.codec = dec_ctx; ++ ctx->video_stream.stream = ctx->ifmt_ctx->streams[stream_index]; + } + + return 0; + + out_fail: ++ avcodec_free_context(&ctx->audio_stream.codec); ++ avcodec_free_context(&ctx->video_stream.codec); + avformat_close_input(&ctx->ifmt_ctx); + + return -1; +@@ -694,29 +789,18 @@ open_input(struct decode_ctx *ctx, const char *path, int decode_video) + static void + close_input(struct decode_ctx *ctx) + { +- if (ctx->audio_stream) +- avcodec_close(ctx->audio_stream->codec); +- if (ctx->video_stream) +- avcodec_close(ctx->video_stream->codec); +- ++ avcodec_free_context(&ctx->audio_stream.codec); ++ avcodec_free_context(&ctx->video_stream.codec); + avformat_close_input(&ctx->ifmt_ctx); + } + + static int + open_output(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + { +- AVStream *out_stream; +- AVStream *in_stream; +- AVCodecContext *dec_ctx; +- AVCodecContext *enc_ctx; +- AVCodec *encoder; +- const AVCodecDescriptor *codec_desc; +- enum AVCodecID codec_id; + int ret; +- int i; + + ctx->ofmt_ctx = NULL; +- avformat_alloc_output_context2(&ctx->ofmt_ctx, NULL, ctx->format, NULL); ++ avformat_alloc_output_context2(&ctx->ofmt_ctx, NULL, ctx->settings.format, NULL); + if (!ctx->ofmt_ctx) + { + DPRINTF(E_LOG, L_XCODE, 
"Could not create output context\n"); +@@ -727,89 +811,28 @@ open_output(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + if (!ctx->obuf) + { + DPRINTF(E_LOG, L_XCODE, "Could not create output evbuffer\n"); +- goto out_fail_evbuf; ++ goto out_free_output; + } + + ctx->ofmt_ctx->pb = avio_output_evbuffer_open(ctx->obuf); + if (!ctx->ofmt_ctx->pb) + { + DPRINTF(E_LOG, L_XCODE, "Could not create output avio pb\n"); +- goto out_fail_pb; ++ goto out_free_evbuf; + } + +- for (i = 0; i < src_ctx->ifmt_ctx->nb_streams; i++) +- { +- in_stream = src_ctx->ifmt_ctx->streams[i]; +- if (!decode_stream(src_ctx, in_stream)) +- { +- ctx->out_stream_map[i] = -1; +- continue; +- } +- +- out_stream = avformat_new_stream(ctx->ofmt_ctx, NULL); +- if (!out_stream) +- { +- DPRINTF(E_LOG, L_XCODE, "Failed allocating output stream\n"); +- goto out_fail_stream; +- } +- +- ctx->out_stream_map[i] = out_stream->index; +- ctx->in_stream_map[out_stream->index] = i; +- +- dec_ctx = in_stream->codec; +- enc_ctx = out_stream->codec; +- +- // TODO Enough to just remux subtitles? +- if (dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) +- { +- avcodec_copy_context(enc_ctx, dec_ctx); +- continue; +- } +- +- if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) +- codec_id = ctx->audio_codec; +- else if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) +- codec_id = ctx->video_codec; +- else +- continue; +- +- codec_desc = avcodec_descriptor_get(codec_id); +- encoder = avcodec_find_encoder(codec_id); +- if (!encoder) +- { +- if (codec_desc) +- DPRINTF(E_LOG, L_XCODE, "Necessary encoder (%s) for input stream %u not found\n", codec_desc->name, i); +- else +- DPRINTF(E_LOG, L_XCODE, "Necessary encoder (unknown) for input stream %u not found\n", i); +- goto out_fail_stream; +- } +- +- if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) +- { +- enc_ctx->sample_rate = ctx->sample_rate; +- enc_ctx->channel_layout = ctx->channel_layout; +- enc_ctx->channels = ctx->channels; +- enc_ctx->sample_fmt = ctx->sample_format; +- enc_ctx->time_base = (AVRational){1, ctx->sample_rate}; +- } +- else +- { +- enc_ctx->height = ctx->video_height; +- enc_ctx->width = ctx->video_width; +- enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; //FIXME +- enc_ctx->pix_fmt = avcodec_find_best_pix_fmt_of_list(encoder->pix_fmts, dec_ctx->pix_fmt, 1, NULL); +- enc_ctx->time_base = dec_ctx->time_base; +- } +- +- ret = avcodec_open2(enc_ctx, encoder, NULL); ++ if (ctx->settings.encode_audio) ++ { ++ ret = stream_add(ctx, &ctx->audio_stream, ctx->settings.audio_codec, ctx->settings.audio_codec_name); + if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Cannot open encoder (%s) for input stream #%u: %s\n", codec_desc->name, i, err2str(ret)); +- goto out_fail_codec; +- } ++ goto out_free_streams; ++ } + +- if (ctx->ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) +- enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER; ++ if (ctx->settings.encode_video) ++ { ++ ret = stream_add(ctx, &ctx->video_stream, ctx->settings.video_codec, ctx->settings.video_codec_name); ++ if (ret < 0) ++ goto out_free_streams; + } + + // Notice, this will not write WAV header (so we do that manually) +@@ -817,24 +840,19 @@ open_output(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error writing header to output buffer: %s\n", err2str(ret)); +- goto out_fail_write; ++ goto out_free_streams; + } + + return 0; + +- out_fail_write: +- out_fail_codec: +- for (i = 0; i < ctx->ofmt_ctx->nb_streams; i++) +- { +- enc_ctx = ctx->ofmt_ctx->streams[i]->codec; +- if (enc_ctx) 
+- avcodec_close(enc_ctx); +- } +- out_fail_stream: ++ out_free_streams: ++ avcodec_free_context(&ctx->audio_stream.codec); ++ avcodec_free_context(&ctx->video_stream.codec); ++ + avio_evbuffer_close(ctx->ofmt_ctx->pb); +- out_fail_pb: ++ out_free_evbuf: + evbuffer_free(ctx->obuf); +- out_fail_evbuf: ++ out_free_output: + avformat_free_context(ctx->ofmt_ctx); + + return -1; +@@ -843,95 +861,50 @@ open_output(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + static void + close_output(struct encode_ctx *ctx) + { +- int i; +- +- for (i = 0; i < ctx->ofmt_ctx->nb_streams; i++) +- { +- if (ctx->ofmt_ctx->streams[i]->codec) +- avcodec_close(ctx->ofmt_ctx->streams[i]->codec); +- } ++ avcodec_free_context(&ctx->audio_stream.codec); ++ avcodec_free_context(&ctx->video_stream.codec); + + avio_evbuffer_close(ctx->ofmt_ctx->pb); + evbuffer_free(ctx->obuf); + avformat_free_context(ctx->ofmt_ctx); + } + +-#if HAVE_DECL_AVFILTER_GRAPH_PARSE_PTR + static int +-open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, const char *filter_spec) ++open_filter(struct stream_ctx *out_stream, struct stream_ctx *in_stream, const char *filter_spec) + { +- AVFilter *buffersrc = NULL; +- AVFilter *buffersink = NULL; +- AVFilterContext *buffersrc_ctx = NULL; +- AVFilterContext *buffersink_ctx = NULL; +- AVFilterInOut *outputs = avfilter_inout_alloc(); +- AVFilterInOut *inputs = avfilter_inout_alloc(); +- AVFilterGraph *filter_graph = avfilter_graph_alloc(); ++ AVFilter *buffersrc; ++ AVFilter *format; ++ AVFilter *scale; ++ AVFilter *buffersink; ++ AVFilterContext *buffersrc_ctx; ++ AVFilterContext *format_ctx; ++ AVFilterContext *scale_ctx; ++ AVFilterContext *buffersink_ctx; ++ AVFilterGraph *filter_graph; + char args[512]; + int ret; + +- if (!outputs || !inputs || !filter_graph) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for filter_graph, input or output\n"); +- goto out_fail; +- } +- +- if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) +- { +- buffersrc = avfilter_get_by_name("buffer"); +- buffersink = avfilter_get_by_name("buffersink"); +- if (!buffersrc || !buffersink) +- { +- DPRINTF(E_LOG, L_XCODE, "Filtering source or sink element not found\n"); +- goto out_fail; +- } +- +- snprintf(args, sizeof(args), +- "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", +- dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, +- dec_ctx->time_base.num, dec_ctx->time_base.den, +- dec_ctx->sample_aspect_ratio.num, +- dec_ctx->sample_aspect_ratio.den); +- +- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Cannot create buffer source: %s\n", err2str(ret)); +- goto out_fail; +- } +- +- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Cannot create buffer sink: %s\n", err2str(ret)); +- goto out_fail; +- } ++ CHECK_NULL(L_XCODE, filter_graph = avfilter_graph_alloc()); + +- ret = av_opt_set_bin(buffersink_ctx, "pix_fmts", (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt), AV_OPT_SEARCH_CHILDREN); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Cannot set output pixel format: %s\n", err2str(ret)); +- goto out_fail; +- } +- } +- else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) ++ if (in_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) + { + buffersrc = avfilter_get_by_name("abuffer"); ++ format = avfilter_get_by_name("aformat"); + buffersink = avfilter_get_by_name("abuffersink"); 
+- if (!buffersrc || !buffersink) ++ if (!buffersrc || !format || !buffersink) + { +- DPRINTF(E_LOG, L_XCODE, "Filtering source or sink element not found\n"); ++ DPRINTF(E_LOG, L_XCODE, "Filtering source, format or sink element not found\n"); + goto out_fail; + } + +- if (!dec_ctx->channel_layout) +- dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels); ++ if (!in_stream->codec->channel_layout) ++ in_stream->codec->channel_layout = av_get_default_channel_layout(in_stream->codec->channels); + + snprintf(args, sizeof(args), + "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64, +- dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate, +- av_get_sample_fmt_name(dec_ctx->sample_fmt), +- dec_ctx->channel_layout); ++ in_stream->stream->time_base.num, in_stream->stream->time_base.den, ++ in_stream->codec->sample_rate, av_get_sample_fmt_name(in_stream->codec->sample_fmt), ++ in_stream->codec->channel_layout); + + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); + if (ret < 0) +@@ -940,121 +913,49 @@ open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecConte + goto out_fail; + } + +- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Cannot create audio buffer sink: %s\n", err2str(ret)); +- goto out_fail; +- } ++ snprintf(args, sizeof(args), ++ "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64, ++ av_get_sample_fmt_name(out_stream->codec->sample_fmt), out_stream->codec->sample_rate, ++ out_stream->codec->channel_layout); + +- ret = av_opt_set_bin(buffersink_ctx, "sample_fmts", +- (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN); ++ ret = avfilter_graph_create_filter(&format_ctx, format, "format", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot set output sample format: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create audio format filter: %s\n", err2str(ret)); + goto out_fail; + } + +- ret = av_opt_set_bin(buffersink_ctx, "channel_layouts", +- (uint8_t*)&enc_ctx->channel_layout, sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN); ++ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot set output channel layout: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create audio buffer sink: %s\n", err2str(ret)); + goto out_fail; + } + +- ret = av_opt_set_bin(buffersink_ctx, "sample_rates", +- (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate), AV_OPT_SEARCH_CHILDREN); +- if (ret < 0) ++ if ( (ret = avfilter_link(buffersrc_ctx, 0, format_ctx, 0)) < 0 || ++ (ret = avfilter_link(format_ctx, 0, buffersink_ctx, 0)) < 0 ) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot set output sample rate: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Error connecting audio filters: %s\n", err2str(ret)); + goto out_fail; + } + } +- else +- { +- DPRINTF(E_LOG, L_XCODE, "Bug! Unknown type passed to filter graph init\n"); +- goto out_fail; +- } +- +- /* Endpoints for the filter graph. 
*/ +- outputs->name = av_strdup("in"); +- outputs->filter_ctx = buffersrc_ctx; +- outputs->pad_idx = 0; +- outputs->next = NULL; +- inputs->name = av_strdup("out"); +- inputs->filter_ctx = buffersink_ctx; +- inputs->pad_idx = 0; +- inputs->next = NULL; +- if (!outputs->name || !inputs->name) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for outputs/inputs\n"); +- goto out_fail; +- } +- +- ret = avfilter_graph_parse_ptr(filter_graph, filter_spec, &inputs, &outputs, NULL); +- if (ret < 0) +- goto out_fail; +- +- ret = avfilter_graph_config(filter_graph, NULL); +- if (ret < 0) +- goto out_fail; +- +- /* Fill filtering context */ +- filter_ctx->buffersrc_ctx = buffersrc_ctx; +- filter_ctx->buffersink_ctx = buffersink_ctx; +- filter_ctx->filter_graph = filter_graph; +- +- avfilter_inout_free(&inputs); +- avfilter_inout_free(&outputs); +- +- return 0; +- +- out_fail: +- avfilter_graph_free(&filter_graph); +- avfilter_inout_free(&inputs); +- avfilter_inout_free(&outputs); +- +- return -1; +-} +-#else +-static int +-open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, const char *filter_spec) +-{ +- +- AVFilter *buffersrc = NULL; +- AVFilter *format = NULL; +- AVFilter *buffersink = NULL; +- AVFilterContext *buffersrc_ctx = NULL; +- AVFilterContext *format_ctx = NULL; +- AVFilterContext *buffersink_ctx = NULL; +- AVFilterGraph *filter_graph = avfilter_graph_alloc(); +- char args[512]; +- int ret; +- +- if (!filter_graph) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for filter_graph\n"); +- goto out_fail; +- } +- +- if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) ++ else if (in_stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) + { + buffersrc = avfilter_get_by_name("buffer"); + format = avfilter_get_by_name("format"); ++ scale = avfilter_get_by_name("scale"); + buffersink = avfilter_get_by_name("buffersink"); + if (!buffersrc || !format || !buffersink) + { +- DPRINTF(E_LOG, L_XCODE, "Filtering source, format or sink element not found\n"); ++ DPRINTF(E_LOG, L_XCODE, "Filtering source, format, scale or sink element not found\n"); + goto out_fail; + } + + snprintf(args, sizeof(args), + "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", +- dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, +- dec_ctx->time_base.num, dec_ctx->time_base.den, +- dec_ctx->sample_aspect_ratio.num, +- dec_ctx->sample_aspect_ratio.den); ++ in_stream->codec->width, in_stream->codec->height, in_stream->codec->pix_fmt, ++ in_stream->stream->time_base.num, in_stream->stream->time_base.den, ++ in_stream->codec->sample_aspect_ratio.num, in_stream->codec->sample_aspect_ratio.den); + + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); + if (ret < 0) +@@ -1064,8 +965,7 @@ open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecConte + } + + snprintf(args, sizeof(args), +- "pix_fmt=%d", +- enc_ctx->pix_fmt); ++ "pix_fmt=%d", out_stream->codec->pix_fmt); + + ret = avfilter_graph_create_filter(&format_ctx, format, "format", args, NULL, filter_graph); + if (ret < 0) +@@ -1074,56 +974,28 @@ open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecConte + goto out_fail; + } + +- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Cannot create buffer sink: %s\n", err2str(ret)); +- goto out_fail; +- } +- } +- else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) +- { +- buffersrc = avfilter_get_by_name("abuffer"); 
+- format = avfilter_get_by_name("aformat"); +- buffersink = avfilter_get_by_name("abuffersink"); +- if (!buffersrc || !format || !buffersink) +- { +- DPRINTF(E_LOG, L_XCODE, "Filtering source, format or sink element not found\n"); +- goto out_fail; +- } +- +- if (!dec_ctx->channel_layout) +- dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels); +- + snprintf(args, sizeof(args), +- "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64, +- dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate, +- av_get_sample_fmt_name(dec_ctx->sample_fmt), +- dec_ctx->channel_layout); ++ "width=%d:height=%d", out_stream->codec->width, out_stream->codec->height); + +- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); ++ ret = avfilter_graph_create_filter(&scale_ctx, scale, "scale", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create audio buffer source: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create scale filter: %s\n", err2str(ret)); + goto out_fail; + } + +- snprintf(args, sizeof(args), +- "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64, +- av_get_sample_fmt_name(enc_ctx->sample_fmt), enc_ctx->sample_rate, +- enc_ctx->channel_layout); +- +- ret = avfilter_graph_create_filter(&format_ctx, format, "format", args, NULL, filter_graph); ++ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create audio format filter: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create buffer sink: %s\n", err2str(ret)); + goto out_fail; + } + +- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph); +- if (ret < 0) ++ if ( (ret = avfilter_link(buffersrc_ctx, 0, format_ctx, 0)) < 0 || ++ (ret = avfilter_link(format_ctx, 0, scale_ctx, 0)) < 0 || ++ (ret = avfilter_link(scale_ctx, 0, buffersink_ctx, 0)) < 0 ) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create audio buffer sink: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Error connecting video filters: %s\n", err2str(ret)); + goto out_fail; + } + } +@@ -1133,20 +1005,14 @@ open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecConte + goto out_fail; + } + +- ret = avfilter_link(buffersrc_ctx, 0, format_ctx, 0); +- if (ret >= 0) +- ret = avfilter_link(format_ctx, 0, buffersink_ctx, 0); +- if (ret < 0) +- DPRINTF(E_LOG, L_XCODE, "Error connecting filters: %s\n", err2str(ret)); +- + ret = avfilter_graph_config(filter_graph, NULL); + if (ret < 0) + goto out_fail; + + /* Fill filtering context */ +- filter_ctx->buffersrc_ctx = buffersrc_ctx; +- filter_ctx->buffersink_ctx = buffersink_ctx; +- filter_ctx->filter_graph = filter_graph; ++ out_stream->buffersrc_ctx = buffersrc_ctx; ++ out_stream->buffersink_ctx = buffersink_ctx; ++ out_stream->filter_graph = filter_graph; + + return 0; + +@@ -1155,44 +1021,24 @@ open_filter(struct filter_ctx *filter_ctx, AVCodecContext *dec_ctx, AVCodecConte + + return -1; + } +-#endif + + static int + open_filters(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + { +- AVCodecContext *enc_ctx; +- AVCodecContext *dec_ctx; +- const char *filter_spec; +- unsigned int stream_index; +- int i; + int ret; + +- ctx->filter_ctx = av_malloc_array(ctx->ofmt_ctx->nb_streams, sizeof(*ctx->filter_ctx)); +- if (!ctx->filter_ctx) ++ if (ctx->settings.encode_audio) + { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for outputs/inputs\n"); 
+- return -1; ++ // anull is a passthrough (dummy) filter for audio ++ ret = open_filter(&ctx->audio_stream, &src_ctx->audio_stream, "anull"); ++ if (ret < 0) ++ goto out_fail; + } + +- for (i = 0; i < ctx->ofmt_ctx->nb_streams; i++) ++ if (ctx->settings.encode_video) + { +- ctx->filter_ctx[i].buffersrc_ctx = NULL; +- ctx->filter_ctx[i].buffersink_ctx = NULL; +- ctx->filter_ctx[i].filter_graph = NULL; +- +- stream_index = ctx->in_stream_map[i]; +- +- enc_ctx = ctx->ofmt_ctx->streams[i]->codec; +- dec_ctx = src_ctx->ifmt_ctx->streams[stream_index]->codec; +- +- if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) +- filter_spec = "null"; /* passthrough (dummy) filter for video */ +- else if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) +- filter_spec = "anull"; /* passthrough (dummy) filter for audio */ +- else +- continue; +- +- ret = open_filter(&ctx->filter_ctx[i], dec_ctx, enc_ctx, filter_spec); ++ // null is a passthrough (dummy) filter for video ++ ret = open_filter(&ctx->video_stream, &src_ctx->video_stream, "null"); + if (ret < 0) + goto out_fail; + } +@@ -1200,12 +1046,8 @@ open_filters(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + return 0; + + out_fail: +- for (i = 0; i < ctx->ofmt_ctx->nb_streams; i++) +- { +- if (ctx->filter_ctx && ctx->filter_ctx[i].filter_graph) +- avfilter_graph_free(&ctx->filter_ctx[i].filter_graph); +- } +- av_free(ctx->filter_ctx); ++ avfilter_graph_free(&ctx->audio_stream.filter_graph); ++ avfilter_graph_free(&ctx->video_stream.filter_graph); + + return -1; + } +@@ -1213,14 +1055,8 @@ open_filters(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + static void + close_filters(struct encode_ctx *ctx) + { +- int i; +- +- for (i = 0; i < ctx->ofmt_ctx->nb_streams; i++) +- { +- if (ctx->filter_ctx && ctx->filter_ctx[i].filter_graph) +- avfilter_graph_free(&ctx->filter_ctx[i].filter_graph); +- } +- av_free(ctx->filter_ctx); ++ avfilter_graph_free(&ctx->audio_stream.filter_graph); ++ avfilter_graph_free(&ctx->video_stream.filter_graph); + } + + +@@ -1229,21 +1065,16 @@ close_filters(struct encode_ctx *ctx) + /* Setup */ + + struct decode_ctx * +-transcode_decode_setup(enum data_kind data_kind, const char *path, uint32_t song_length, int decode_video) ++transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length) + { + struct decode_ctx *ctx; + +- ctx = calloc(1, sizeof(struct decode_ctx)); +- if (!ctx) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for decode ctx\n"); +- return NULL; +- } ++ CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct decode_ctx))); + + ctx->duration = song_length; + ctx->data_kind = data_kind; + +- if (open_input(ctx, path, decode_video) < 0) ++ if ((init_settings(&ctx->settings, profile) < 0) || (open_input(ctx, path) < 0)) + { + free(ctx); + return NULL; +@@ -1255,18 +1086,13 @@ transcode_decode_setup(enum data_kind data_kind, const char *path, uint32_t song + } + + struct encode_ctx * +-transcode_encode_setup(struct decode_ctx *src_ctx, enum transcode_profile profile, off_t *est_size) ++transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size) + { + struct encode_ctx *ctx; + +- ctx = calloc(1, sizeof(struct encode_ctx)); +- if (!ctx) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for encode ctx\n"); +- return NULL; +- } ++ CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct encode_ctx))); + +- if ((init_profile(ctx, profile) < 0) || (open_output(ctx, src_ctx) < 0)) ++ if ((init_settings(&ctx->settings, profile) < 0) || 
(open_output(ctx, src_ctx) < 0)) + { + free(ctx); + return NULL; +@@ -1280,37 +1106,29 @@ transcode_encode_setup(struct decode_ctx *src_ctx, enum transcode_profile profil + } + + if (src_ctx->data_kind == DATA_KIND_HTTP) +- ctx->icy_interval = METADATA_ICY_INTERVAL * ctx->channels * ctx->byte_depth * ctx->sample_rate; ++ ctx->icy_interval = METADATA_ICY_INTERVAL * ctx->settings.channels * ctx->settings.byte_depth * ctx->settings.sample_rate; + +- if (profile == XCODE_PCM16_HEADER) +- { +- ctx->wavhdr = 1; +- make_wav_header(ctx, src_ctx, est_size); +- } ++ if (ctx->settings.wavheader) ++ make_wav_header(ctx, src_ctx, est_size); + + return ctx; + } + + struct transcode_ctx * +-transcode_setup(enum data_kind data_kind, const char *path, uint32_t song_length, enum transcode_profile profile, off_t *est_size) ++transcode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length, off_t *est_size) + { + struct transcode_ctx *ctx; + +- ctx = malloc(sizeof(struct transcode_ctx)); +- if (!ctx) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for transcode ctx\n"); +- return NULL; +- } ++ CHECK_NULL(L_XCODE, ctx = malloc(sizeof(struct transcode_ctx))); + +- ctx->decode_ctx = transcode_decode_setup(data_kind, path, song_length, profile & XCODE_HAS_VIDEO); ++ ctx->decode_ctx = transcode_decode_setup(profile, data_kind, path, song_length); + if (!ctx->decode_ctx) + { + free(ctx); + return NULL; + } + +- ctx->encode_ctx = transcode_encode_setup(ctx->decode_ctx, profile, est_size); ++ ctx->encode_ctx = transcode_encode_setup(profile, ctx->decode_ctx, est_size); + if (!ctx->encode_ctx) + { + transcode_decode_cleanup(ctx->decode_ctx); +@@ -1325,41 +1143,49 @@ struct decode_ctx * + transcode_decode_setup_raw(void) + { + struct decode_ctx *ctx; +- struct AVCodec *decoder; ++ AVCodec *decoder; ++ int ret; ++ ++ CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct decode_ctx))); + +- ctx = calloc(1, sizeof(struct decode_ctx)); +- if (!ctx) ++ if (init_settings(&ctx->settings, XCODE_PCM16_NOHEADER) < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for decode ctx\n"); +- return NULL; ++ goto out_free_ctx; + } + +- ctx->ifmt_ctx = avformat_alloc_context(); +- if (!ctx->ifmt_ctx) ++ // In raw mode we won't actually need to read or decode, but we still setup ++ // the decode_ctx because transcode_encode_setup() gets info about the input ++ // through this structure (TODO dont' do that) ++ decoder = avcodec_find_decoder(ctx->settings.audio_codec); ++ if (!decoder) + { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for decode format ctx\n"); +- free(ctx); +- return NULL; ++ DPRINTF(E_LOG, L_XCODE, "Could not find decoder for: %s\n", ctx->settings.audio_codec_name); ++ goto out_free_ctx; + } + +- decoder = avcodec_find_decoder(AV_CODEC_ID_PCM_S16LE); ++ CHECK_NULL(L_XCODE, ctx->ifmt_ctx = avformat_alloc_context()); ++ CHECK_NULL(L_XCODE, ctx->audio_stream.codec = avcodec_alloc_context3(decoder)); ++ CHECK_NULL(L_XCODE, ctx->audio_stream.stream = avformat_new_stream(ctx->ifmt_ctx, NULL)); + +- ctx->audio_stream = avformat_new_stream(ctx->ifmt_ctx, decoder); +- if (!ctx->audio_stream) ++ stream_settings_set(&ctx->audio_stream, &ctx->settings, decoder->type); ++ ++ // Copy the data we just set to the structs we will be querying later, e.g. 
in open_filter ++ ctx->audio_stream.stream->time_base = ctx->audio_stream.codec->time_base; ++ ret = avcodec_parameters_from_context(ctx->audio_stream.stream->codecpar, ctx->audio_stream.codec); ++ if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Could not create stream with PCM16 decoder\n"); +- avformat_free_context(ctx->ifmt_ctx); +- free(ctx); +- return NULL; ++ DPRINTF(E_LOG, L_XCODE, "Cannot copy stream parameters (%s): %s\n", ctx->settings.audio_codec_name, err2str(ret)); ++ goto out_free_codec; + } + +- ctx->audio_stream->codec->time_base.num = 1; +- ctx->audio_stream->codec->time_base.den = 44100; +- ctx->audio_stream->codec->sample_rate = 44100; +- ctx->audio_stream->codec->sample_fmt = AV_SAMPLE_FMT_S16; +- ctx->audio_stream->codec->channel_layout = AV_CH_LAYOUT_STEREO; +- + return ctx; ++ ++ out_free_codec: ++ avcodec_free_context(&ctx->audio_stream.codec); ++ avformat_free_context(ctx->ifmt_ctx); ++ out_free_ctx: ++ free(ctx); ++ return NULL; + } + + int +@@ -1457,15 +1283,18 @@ transcode_decode_cleanup(struct decode_ctx *ctx) + void + transcode_encode_cleanup(struct encode_ctx *ctx) + { +- int i; ++ if (ctx->audio_stream.stream) ++ { ++ if (ctx->audio_stream.filter_graph) ++ filter_encode_write_frame(ctx, &ctx->audio_stream, NULL); ++ flush_encoder(ctx, &ctx->audio_stream); ++ } + +- // Flush filters and encoders +- for (i = 0; i < ctx->ofmt_ctx->nb_streams; i++) ++ if (ctx->video_stream.stream) + { +- if (!ctx->filter_ctx[i].filter_graph) +- continue; +- filter_encode_write_frame(ctx, NULL, i); +- flush_encoder(ctx, i); ++ if (ctx->video_stream.filter_graph) ++ filter_encode_write_frame(ctx, &ctx->video_stream, NULL); ++ flush_encoder(ctx, &ctx->video_stream); + } + + av_write_trailer(ctx->ofmt_ctx); +@@ -1498,9 +1327,8 @@ int + transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx) + { + AVPacket packet; +- AVStream *in_stream; + AVFrame *frame; +- unsigned int stream_index; ++ enum AVMediaType type; + int got_frame; + int retry; + int ret; +@@ -1519,14 +1347,14 @@ transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx) + retry = 0; + do + { +- ret = read_packet(&packet, &in_stream, &stream_index, ctx); ++ ret = read_packet(&packet, &type, ctx); + if (ret < 0) + { + // Some decoders need to be flushed, meaning the decoder is to be called + // with empty input until no more frames are returned + DPRINTF(E_DBG, L_XCODE, "Could not read packet, will flush decoders\n"); + +- got_frame = flush_decoder(frame, &in_stream, &stream_index, ctx); ++ got_frame = flush_decoder(frame, &type, ctx); + if (got_frame) + break; + +@@ -1540,22 +1368,22 @@ transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx) + // "used" will tell us how much of the packet was decoded. We may + // not get a frame because of insufficient input, in which case we loop to + // read another packet. 
+- if (in_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) +- used = avcodec_decode_audio4(in_stream->codec, frame, &got_frame, &packet); ++ if (type == AVMEDIA_TYPE_AUDIO) ++ used = avcodec_decode_audio4(ctx->audio_stream.codec, frame, &got_frame, &packet); + else +- used = avcodec_decode_video2(in_stream->codec, frame, &got_frame, &packet); ++ used = avcodec_decode_video2(ctx->video_stream.codec, frame, &got_frame, &packet); + + // decoder returned an error, but maybe the packet was just a bad apple, + // so let's try MAX_BAD_PACKETS times before giving up + if (used < 0) + { +- DPRINTF(E_DBG, L_XCODE, "Couldn't decode packet\n"); ++ DPRINTF(E_DBG, L_XCODE, "Couldn't decode packet: %s\n", err2str(used)); + + retry += 1; + if (retry < MAX_BAD_PACKETS) + continue; + +- DPRINTF(E_LOG, L_XCODE, "Couldn't decode packet after %i retries\n", MAX_BAD_PACKETS); ++ DPRINTF(E_LOG, L_XCODE, "Couldn't decode packet after %i retries: %s\n", MAX_BAD_PACKETS, err2str(used)); + + av_frame_free(&frame); + return -1; +@@ -1587,7 +1415,7 @@ transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx) + } + + (*decoded)->frame = frame; +- (*decoded)->stream_index = stream_index; ++ (*decoded)->type = type; + } + + return got_frame; +@@ -1597,24 +1425,27 @@ transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx) + int + transcode_encode(struct evbuffer *evbuf, struct decoded_frame *decoded, struct encode_ctx *ctx) + { +- int stream_index; ++ struct stream_ctx *s; + int encoded_length; + int ret; + + encoded_length = 0; + +- stream_index = ctx->out_stream_map[decoded->stream_index]; +- if (stream_index < 0) ++ if (decoded->type == AVMEDIA_TYPE_AUDIO) ++ s = &ctx->audio_stream; ++ else if (decoded->type == AVMEDIA_TYPE_VIDEO) ++ s = &ctx->video_stream; ++ else + return -1; + +- if (ctx->wavhdr) ++ if (ctx->settings.wavheader) + { + encoded_length += sizeof(ctx->header); + evbuffer_add(evbuf, ctx->header, sizeof(ctx->header)); +- ctx->wavhdr = 0; ++ ctx->settings.wavheader = 0; + } + +- ret = filter_encode_write_frame(ctx, decoded->frame, stream_index); ++ ret = filter_encode_write_frame(ctx, s, decoded->frame); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error occurred: %s\n", err2str(ret)); +@@ -1680,8 +1511,8 @@ transcode_raw2frame(uint8_t *data, size_t size) + return NULL; + } + +- decoded->stream_index = 0; +- decoded->frame = frame; ++ decoded->type = AVMEDIA_TYPE_AUDIO; ++ decoded->frame = frame; + + frame->nb_samples = size / 4; + frame->format = AV_SAMPLE_FMT_S16; +@@ -1704,91 +1535,82 @@ transcode_raw2frame(uint8_t *data, size_t size) + } + + +-/* TODO remux this frame without reencoding +- av_packet_rescale_ts(&packet, in_stream->time_base, out_stream->time_base); +- +- ret = av_interleaved_write_frame(ctx->ofmt_ctx, &packet); +- if (ret < 0) +- goto end;*/ +- +- + /* Seeking */ + + int + transcode_seek(struct transcode_ctx *ctx, int ms) + { +- struct decode_ctx *decode_ctx; +- AVStream *in_stream; ++ struct decode_ctx *dec_ctx = ctx->decode_ctx; ++ struct stream_ctx *s; + int64_t start_time; + int64_t target_pts; + int64_t got_pts; + int got_ms; + int ret; +- int i; + +- decode_ctx = ctx->decode_ctx; +- in_stream = ctx->decode_ctx->audio_stream; +- start_time = in_stream->start_time; ++ s = &dec_ctx->audio_stream; ++ if (!s->stream) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Could not seek in non-audio input\n"); ++ return -1; ++ } ++ ++ start_time = s->stream->start_time; + + target_pts = ms; + target_pts = target_pts * AV_TIME_BASE / 1000; +- target_pts = 
av_rescale_q(target_pts, AV_TIME_BASE_Q, in_stream->time_base); ++ target_pts = av_rescale_q(target_pts, AV_TIME_BASE_Q, s->stream->time_base); + + if ((start_time != AV_NOPTS_VALUE) && (start_time > 0)) + target_pts += start_time; + +- ret = av_seek_frame(decode_ctx->ifmt_ctx, in_stream->index, target_pts, AVSEEK_FLAG_BACKWARD); ++ ret = av_seek_frame(dec_ctx->ifmt_ctx, s->stream->index, target_pts, AVSEEK_FLAG_BACKWARD); + if (ret < 0) + { + DPRINTF(E_WARN, L_XCODE, "Could not seek into stream: %s\n", err2str(ret)); + return -1; + } + +- for (i = 0; i < decode_ctx->ifmt_ctx->nb_streams; i++) +- { +- if (decode_stream(decode_ctx, decode_ctx->ifmt_ctx->streams[i])) +- avcodec_flush_buffers(decode_ctx->ifmt_ctx->streams[i]->codec); +-// avcodec_flush_buffers(ctx->ofmt_ctx->streams[stream_nb]->codec); +- } ++ avcodec_flush_buffers(s->codec); + + // Fast forward until first packet with a timestamp is found +- in_stream->codec->skip_frame = AVDISCARD_NONREF; ++ s->codec->skip_frame = AVDISCARD_NONREF; + while (1) + { +- av_packet_unref(&decode_ctx->packet); ++ av_packet_unref(&dec_ctx->packet); + +- decode_ctx->timestamp = av_gettime(); ++ dec_ctx->timestamp = av_gettime(); + +- ret = av_read_frame(decode_ctx->ifmt_ctx, &decode_ctx->packet); ++ ret = av_read_frame(dec_ctx->ifmt_ctx, &dec_ctx->packet); + if (ret < 0) + { + DPRINTF(E_WARN, L_XCODE, "Could not read more data while seeking: %s\n", err2str(ret)); +- in_stream->codec->skip_frame = AVDISCARD_DEFAULT; ++ s->codec->skip_frame = AVDISCARD_DEFAULT; + return -1; + } + +- if (decode_ctx->packet.stream_index != in_stream->index) ++ if (stream_find(dec_ctx, dec_ctx->packet.stream_index) == AVMEDIA_TYPE_UNKNOWN) + continue; + + // Need a pts to return the real position +- if (decode_ctx->packet.pts == AV_NOPTS_VALUE) ++ if (dec_ctx->packet.pts == AV_NOPTS_VALUE) + continue; + + break; + } +- in_stream->codec->skip_frame = AVDISCARD_DEFAULT; ++ s->codec->skip_frame = AVDISCARD_DEFAULT; + + // Tell transcode_decode() to resume with ctx->packet +- decode_ctx->resume = 1; +- decode_ctx->resume_offset = 0; ++ dec_ctx->resume = 1; ++ dec_ctx->resume_offset = 0; + + // Compute position in ms from pts +- got_pts = decode_ctx->packet.pts; ++ got_pts = dec_ctx->packet.pts; + + if ((start_time != AV_NOPTS_VALUE) && (start_time > 0)) + got_pts -= start_time; + +- got_pts = av_rescale_q(got_pts, in_stream->time_base, AV_TIME_BASE_Q); ++ got_pts = av_rescale_q(got_pts, s->stream->time_base, AV_TIME_BASE_Q); + got_ms = got_pts / (AV_TIME_BASE / 1000); + + // Since negative return would mean error, we disallow it here +diff --git a/src/transcode.h b/src/transcode.h +index 3a8614f0..c6d60cd7 100644 +--- a/src/transcode.h ++++ b/src/transcode.h +@@ -6,19 +6,17 @@ + #include "db.h" + #include "http.h" + +-#define XCODE_WAVHEADER (1 << 14) +-#define XCODE_HAS_VIDEO (1 << 15) +- + enum transcode_profile + { +- // Transcodes the best available audio stream into PCM16 (does not add wav header) +- XCODE_PCM16_NOHEADER = 1, +- // Transcodes the best available audio stream into PCM16 (with wav header) +- XCODE_PCM16_HEADER = XCODE_WAVHEADER | 2, +- // Transcodes the best available audio stream into MP3 +- XCODE_MP3 = 3, +- // Transcodes video + audio + subtitle streams (not tested - for future use) +- XCODE_H264_AAC = XCODE_HAS_VIDEO | 4, ++ // Transcodes the best audio stream into PCM16 (does not add wav header) ++ XCODE_PCM16_NOHEADER, ++ // Transcodes the best audio stream into PCM16 (with wav header) ++ XCODE_PCM16_HEADER, ++ // Transcodes the best audio stream 
into MP3 ++ XCODE_MP3, ++ // Transcodes the best video stream into JPEG/PNG ++ XCODE_JPEG, ++ XCODE_PNG, + }; + + struct decode_ctx; +@@ -28,13 +26,13 @@ struct decoded_frame; + + // Setting up + struct decode_ctx * +-transcode_decode_setup(enum data_kind data_kind, const char *path, uint32_t song_length, int decode_video); ++transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length); + + struct encode_ctx * +-transcode_encode_setup(struct decode_ctx *src_ctx, enum transcode_profile profile, off_t *est_size); ++transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size); + + struct transcode_ctx * +-transcode_setup(enum data_kind data_kind, const char *path, uint32_t song_length, enum transcode_profile profile, off_t *est_size); ++transcode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length, off_t *est_size); + + struct decode_ctx * + transcode_decode_setup_raw(void); diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-2.patch forked-daapd-25.0/debian/patches/ffmpeg4-2.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-2.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-2.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,38 @@ +commit 7c8eba74bbdaa6a202cf9f5c44face5f5b0bbea5 +Author: ejurgensen +Date: Sun Feb 26 15:40:37 2017 +0100 + + [transcode] Remove unused param to open_filter() + +diff --git a/src/transcode.c b/src/transcode.c +index 29d4b9a4..6b7dc00f 100644 +--- a/src/transcode.c ++++ b/src/transcode.c +@@ -870,7 +870,7 @@ close_output(struct encode_ctx *ctx) + } + + static int +-open_filter(struct stream_ctx *out_stream, struct stream_ctx *in_stream, const char *filter_spec) ++open_filter(struct stream_ctx *out_stream, struct stream_ctx *in_stream) + { + AVFilter *buffersrc; + AVFilter *format; +@@ -1029,16 +1029,14 @@ open_filters(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + + if (ctx->settings.encode_audio) + { +- // anull is a passthrough (dummy) filter for audio +- ret = open_filter(&ctx->audio_stream, &src_ctx->audio_stream, "anull"); ++ ret = open_filter(&ctx->audio_stream, &src_ctx->audio_stream); + if (ret < 0) + goto out_fail; + } + + if (ctx->settings.encode_video) + { +- // null is a passthrough (dummy) filter for video +- ret = open_filter(&ctx->video_stream, &src_ctx->video_stream, "null"); ++ ret = open_filter(&ctx->video_stream, &src_ctx->video_stream); + if (ret < 0) + goto out_fail; + } diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-3.patch forked-daapd-25.0/debian/patches/ffmpeg4-3.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-3.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-3.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,334 @@ +commit 5afed60a42fef25693cd678c20b62343cd978394 +Author: ejurgensen +Date: Sun Feb 26 17:50:04 2017 +0100 + + [transcode] Implement new ffmpeg encoding methods: avcodec_send_frame/avcodec_receive_packet + +diff --git a/src/transcode.c b/src/transcode.c +index 6b7dc00f..a587081a 100644 +--- a/src/transcode.c ++++ b/src/transcode.c +@@ -93,6 +93,10 @@ struct stream_ctx + AVFilterContext *buffersink_ctx; + AVFilterContext *buffersrc_ctx; + AVFilterGraph *filter_graph; ++ ++ // Used for seeking ++ int64_t prev_pts; ++ int64_t offset_pts; + }; + + struct decode_ctx +@@ -139,9 +143,11 @@ struct encode_ctx + // The ffmpeg muxer writes to this buffer using the avio_evbuffer interface + struct 
evbuffer *obuf; + +- // Used for seeking +- int64_t prev_pts[MAX_STREAMS]; +- int64_t offset_pts[MAX_STREAMS]; ++ // Contains the most recent packet from av_buffersink_get_frame() ++ AVFrame *filt_frame; ++ ++ // Contains the most recent packet from avcodec_receive_packet() ++ AVPacket *encoded_pkt; + + // How many output bytes we have processed in total + off_t total_bytes; +@@ -457,120 +463,67 @@ read_packet(AVPacket *packet, enum AVMediaType *type, struct decode_ctx *ctx) + return 0; + } + +-static int +-encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *filt_frame, int *got_frame) ++// Prepares a packet from the encoder for muxing ++static void ++packet_prepare(AVPacket *pkt, struct stream_ctx *s) + { +- AVPacket enc_pkt; +- unsigned int stream_index; +- int ret; +- int got_frame_local; +- +- if (!got_frame) +- got_frame = &got_frame_local; +- +- stream_index = s->stream->index; +- +- // Encode filtered frame +- enc_pkt.data = NULL; +- enc_pkt.size = 0; +- av_init_packet(&enc_pkt); +- +- if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) +- ret = avcodec_encode_audio2(s->codec, &enc_pkt, filt_frame, got_frame); +- else if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO) +- ret = avcodec_encode_video2(s->codec, &enc_pkt, filt_frame, got_frame); +- else +- return -1; +- +- if (ret < 0) +- return -1; +- if (!(*got_frame)) +- return 0; +- +- // Prepare packet for muxing +- enc_pkt.stream_index = stream_index; ++ pkt->stream_index = s->stream->index; + +- // This "wonderful" peace of code makes sure that the timestamp never decreases, +- // even if the user seeked backwards. The muxer will not accept decreasing +- // timestamps +- enc_pkt.pts += ctx->offset_pts[stream_index]; +- if (enc_pkt.pts < ctx->prev_pts[stream_index]) ++ // This "wonderful" peace of code makes sure that the timestamp always increases, ++ // even if the user seeked backwards. The muxer will not accept non-increasing ++ // timestamps. 
++ pkt->pts += s->offset_pts; ++ if (pkt->pts < s->prev_pts) + { +- ctx->offset_pts[stream_index] += ctx->prev_pts[stream_index] - enc_pkt.pts; +- enc_pkt.pts = ctx->prev_pts[stream_index]; ++ s->offset_pts += s->prev_pts - pkt->pts; ++ pkt->pts = s->prev_pts; + } +- ctx->prev_pts[stream_index] = enc_pkt.pts; +- enc_pkt.dts = enc_pkt.pts; //FIXME ++ s->prev_pts = pkt->pts; ++ pkt->dts = pkt->pts; //FIXME + +- av_packet_rescale_ts(&enc_pkt, s->codec->time_base, s->stream->time_base); +- +- // Mux encoded frame +- ret = av_interleaved_write_frame(ctx->ofmt_ctx, &enc_pkt); +- return ret; ++ av_packet_rescale_ts(pkt, s->codec->time_base, s->stream->time_base); + } + +-#if HAVE_DECL_AV_BUFFERSRC_ADD_FRAME_FLAGS && HAVE_DECL_AV_BUFFERSINK_GET_FRAME + static int +-filter_encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *frame) ++encode_write(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *filt_frame) + { +- AVFrame *filt_frame; + int ret; + +- // Push the decoded frame into the filtergraph +- if (frame) +- { +- ret = av_buffersrc_add_frame_flags(s->buffersrc_ctx, frame, 0); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Error while feeding the filtergraph: %s\n", err2str(ret)); +- return -1; +- } +- } ++ // If filt_frame is null then flushing will be initiated by the codec ++ ret = avcodec_send_frame(s->codec, filt_frame); ++ if (ret < 0) ++ return ret; + +- // Pull filtered frames from the filtergraph + while (1) + { +- filt_frame = av_frame_alloc(); +- if (!filt_frame) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for filt_frame\n"); +- return -1; +- } +- +- ret = av_buffersink_get_frame(s->buffersink_ctx, filt_frame); ++ ret = avcodec_receive_packet(s->codec, ctx->encoded_pkt); + if (ret < 0) + { +- /* if no more frames for output - returns AVERROR(EAGAIN) +- * if flushed and no more frames for output - returns AVERROR_EOF +- * rewrite retcode to 0 to show it as normal procedure completion +- */ +- if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ++ if (ret == AVERROR(EAGAIN)) + ret = 0; +- av_frame_free(&filt_frame); ++ + break; + } + +- filt_frame->pict_type = AV_PICTURE_TYPE_NONE; +- ret = encode_write_frame(ctx, s, filt_frame, NULL); +- av_frame_free(&filt_frame); ++ packet_prepare(ctx->encoded_pkt, s); ++ ++ ret = av_interleaved_write_frame(ctx->ofmt_ctx, ctx->encoded_pkt); + if (ret < 0) + break; + } + + return ret; + } +-#else ++ + static int +-filter_encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *frame) ++filter_encode_write(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *frame) + { +- AVFilterBufferRef *picref; +- AVFrame *filt_frame; + int ret; + + // Push the decoded frame into the filtergraph + if (frame) + { +- ret = av_buffersrc_write_frame(s->buffersrc_ctx, frame); ++ ret = av_buffersrc_add_frame(s->buffersrc_ctx, frame); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error while feeding the filtergraph: %s\n", err2str(ret)); +@@ -578,44 +531,28 @@ filter_encode_write_frame(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame + } + } + +- // Pull filtered frames from the filtergraph ++ // Pull filtered frames from the filtergraph and pass to encoder + while (1) + { +- filt_frame = av_frame_alloc(); +- if (!filt_frame) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for filt_frame\n"); +- return -1; +- } +- +- if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO && !(s->codec->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) +- ret = av_buffersink_read_samples(s->buffersink_ctx, &picref, s->codec->frame_size); +- 
else +- ret = av_buffersink_read(s->buffersink_ctx, &picref); +- ++ ret = av_buffersink_get_frame(s->buffersink_ctx, ctx->filt_frame); + if (ret < 0) + { +- /* if no more frames for output - returns AVERROR(EAGAIN) +- * if flushed and no more frames for output - returns AVERROR_EOF +- * rewrite retcode to 0 to show it as normal procedure completion +- */ +- if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ++ if (!frame) // We are flushing ++ ret = encode_write(ctx, s, NULL); ++ else if (ret == AVERROR(EAGAIN)) + ret = 0; +- av_frame_free(&filt_frame); ++ + break; + } + +- avfilter_copy_buf_props(filt_frame, picref); +- ret = encode_write_frame(ctx, s, filt_frame, NULL); +- av_frame_free(&filt_frame); +- avfilter_unref_buffer(picref); ++ ret = encode_write(ctx, s, ctx->filt_frame); ++ av_frame_unref(ctx->filt_frame); + if (ret < 0) + break; + } + + return ret; + } +-#endif + + /* Will step through each stream and feed the stream decoder with empty packets + * to see if the decoder has more frames lined up. Will return non-zero if a +@@ -647,24 +584,6 @@ flush_decoder(AVFrame *frame, enum AVMediaType *type, struct decode_ctx *ctx) + return got_frame; + } + +-static void +-flush_encoder(struct encode_ctx *ctx, struct stream_ctx *s) +-{ +- int ret; +- int got_frame; +- +- DPRINTF(E_DBG, L_XCODE, "Flushing output stream #%u encoder\n", s->stream->index); +- +- if (!(s->codec->codec->capabilities & CODEC_CAP_DELAY)) +- return; +- +- do +- { +- ret = encode_write_frame(ctx, s, NULL, &got_frame); +- } +- while ((ret == 0) && got_frame); +-} +- + + /* --------------------------- INPUT/OUTPUT INIT --------------------------- */ + +@@ -1069,6 +988,8 @@ transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, + + CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct decode_ctx))); + ++ av_init_packet(&ctx->packet); ++ + ctx->duration = song_length; + ctx->data_kind = data_kind; + +@@ -1078,8 +999,6 @@ transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, + return NULL; + } + +- av_init_packet(&ctx->packet); +- + return ctx; + } + +@@ -1089,6 +1008,8 @@ transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ct + struct encode_ctx *ctx; + + CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct encode_ctx))); ++ CHECK_NULL(L_XCODE, ctx->filt_frame = av_frame_alloc()); ++ CHECK_NULL(L_XCODE, ctx->encoded_pkt = av_packet_alloc()); + + if ((init_settings(&ctx->settings, profile) < 0) || (open_output(ctx, src_ctx) < 0)) + { +@@ -1281,24 +1202,24 @@ transcode_decode_cleanup(struct decode_ctx *ctx) + void + transcode_encode_cleanup(struct encode_ctx *ctx) + { ++ // Flush audio encoder + if (ctx->audio_stream.stream) +- { +- if (ctx->audio_stream.filter_graph) +- filter_encode_write_frame(ctx, &ctx->audio_stream, NULL); +- flush_encoder(ctx, &ctx->audio_stream); +- } ++ filter_encode_write(ctx, &ctx->audio_stream, NULL); + ++ // Flush video encoder + if (ctx->video_stream.stream) +- { +- if (ctx->video_stream.filter_graph) +- filter_encode_write_frame(ctx, &ctx->video_stream, NULL); +- flush_encoder(ctx, &ctx->video_stream); +- } ++ filter_encode_write(ctx, &ctx->video_stream, NULL); ++ ++ // Flush muxer ++ av_interleaved_write_frame(ctx->ofmt_ctx, NULL); + + av_write_trailer(ctx->ofmt_ctx); + + close_filters(ctx); + close_output(ctx); ++ ++ av_packet_free(&ctx->encoded_pkt); ++ av_frame_free(&ctx->filt_frame); + free(ctx); + } + +@@ -1443,7 +1364,7 @@ transcode_encode(struct evbuffer *evbuf, struct decoded_frame *decoded, struct e + 
ctx->settings.wavheader = 0; + } + +- ret = filter_encode_write_frame(ctx, s, decoded->frame); ++ ret = filter_encode_write(ctx, s, decoded->frame); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error occurred: %s\n", err2str(ret)); diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-4.patch forked-daapd-25.0/debian/patches/ffmpeg4-4.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-4.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-4.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,830 @@ +Modified by Peter Michael Green to fix hunks that would not apply. + +commit e96b9500dbec61236317567db4834dc095db1c2b +Author: ejurgensen +Date: Sun Feb 26 23:41:30 2017 +0100 + + [transcode] Implement new ffmpeg decoding methods: avcodec_send_packet/avcodec_receive_frame + +Index: forked-daapd-25.0.new/src/httpd.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/httpd.c ++++ forked-daapd-25.0.new/src/httpd.c +@@ -167,7 +167,7 @@ stream_end(struct stream_ctx *st, int fa + event_free(st->ev); + + if (st->xcode) +- transcode_cleanup(st->xcode); ++ transcode_cleanup(&st->xcode); + else + { + free(st->buf); +@@ -750,7 +750,7 @@ httpd_stream_file(struct evhttp_request + if (st->evbuf) + evbuffer_free(st->evbuf); + if (st->xcode) +- transcode_cleanup(st->xcode); ++ transcode_cleanup(&st->xcode); + if (st->buf) + free(st->buf); + if (st->fd > 0) +Index: forked-daapd-25.0.new/src/httpd_streaming.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/httpd_streaming.c ++++ forked-daapd-25.0.new/src/httpd_streaming.c +@@ -300,7 +300,7 @@ streaming_init(void) + } + + streaming_encode_ctx = transcode_encode_setup(XCODE_MP3, decode_ctx, NULL); +- transcode_decode_cleanup(decode_ctx); ++ transcode_decode_cleanup(&decode_ctx); + if (!streaming_encode_ctx) + { + DPRINTF(E_LOG, L_STREAMING, "Will not be able to stream mp3, libav does not support mp3 encoding\n"); +@@ -399,7 +399,7 @@ streaming_init(void) + close(streaming_pipe[0]); + close(streaming_pipe[1]); + pipe_fail: +- transcode_encode_cleanup(streaming_encode_ctx); ++ transcode_encode_cleanup(&streaming_encode_ctx); + + return -1; + } +@@ -432,7 +432,7 @@ streaming_deinit(void) + close(streaming_pipe[0]); + close(streaming_pipe[1]); + +- transcode_encode_cleanup(streaming_encode_ctx); ++ transcode_encode_cleanup(&streaming_encode_ctx); + evbuffer_free(streaming_encoded_data); + free(streaming_silence_data); + } +Index: forked-daapd-25.0.new/src/input.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/input.c ++++ forked-daapd-25.0.new/src/input.c +@@ -483,7 +483,7 @@ input_flush(short *flags) + pthread_mutex_unlock(&input_buffer.mutex); + + #ifdef DEBUG +- DPRINTF(E_DBG, L_PLAYER, "Flush with flags %d\n", *flags); ++ DPRINTF(E_DBG, L_PLAYER, "Flushing %zu bytes with flags %d\n", len, *flags); + #endif + } + +Index: forked-daapd-25.0.new/src/inputs/file_http.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/inputs/file_http.c ++++ forked-daapd-25.0.new/src/inputs/file_http.c +@@ -90,7 +90,9 @@ start(struct player_source *ps) + static int + stop(struct player_source *ps) + { +- transcode_cleanup(ps->input_ctx); ++ struct transcode_ctx *ctx = ps->input_ctx; ++ ++ transcode_cleanup(&ctx); + + ps->input_ctx = NULL; + ps->setup_done = 0; +Index: forked-daapd-25.0.new/src/transcode.c 
+=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.c ++++ forked-daapd-25.0.new/src/transcode.c +@@ -117,12 +117,14 @@ struct decode_ctx + // Data kind (used to determine if ICY metadata is relevant to look for) + enum data_kind data_kind; + +- // Contains the most recent packet from av_read_frame +- // Used for resuming after seek and for freeing correctly +- // in transcode_decode() +- AVPacket packet; +- int resume; +- int resume_offset; ++ // Set to true if we just seeked ++ bool resume; ++ ++ // Contains the most recent packet from av_read_frame() ++ AVPacket *packet; ++ ++ // Contains the most recent frame from avcodec_receive_frame() ++ AVFrame *decoded_frame; + + // Used to measure if av_read_frame is taking too long + int64_t timestamp; +@@ -164,6 +166,8 @@ struct transcode_ctx + { + struct decode_ctx *decode_ctx; + struct encode_ctx *encode_ctx; ++ ++ bool eof; + }; + + struct decoded_frame +@@ -407,56 +411,42 @@ static int decode_interrupt_cb(void *arg + return 0; + } + +-/* Will read the next packet from the source, unless we are in resume mode, in +- * which case the most recent packet will be returned, but with an adjusted data +- * pointer. Use ctx->resume and ctx->resume_offset to make the function resume +- * from the most recent packet. ++/* Will read the next packet from the source, unless we are resuming after a ++ * seek in which case the most recent packet found by transcode_seek() will be ++ * returned. The packet will be put in ctx->packet. + * +- * @out packet Pointer to an already allocated AVPacket. The content of the +- * packet will be updated, and packet->data is pointed to the data +- * returned by av_read_frame(). The packet struct is owned by the +- * caller, but *not* packet->data, so don't free the packet with +- * av_free_packet()/av_packet_unref() + * @out type Media type of packet + * @in ctx Decode context + * @return 0 if OK, < 0 on error or end of file + */ + static int +-read_packet(AVPacket *packet, enum AVMediaType *type, struct decode_ctx *ctx) ++read_packet(enum AVMediaType *type, struct decode_ctx *dec_ctx) + { + int ret; + ++ // We just seeked, so transcode_seek() will have found a new ctx->packet and ++ // we should just use start with that (if the stream is one are ok with) ++ if (dec_ctx->resume) ++ { ++ dec_ctx->resume = 0; ++ *type = stream_find(dec_ctx, dec_ctx->packet->stream_index); ++ if (*type != AVMEDIA_TYPE_UNKNOWN) ++ return 0; ++ } ++ + do + { +- if (ctx->resume) +- { +- // Copies packet struct, but not actual packet payload, and adjusts +- // data pointer to somewhere inside the payload if resume_offset is set +- *packet = ctx->packet; +- packet->data += ctx->resume_offset; +- packet->size -= ctx->resume_offset; +- ctx->resume = 0; +- } +- else +- { +- // We are going to read a new packet from source, so now it is safe to +- // discard the previous packet and reset resume_offset +- av_packet_unref(&ctx->packet); +- +- ctx->resume_offset = 0; +- ctx->timestamp = av_gettime(); +- +- ret = av_read_frame(ctx->ifmt_ctx, &ctx->packet); +- if (ret < 0) +- { +- DPRINTF(E_WARN, L_XCODE, "Could not read frame: %s\n", err2str(ret)); +- return ret; +- } ++ dec_ctx->timestamp = av_gettime(); + +- *packet = ctx->packet; ++ av_packet_unref(dec_ctx->packet); ++ ret = av_read_frame(dec_ctx->ifmt_ctx, dec_ctx->packet); ++ if (ret < 0) ++ { ++ DPRINTF(E_WARN, L_XCODE, "Could not read frame: %s\n", err2str(ret)); ++ return ret; + } + +- *type = stream_find(ctx, packet->stream_index); 
++ *type = stream_find(dec_ctx, dec_ctx->packet->stream_index); + } + while (*type == AVMEDIA_TYPE_UNKNOWN); + +@@ -484,6 +474,10 @@ packet_prepare(AVPacket *pkt, struct str + av_packet_rescale_ts(pkt, s->codec->time_base, s->stream->time_base); + } + ++/* ++ * Part 4 of the conversion chain: read -> decode -> filter -> encode -> write ++ * ++ */ + static int + encode_write(struct encode_ctx *ctx, struct stream_ctx *s, AVFrame *filt_frame) + { +@@ -554,39 +548,104 @@ filter_encode_write(struct encode_ctx *c + return ret; + } + +-/* Will step through each stream and feed the stream decoder with empty packets +- * to see if the decoder has more frames lined up. Will return non-zero if a +- * frame is found. Should be called until it stops returning anything. ++/* ++ * Part 2 of the conversion chain: read -> decode -> filter -> encode -> write ++ * ++ * If there is no encode_ctx the chain will aborted here + * +- * @out frame AVFrame if there was anything to flush, otherwise undefined +- * @out stream Set to the AVStream where a decoder returned a frame +- * @in ctx Decode context +- * @return Non-zero (true) if frame found, otherwise 0 (false) + */ + static int +-flush_decoder(AVFrame *frame, enum AVMediaType *type, struct decode_ctx *ctx) ++decode_filter_encode_write(struct transcode_ctx *ctx, struct stream_ctx *s, AVPacket *pkt, enum AVMediaType type) + { +- AVPacket dummypacket = { 0 }; +- int got_frame = 0; ++ struct decode_ctx *dec_ctx = ctx->decode_ctx; ++ struct stream_ctx *out_stream = NULL; ++ int ret; ++ ++ ret = avcodec_send_packet(s->codec, pkt); ++ if (ret < 0) ++ return ret; + +- if (ctx->audio_stream.codec) ++ if (ctx->encode_ctx) + { +- *type = AVMEDIA_TYPE_AUDIO; +- avcodec_decode_audio4(ctx->audio_stream.codec, frame, &got_frame, &dummypacket); ++ if (type == AVMEDIA_TYPE_AUDIO) ++ out_stream = &ctx->encode_ctx->audio_stream; ++ else if (type == AVMEDIA_TYPE_VIDEO) ++ out_stream = &ctx->encode_ctx->video_stream; ++ else ++ return -1; ++ + } + +- if (!got_frame && ctx->video_stream.codec) ++ while (1) + { +- *type = AVMEDIA_TYPE_VIDEO; +- avcodec_decode_video2(ctx->video_stream.codec, frame, &got_frame, &dummypacket); ++ ret = avcodec_receive_frame(s->codec, dec_ctx->decoded_frame); ++ if (ret < 0) ++ { ++ if (ret == AVERROR(EAGAIN)) ++ ret = 0; ++ else if (out_stream) ++ ret = filter_encode_write(ctx->encode_ctx, out_stream, NULL); // Flush ++ ++ break; ++ } ++ ++ if (!out_stream) ++ break; ++ ++ ret = filter_encode_write(ctx->encode_ctx, out_stream, dec_ctx->decoded_frame); ++ if (ret < 0) ++ break; + } + +- return got_frame; ++ return ret; ++} ++ ++/* ++ * Part 1 of the conversion chain: read -> decode -> filter -> encode -> write ++ * ++ * Will read exactly one packet from the input and put it in the chain. You ++ * cannot count on anything coming out of the other end from just one packet, ++ * so you probably should loop when calling this and check the contents of ++ * enc_ctx->obuf. 
++ * ++ */ ++static int ++read_decode_filter_encode_write(struct transcode_ctx *ctx) ++{ ++ struct decode_ctx *dec_ctx = ctx->decode_ctx; ++ enum AVMediaType type; ++ int ret; ++ ++ ret = read_packet(&type, dec_ctx); ++ if (ret < 0) ++ { ++ DPRINTF(E_DBG, L_XCODE, "No more input, flushing codecs\n"); ++ ++ if (dec_ctx->audio_stream.stream) ++ decode_filter_encode_write(ctx, &dec_ctx->audio_stream, NULL, AVMEDIA_TYPE_AUDIO); ++ if (dec_ctx->video_stream.stream) ++ decode_filter_encode_write(ctx, &dec_ctx->video_stream, NULL, AVMEDIA_TYPE_VIDEO); ++ ++ return ret; ++ } ++ ++ if (type == AVMEDIA_TYPE_AUDIO) ++ ret = decode_filter_encode_write(ctx, &dec_ctx->audio_stream, dec_ctx->packet, type); ++ else if (type == AVMEDIA_TYPE_VIDEO) ++ ret = decode_filter_encode_write(ctx, &dec_ctx->video_stream, dec_ctx->packet, type); ++ ++ return ret; + } + + + /* --------------------------- INPUT/OUTPUT INIT --------------------------- */ + ++/* ++ * Part 3 of the conversion chain: read -> decode -> filter -> encode -> write ++ * ++ * transcode_encode() starts here since the caller already has a frame ++ * ++ */ + static int + open_input(struct decode_ctx *ctx, const char *path) + { +@@ -762,6 +821,11 @@ open_output(struct encode_ctx *ctx, stru + goto out_free_streams; + } + ++ if (ctx->settings.wavheader) ++ { ++ evbuffer_add(ctx->obuf, ctx->header, sizeof(ctx->header)); ++ } ++ + return 0; + + out_free_streams: +@@ -988,18 +1052,22 @@ transcode_decode_setup(enum transcode_pr + + CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct decode_ctx))); + +- av_init_packet(&ctx->packet); ++ CHECK_NULL(L_XCODE, ctx->decoded_frame = av_frame_alloc()); ++ CHECK_NULL(L_XCODE, ctx->packet = av_packet_alloc()); + + ctx->duration = song_length; + ctx->data_kind = data_kind; + + if ((init_settings(&ctx->settings, profile) < 0) || (open_input(ctx, path) < 0)) +- { +- free(ctx); +- return NULL; +- } ++ goto fail_free; + + return ctx; ++ ++ fail_free: ++ av_packet_free(&ctx->packet); ++ av_frame_free(&ctx->decoded_frame); ++ free(ctx); ++ return NULL; + } + + struct encode_ctx * +@@ -1011,26 +1079,30 @@ transcode_encode_setup(enum transcode_pr + CHECK_NULL(L_XCODE, ctx->filt_frame = av_frame_alloc()); + CHECK_NULL(L_XCODE, ctx->encoded_pkt = av_packet_alloc()); + +- if ((init_settings(&ctx->settings, profile) < 0) || (open_output(ctx, src_ctx) < 0)) +- { +- free(ctx); +- return NULL; +- } ++ if (init_settings(&ctx->settings, profile) < 0) ++ goto fail_free; ++ ++ if (ctx->settings.wavheader) ++ make_wav_header(ctx, src_ctx, est_size); ++ ++ if (open_output(ctx, src_ctx) < 0) ++ goto fail_free; + + if (open_filters(ctx, src_ctx) < 0) +- { +- close_output(ctx); +- free(ctx); +- return NULL; +- } ++ goto fail_close; + + if (src_ctx->data_kind == DATA_KIND_HTTP) + ctx->icy_interval = METADATA_ICY_INTERVAL * ctx->settings.channels * ctx->settings.byte_depth * ctx->settings.sample_rate; + +- if (ctx->settings.wavheader) +- make_wav_header(ctx, src_ctx, est_size); +- + return ctx; ++ ++ fail_close: ++ close_output(ctx); ++ fail_free: ++ av_packet_free(&ctx->encoded_pkt); ++ av_frame_free(&ctx->filt_frame); ++ free(ctx); ++ return NULL; + } + + struct transcode_ctx * +@@ -1038,7 +1110,7 @@ transcode_setup(enum transcode_profile p + { + struct transcode_ctx *ctx; + +- CHECK_NULL(L_XCODE, ctx = malloc(sizeof(struct transcode_ctx))); ++ CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct transcode_ctx))); + + ctx->decode_ctx = transcode_decode_setup(profile, data_kind, path, song_length); + if (!ctx->decode_ctx) +@@ -1050,7 +1122,7 @@ 
transcode_setup(enum transcode_profile p + ctx->encode_ctx = transcode_encode_setup(profile, ctx->decode_ctx, est_size); + if (!ctx->encode_ctx) + { +- transcode_decode_cleanup(ctx->decode_ctx); ++ transcode_decode_cleanup(&ctx->decode_ctx); + free(ctx); + return NULL; + } +@@ -1192,43 +1264,54 @@ transcode_needed(const char *user_agent, + /* Cleanup */ + + void +-transcode_decode_cleanup(struct decode_ctx *ctx) ++transcode_decode_cleanup(struct decode_ctx **ctx) + { +- av_packet_unref(&ctx->packet); +- close_input(ctx); +- free(ctx); ++ if (!(*ctx)) ++ return; ++ ++ close_input(*ctx); ++ ++ av_packet_free(&(*ctx)->packet); ++ av_frame_free(&(*ctx)->decoded_frame); ++ free(*ctx); ++ *ctx = NULL; + } + + void +-transcode_encode_cleanup(struct encode_ctx *ctx) ++transcode_encode_cleanup(struct encode_ctx **ctx) + { ++ if (!*ctx) ++ return; ++ + // Flush audio encoder +- if (ctx->audio_stream.stream) +- filter_encode_write(ctx, &ctx->audio_stream, NULL); ++ if ((*ctx)->audio_stream.stream) ++ filter_encode_write(*ctx, &(*ctx)->audio_stream, NULL); + + // Flush video encoder +- if (ctx->video_stream.stream) +- filter_encode_write(ctx, &ctx->video_stream, NULL); ++ if ((*ctx)->video_stream.stream) ++ filter_encode_write(*ctx, &(*ctx)->video_stream, NULL); + +- // Flush muxer +- av_interleaved_write_frame(ctx->ofmt_ctx, NULL); ++ // Flush muxer ++ av_interleaved_write_frame((*ctx)->ofmt_ctx, NULL); + +- av_write_trailer(ctx->ofmt_ctx); ++ av_write_trailer((*ctx)->ofmt_ctx); + +- close_filters(ctx); +- close_output(ctx); ++ close_filters(*ctx); ++ close_output(*ctx); + +- av_packet_free(&ctx->encoded_pkt); +- av_frame_free(&ctx->filt_frame); +- free(ctx); ++ av_packet_free(&(*ctx)->encoded_pkt); ++ av_frame_free(&(*ctx)->filt_frame); ++ free(*ctx); ++ *ctx = NULL; + } + + void +-transcode_cleanup(struct transcode_ctx *ctx) ++transcode_cleanup(struct transcode_ctx **ctx) + { +- transcode_encode_cleanup(ctx->encode_ctx); +- transcode_decode_cleanup(ctx->decode_ctx); +- free(ctx); ++ transcode_encode_cleanup(&(*ctx)->encode_ctx); ++ transcode_decode_cleanup(&(*ctx)->decode_ctx); ++ free(*ctx); ++ *ctx = NULL; + } + + void +@@ -1241,103 +1324,11 @@ transcode_decoded_free(struct decoded_fr + + /* Encoding, decoding and transcoding */ + +- + int +-transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx) ++transcode_decode(struct decoded_frame **decoded, struct decode_ctx *dec_ctx) + { +- AVPacket packet; +- AVFrame *frame; +- enum AVMediaType type; +- int got_frame; +- int retry; +- int ret; +- int used; +- +- // Alloc the frame we will return on success +- frame = av_frame_alloc(); +- if (!frame) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for decode frame\n"); +- +- return -1; +- } +- +- // Loop until we either fail or get a frame +- retry = 0; +- do +- { +- ret = read_packet(&packet, &type, ctx); +- if (ret < 0) +- { +- // Some decoders need to be flushed, meaning the decoder is to be called +- // with empty input until no more frames are returned +- DPRINTF(E_DBG, L_XCODE, "Could not read packet, will flush decoders\n"); +- +- got_frame = flush_decoder(frame, &type, ctx); +- if (got_frame) +- break; +- +- av_frame_free(&frame); +- if (ret == AVERROR_EOF) +- return 0; +- else +- return -1; +- } +- +- // "used" will tell us how much of the packet was decoded. We may +- // not get a frame because of insufficient input, in which case we loop to +- // read another packet. 
+- if (type == AVMEDIA_TYPE_AUDIO) +- used = avcodec_decode_audio4(ctx->audio_stream.codec, frame, &got_frame, &packet); +- else +- used = avcodec_decode_video2(ctx->video_stream.codec, frame, &got_frame, &packet); +- +- // decoder returned an error, but maybe the packet was just a bad apple, +- // so let's try MAX_BAD_PACKETS times before giving up +- if (used < 0) +- { +- DPRINTF(E_DBG, L_XCODE, "Couldn't decode packet: %s\n", err2str(used)); +- +- retry += 1; +- if (retry < MAX_BAD_PACKETS) +- continue; +- +- DPRINTF(E_LOG, L_XCODE, "Couldn't decode packet after %i retries: %s\n", MAX_BAD_PACKETS, err2str(used)); +- +- av_frame_free(&frame); +- return -1; +- } +- +- // decoder didn't process the entire packet, so flag a resume, meaning +- // that the next read_packet() will return this same packet, but where the +- // data pointer is adjusted with an offset +- if (used < packet.size) +- { +- DPRINTF(E_SPAM, L_XCODE, "Decoder did not finish packet, packet will be resumed\n"); +- +- ctx->resume_offset += used; +- ctx->resume = 1; +- } +- } +- while (!got_frame); +- +- if (got_frame > 0) +- { +- // Return the decoded frame and stream index +- *decoded = malloc(sizeof(struct decoded_frame)); +- if (!(*decoded)) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for decoded result\n"); +- +- av_frame_free(&frame); +- return -1; +- } +- +- (*decoded)->frame = frame; +- (*decoded)->type = type; +- } +- +- return got_frame; ++ DPRINTF(E_LOG, L_XCODE, "Bug! Call to transcode_decode(), but the lazy programmer didn't implement it\n"); ++ return -1; + } + + // Filters and encodes +@@ -1345,10 +1336,10 @@ int + transcode_encode(struct evbuffer *evbuf, struct decoded_frame *decoded, struct encode_ctx *ctx) + { + struct stream_ctx *s; +- int encoded_length; ++ size_t start_length; + int ret; + +- encoded_length = 0; ++ start_length = evbuffer_get_length(ctx->obuf); + + if (decoded->type == AVMEDIA_TYPE_AUDIO) + s = &ctx->audio_stream; +@@ -1357,54 +1348,55 @@ transcode_encode(struct evbuffer *evbuf, + else + return -1; + +- if (ctx->settings.wavheader) +- { +- encoded_length += sizeof(ctx->header); +- evbuffer_add(evbuf, ctx->header, sizeof(ctx->header)); +- ctx->settings.wavheader = 0; +- } +- + ret = filter_encode_write(ctx, s, decoded->frame); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Error occurred: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Error occurred while encoding: %s\n", err2str(ret)); + return ret; + } + +- encoded_length += evbuffer_get_length(ctx->obuf); ++ ret = evbuffer_get_length(ctx->obuf) - start_length; ++ + evbuffer_add_buffer(evbuf, ctx->obuf); + +- return encoded_length; ++ return ret; + } + + int +-transcode(struct evbuffer *evbuf, int wanted, struct transcode_ctx *ctx, int *icy_timer) ++transcode(struct evbuffer *evbuf, int want_bytes, struct transcode_ctx *ctx, int *icy_timer) + { +- struct decoded_frame *decoded; +- int processed; ++ size_t start_length; ++ int processed = 0; + int ret; + + *icy_timer = 0; + +- processed = 0; +- while (processed < wanted) +- { +- ret = transcode_decode(&decoded, ctx->decode_ctx); +- if (ret <= 0) +- return ret; ++ if (ctx->eof) ++ return 0; + +- ret = transcode_encode(evbuf, decoded, ctx->encode_ctx); +- transcode_decoded_free(decoded); +- if (ret < 0) +- return -1; ++ start_length = evbuffer_get_length(ctx->encode_ctx->obuf); + +- processed += ret; +- } ++ do ++ { ++ ret = read_decode_filter_encode_write(ctx); ++ processed = evbuffer_get_length(ctx->encode_ctx->obuf) - start_length; ++ } ++ while ((ret == 0) && (!want_bytes || 
(processed < want_bytes))); ++ ++ evbuffer_add_buffer(evbuf, ctx->encode_ctx->obuf); + + ctx->encode_ctx->total_bytes += processed; + if (ctx->encode_ctx->icy_interval) + *icy_timer = (ctx->encode_ctx->total_bytes % ctx->encode_ctx->icy_interval < processed); + ++ if (ret == AVERROR_EOF) ++ { ++ ctx->eof = 1; ++ ret = 0; ++ } ++ else if (ret < 0) ++ return ret; ++ + return processed; + } + +@@ -1496,11 +1488,10 @@ transcode_seek(struct transcode_ctx *ctx + s->codec->skip_frame = AVDISCARD_NONREF; + while (1) + { +- av_packet_unref(&dec_ctx->packet); +- + dec_ctx->timestamp = av_gettime(); + +- ret = av_read_frame(dec_ctx->ifmt_ctx, &dec_ctx->packet); ++ av_packet_unref(dec_ctx->packet); ++ ret = av_read_frame(dec_ctx->ifmt_ctx, dec_ctx->packet); + if (ret < 0) + { + DPRINTF(E_WARN, L_XCODE, "Could not read more data while seeking: %s\n", err2str(ret)); +@@ -1508,23 +1499,22 @@ transcode_seek(struct transcode_ctx *ctx + return -1; + } + +- if (stream_find(dec_ctx, dec_ctx->packet.stream_index) == AVMEDIA_TYPE_UNKNOWN) ++ if (stream_find(dec_ctx, dec_ctx->packet->stream_index) == AVMEDIA_TYPE_UNKNOWN) + continue; + + // Need a pts to return the real position +- if (dec_ctx->packet.pts == AV_NOPTS_VALUE) ++ if (dec_ctx->packet->pts == AV_NOPTS_VALUE) + continue; + + break; + } + s->codec->skip_frame = AVDISCARD_DEFAULT; + +- // Tell transcode_decode() to resume with ctx->packet ++ // Tell read_packet() to resume with dec_ctx->packet + dec_ctx->resume = 1; +- dec_ctx->resume_offset = 0; + + // Compute position in ms from pts +- got_pts = dec_ctx->packet.pts; ++ got_pts = dec_ctx->packet->pts; + + if ((start_time != AV_NOPTS_VALUE) && (start_time > 0)) + got_pts -= start_time; +Index: forked-daapd-25.0.new/src/transcode.h +=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.h ++++ forked-daapd-25.0.new/src/transcode.h +@@ -42,13 +42,13 @@ transcode_needed(const char *user_agent, + + // Cleaning up + void +-transcode_decode_cleanup(struct decode_ctx *ctx); ++transcode_decode_cleanup(struct decode_ctx **ctx); + + void +-transcode_encode_cleanup(struct encode_ctx *ctx); ++transcode_encode_cleanup(struct encode_ctx **ctx); + + void +-transcode_cleanup(struct transcode_ctx *ctx); ++transcode_cleanup(struct transcode_ctx **ctx); + + void + transcode_decoded_free(struct decoded_frame *decoded); +@@ -57,35 +57,36 @@ transcode_decoded_free(struct decoded_fr + + /* Demuxes and decodes the next packet from the input. + * +- * @out decoded A newly allocated struct with a pointer to the frame and the +- * stream. Must be freed with transcode_decoded_free(). +- * @in ctx Decode context +- * @return Positive if OK, negative if error, 0 if EOF ++ * @out decoded A newly allocated struct with a pointer to the frame and the ++ * stream. Must be freed with transcode_decoded_free(). ++ * @in ctx Decode context ++ * @return Positive if OK, negative if error, 0 if EOF + */ + int + transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx); + + /* Encodes and remuxes a frame. Also resamples if needed. + * +- * @out evbuf An evbuffer filled with remuxed data +- * @in frame The frame to encode, e.g. from transcode_decode +- * @in wanted Bytes that the caller wants processed +- * @in ctx Encode context +- * @return Length of evbuf if OK, negative if error ++ * @out evbuf An evbuffer filled with remuxed data ++ * @in frame The frame to encode, e.g. 
from transcode_decode ++ * @in ctx Encode context ++ * @return Bytes added if OK, negative if error + */ + int + transcode_encode(struct evbuffer *evbuf, struct decoded_frame *decoded, struct encode_ctx *ctx); + +-/* Demuxes, decodes, encodes and remuxes the next packet from the input. ++/* Demuxes, decodes, encodes and remuxes from the input. + * +- * @out evbuf An evbuffer filled with remuxed data +- * @in wanted Bytes that the caller wants processed +- * @in ctx Transcode context +- * @out icy_timer True if METADATA_ICY_INTERVAL has elapsed +- * @return Bytes processed if OK, negative if error, 0 if EOF ++ * @out evbuf An evbuffer filled with remuxed data ++ * @in want_bytes Minimum number of bytes the caller wants added to the evbuffer ++ * - set want_bytes to 0 to transcode everything until EOF/error ++ * - set want_bytes to 1 to get one encoded packet ++ * @in ctx Transcode context ++ * @out icy_timer True if METADATA_ICY_INTERVAL has elapsed ++ * @return Bytes added if OK, negative if error, 0 if EOF + */ + int +-transcode(struct evbuffer *evbuf, int wanted, struct transcode_ctx *ctx, int *icy_timer); ++transcode(struct evbuffer *evbuf, int want_bytes, struct transcode_ctx *ctx, int *icy_timer); + + struct decoded_frame * + transcode_raw2frame(uint8_t *data, size_t size); diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-5.patch forked-daapd-25.0/debian/patches/ffmpeg4-5.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-5.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-5.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,113 @@ +Modified by Peter Micheal Green to fix hunks that would not apply. + +commit 1e180b5ce8d0c1dc0a7324bc23e8b5359379b336 +Author: ejurgensen +Date: Mon Feb 27 20:42:07 2017 +0100 + + [transcode] Call av_write_trailer before cleanup so that any flushed data + will be written to encode_ctx->obuf, where it can be passed to the caller + +Index: forked-daapd-25.0.new/src/transcode.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.c ++++ forked-daapd-25.0.new/src/transcode.c +@@ -120,6 +120,9 @@ struct decode_ctx + // Set to true if we just seeked + bool resume; + ++ // Set to true if we have reached eof ++ bool eof; ++ + // Contains the most recent packet from av_read_frame() + AVPacket *packet; + +@@ -166,8 +169,6 @@ struct transcode_ctx + { + struct decode_ctx *decode_ctx; + struct encode_ctx *encode_ctx; +- +- bool eof; + }; + + struct decoded_frame +@@ -475,7 +476,7 @@ packet_prepare(AVPacket *pkt, struct str + } + + /* +- * Part 4 of the conversion chain: read -> decode -> filter -> encode -> write ++ * Part 4+5 of the conversion chain: read -> decode -> filter -> encode -> write + * + */ + static int +@@ -619,13 +620,21 @@ read_decode_filter_encode_write(struct t + ret = read_packet(&type, dec_ctx); + if (ret < 0) + { +- DPRINTF(E_DBG, L_XCODE, "No more input, flushing codecs\n"); ++ if (ret == AVERROR_EOF) ++ dec_ctx->eof = 1; + + if (dec_ctx->audio_stream.stream) + decode_filter_encode_write(ctx, &dec_ctx->audio_stream, NULL, AVMEDIA_TYPE_AUDIO); + if (dec_ctx->video_stream.stream) + decode_filter_encode_write(ctx, &dec_ctx->video_stream, NULL, AVMEDIA_TYPE_VIDEO); + ++ // Flush muxer ++ if (ctx->encode_ctx) ++ { ++ av_interleaved_write_frame(ctx->encode_ctx->ofmt_ctx, NULL); ++ av_write_trailer(ctx->encode_ctx->ofmt_ctx); ++ } ++ + return ret; + } + +@@ -849,6 +858,7 @@ close_output(struct encode_ctx *ctx) + + avio_evbuffer_close(ctx->ofmt_ctx->pb); 
+ evbuffer_free(ctx->obuf); ++ + avformat_free_context(ctx->ofmt_ctx); + } + +@@ -1283,19 +1293,6 @@ transcode_encode_cleanup(struct encode_c + if (!*ctx) + return; + +- // Flush audio encoder +- if ((*ctx)->audio_stream.stream) +- filter_encode_write(*ctx, &(*ctx)->audio_stream, NULL); +- +- // Flush video encoder +- if ((*ctx)->video_stream.stream) +- filter_encode_write(*ctx, &(*ctx)->video_stream, NULL); +- +- // Flush muxer +- av_interleaved_write_frame((*ctx)->ofmt_ctx, NULL); +- +- av_write_trailer((*ctx)->ofmt_ctx); +- + close_filters(*ctx); + close_output(*ctx); + +@@ -1372,7 +1369,7 @@ transcode(struct evbuffer *evbuf, int wa + + *icy_timer = 0; + +- if (ctx->eof) ++ if (ctx->decode_ctx->eof) + return 0; + + start_length = evbuffer_get_length(ctx->encode_ctx->obuf); +@@ -1391,10 +1388,7 @@ transcode(struct evbuffer *evbuf, int wa + *icy_timer = (ctx->encode_ctx->total_bytes % ctx->encode_ctx->icy_interval < processed); + + if (ret == AVERROR_EOF) +- { +- ctx->eof = 1; +- ret = 0; +- } ++ ret = 0; + else if (ret < 0) + return ret; + diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-6.patch forked-daapd-25.0/debian/patches/ffmpeg4-6.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-6.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-6.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,1442 @@ +Modified by Peter Michael Green to fix hunks that would not apply. + +commit e7f888645fd6273941a7fe156e93ebb62413eaa8 +Author: ejurgensen +Date: Tue Feb 28 23:06:01 2017 +0100 + + [artwork/transcode] Adjust transcode.c so it can take care of artwork + rescaling, meaning we can do without parallel ffmpeg interfaces. + This also moves artwork rescaling from libswscale to libavfilter, which + seems to fix a problem with PNG rescaling. + +Index: forked-daapd-25.0.new/src/artwork.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/artwork.c ++++ forked-daapd-25.0.new/src/artwork.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (C) 2015-2016 Espen Jürgensen ++ * Copyright (C) 2015-2017 Espen Jürgensen + * Copyright (C) 2010-2011 Julien BLACHE + * + * This program is free software; you can redistribute it and/or modify +@@ -30,27 +30,20 @@ + #include + #include + +-#include +-#include +-#include +-#include +- + #include "db.h" + #include "misc.h" + #include "logger.h" + #include "conffile.h" + #include "cache.h" + #include "http.h" ++#include "transcode.h" + +-#include "avio_evbuffer.h" + #include "artwork.h" + + #ifdef HAVE_SPOTIFY_H + # include "spotify.h" + #endif + +-#include "ffmpeg-compat.h" +- + /* This artwork module will look for artwork by consulting a set of sources one + * at a time. A source is for instance the local library, the cache or a cover + * art database. For each source there is a handler function, which will do the +@@ -278,43 +271,42 @@ artwork_read(struct evbuffer *evbuf, cha + /* Will the source image fit inside requested size. If not, what size should it + * be rescaled to to maintain aspect ratio. 
+ * +- * @in src Image source +- * @in max_w Requested width +- * @in max_h Requested height + * @out target_w Rescaled width + * @out target_h Rescaled height +- * @return 0 no rescaling needed, 1 rescaling needed ++ * @in width Actual width ++ * @in height Actual height ++ * @in max_w Requested width ++ * @in max_h Requested height ++ * @return -1 no rescaling needed, otherwise 0 + */ + static int +-rescale_needed(AVCodecContext *src, int max_w, int max_h, int *target_w, int *target_h) ++rescale_calculate(int *target_w, int *target_h, int width, int height, int max_w, int max_h) + { +- DPRINTF(E_DBG, L_ART, "Original image dimensions: w %d h %d\n", src->width, src->height); ++ DPRINTF(E_DBG, L_ART, "Original image dimensions: w %d h %d\n", width, height); + +- *target_w = src->width; +- *target_h = src->height; ++ *target_w = width; ++ *target_h = height; + +- if ((src->width == 0) || (src->height == 0)) /* Unknown source size, can't rescale */ +- return 0; ++ if ((width == 0) || (height == 0)) /* Unknown source size, can't rescale */ ++ return -1; + + if ((max_w <= 0) || (max_h <= 0)) /* No valid target dimensions, use original */ +- return 0; ++ return -1; + +- if ((src->width <= max_w) && (src->height <= max_h)) /* Smaller than target */ +- return 0; ++ if ((width <= max_w) && (height <= max_h)) /* Smaller than target */ ++ return -1; + +- if (src->width * max_h > src->height * max_w) /* Wider aspect ratio than target */ ++ if (width * max_h > height * max_w) /* Wider aspect ratio than target */ + { + *target_w = max_w; +- *target_h = (double)max_w * ((double)src->height / (double)src->width); ++ *target_h = (double)max_w * ((double)height / (double)width); + } + else /* Taller or equal aspect ratio */ + { +- *target_w = (double)max_h * ((double)src->width / (double)src->height); ++ *target_w = (double)max_h * ((double)width / (double)height); + *target_h = max_h; + } + +- DPRINTF(E_DBG, L_ART, "Raw destination width %d height %d\n", *target_w, *target_h); +- + if ((*target_h > max_h) && (max_h > 0)) + *target_h = max_h; + +@@ -324,326 +316,9 @@ rescale_needed(AVCodecContext *src, int + if ((*target_w > max_w) && (max_w > 0)) + *target_w = max_w - (max_w % 2); + +- DPRINTF(E_DBG, L_ART, "Destination width %d height %d\n", *target_w, *target_h); +- +- return 1; +-} +- +-/* Rescale an image +- * +- * @out evbuf Rescaled image data +- * @in src_ctx Image source +- * @in s Index of stream containing image +- * @in out_w Rescaled width +- * @in out_h Rescaled height +- * @return ART_FMT_* on success, -1 on error +- */ +-static int +-artwork_rescale(struct evbuffer *evbuf, AVFormatContext *src_ctx, int s, int out_w, int out_h) +-{ +- uint8_t *buf; +- +- AVCodecContext *src; +- +- AVFormatContext *dst_ctx; +- AVCodecContext *dst; +- AVOutputFormat *dst_fmt; +- AVStream *dst_st; +- +- AVCodec *img_decoder; +- AVCodec *img_encoder; +- +- AVFrame *i_frame; +- AVFrame *o_frame; +- +- struct SwsContext *swsctx; +- +- AVPacket pkt; +- int have_frame; +- int ret; +- +- src = src_ctx->streams[s]->codec; +- +- // Avoids threading issue in both ffmpeg and libav that prevents decoding embedded png's +- src->thread_count = 1; +- +- img_decoder = avcodec_find_decoder(src->codec_id); +- if (!img_decoder) +- { +- DPRINTF(E_LOG, L_ART, "No suitable decoder found for artwork %s\n", src_ctx->filename); +- +- return -1; +- } +- +- ret = avcodec_open2(src, img_decoder, NULL); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not open codec for decoding: %s\n", strerror(AVUNERROR(ret))); +- +- return -1; +- } 
+- +- if (src->pix_fmt < 0) +- { +- DPRINTF(E_LOG, L_ART, "Unknown pixel format for artwork %s\n", src_ctx->filename); +- +- ret = -1; +- goto out_close_src; +- } ++ DPRINTF(E_DBG, L_ART, "Rescale required, destination width %d height %d\n", *target_w, *target_h); + +- /* Set up output */ +- dst_fmt = av_guess_format("image2", NULL, NULL); +- if (!dst_fmt) +- { +- DPRINTF(E_LOG, L_ART, "ffmpeg image2 muxer not available\n"); +- +- ret = -1; +- goto out_close_src; +- } +- +- dst_fmt->video_codec = AV_CODEC_ID_NONE; +- +- /* Try to keep same codec if possible */ +- if (src->codec_id == AV_CODEC_ID_PNG) +- dst_fmt->video_codec = AV_CODEC_ID_PNG; +- else if (src->codec_id == AV_CODEC_ID_MJPEG) +- dst_fmt->video_codec = AV_CODEC_ID_MJPEG; +- +- /* If not possible, select new codec */ +- if (dst_fmt->video_codec == AV_CODEC_ID_NONE) +- { +- dst_fmt->video_codec = AV_CODEC_ID_PNG; +- } +- +- img_encoder = avcodec_find_encoder(dst_fmt->video_codec); +- if (!img_encoder) +- { +- DPRINTF(E_LOG, L_ART, "No suitable encoder found for codec ID %d\n", dst_fmt->video_codec); +- +- ret = -1; +- goto out_close_src; +- } +- +- dst_ctx = avformat_alloc_context(); +- if (!dst_ctx) +- { +- DPRINTF(E_LOG, L_ART, "Out of memory for format context\n"); +- +- ret = -1; +- goto out_close_src; +- } +- +- dst_ctx->oformat = dst_fmt; +- +- dst_fmt->flags &= ~AVFMT_NOFILE; +- +- dst_st = avformat_new_stream(dst_ctx, NULL); +- if (!dst_st) +- { +- DPRINTF(E_LOG, L_ART, "Out of memory for new output stream\n"); +- +- ret = -1; +- goto out_free_dst_ctx; +- } +- +- dst = dst_st->codec; +- +- avcodec_get_context_defaults3(dst, NULL); +- +- if (dst_fmt->flags & AVFMT_GLOBALHEADER) +- dst->flags |= CODEC_FLAG_GLOBAL_HEADER; +- +- dst->codec_id = dst_fmt->video_codec; +- dst->codec_type = AVMEDIA_TYPE_VIDEO; +- +- dst->pix_fmt = avcodec_default_get_format(dst, img_encoder->pix_fmts); +- if (dst->pix_fmt < 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not determine best pixel format\n"); +- +- ret = -1; +- goto out_free_dst_ctx; +- } +- +- dst->time_base.num = 1; +- dst->time_base.den = 25; +- +- dst->width = out_w; +- dst->height = out_h; +- +- /* Open encoder */ +- ret = avcodec_open2(dst, img_encoder, NULL); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret))); +- +- ret = -1; +- goto out_free_dst_ctx; +- } +- +- i_frame = av_frame_alloc(); +- o_frame = av_frame_alloc(); +- if (!i_frame || !o_frame) +- { +- DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n"); +- +- ret = -1; +- goto out_free_frames; +- } +- +- ret = av_image_get_buffer_size(dst->pix_fmt, src->width, src->height, 1); +- +- DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret); +- +- buf = (uint8_t *)av_malloc(ret); +- if (!buf) +- { +- DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n"); +- +- ret = -1; +- goto out_free_frames; +- } +- +-#if HAVE_DECL_AV_IMAGE_FILL_ARRAYS +- av_image_fill_arrays(o_frame->data, o_frame->linesize, buf, dst->pix_fmt, src->width, src->height, 1); +-#else +- avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height); +-#endif +- o_frame->height = dst->height; +- o_frame->width = dst->width; +- o_frame->format = dst->pix_fmt; +- +- swsctx = sws_getContext(src->width, src->height, src->pix_fmt, +- dst->width, dst->height, dst->pix_fmt, +- SWS_BICUBIC, NULL, NULL, NULL); +- if (!swsctx) +- { +- DPRINTF(E_LOG, L_ART, "Could not get SWS context\n"); +- +- ret = -1; +- goto out_free_buf; +- } +- +- /* Get frame */ +- have_frame = 0; +- 
while (av_read_frame(src_ctx, &pkt) == 0) +- { +- if (pkt.stream_index != s) +- { +- av_packet_unref(&pkt); +- continue; +- } +- +- avcodec_decode_video2(src, i_frame, &have_frame, &pkt); +- break; +- } +- +- if (!have_frame) +- { +- DPRINTF(E_LOG, L_ART, "Could not decode artwork\n"); +- +- av_packet_unref(&pkt); +- sws_freeContext(swsctx); +- +- ret = -1; +- goto out_free_buf; +- } +- +- /* Scale */ +- sws_scale(swsctx, (const uint8_t * const *)i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize); +- +- sws_freeContext(swsctx); +- av_packet_unref(&pkt); +- +- /* Open output file */ +- dst_ctx->pb = avio_output_evbuffer_open(evbuf); +- if (!dst_ctx->pb) +- { +- DPRINTF(E_LOG, L_ART, "Could not open artwork destination buffer\n"); +- +- ret = -1; +- goto out_free_buf; +- } +- +- /* Encode frame */ +- av_init_packet(&pkt); +- pkt.data = NULL; +- pkt.size = 0; +- +- ret = avcodec_encode_video2(dst, &pkt, o_frame, &have_frame); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not encode artwork\n"); +- +- ret = -1; +- goto out_fclose_dst; +- } +- +- ret = avformat_write_header(dst_ctx, NULL); +- if (ret != 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not write artwork header: %s\n", strerror(AVUNERROR(ret))); +- +- ret = -1; +- goto out_fclose_dst; +- } +- +- ret = av_interleaved_write_frame(dst_ctx, &pkt); +- +- if (ret != 0) +- { +- DPRINTF(E_LOG, L_ART, "Error writing artwork\n"); +- +- ret = -1; +- goto out_fclose_dst; +- } +- +- ret = av_write_trailer(dst_ctx); +- if (ret != 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not write artwork trailer: %s\n", strerror(AVUNERROR(ret))); +- +- ret = -1; +- goto out_fclose_dst; +- } +- +- switch (dst_fmt->video_codec) +- { +- case AV_CODEC_ID_PNG: +- ret = ART_FMT_PNG; +- break; +- +- case AV_CODEC_ID_MJPEG: +- ret = ART_FMT_JPEG; +- break; +- +- default: +- DPRINTF(E_LOG, L_ART, "Unhandled rescale output format\n"); +- ret = -1; +- break; +- } +- +- out_fclose_dst: +- avio_evbuffer_close(dst_ctx->pb); +- av_packet_unref(&pkt); +- +- out_free_buf: +- av_free(buf); +- +- out_free_frames: +- if (i_frame) +- av_frame_free(&i_frame); +- if (o_frame) +- av_frame_free(&o_frame); +- avcodec_close(dst); +- +- out_free_dst_ctx: +- avformat_free_context(dst_ctx); +- +- out_close_src: +- avcodec_close(src); +- +- return ret; ++ return 0; + } + + /* Get an artwork file from the filesystem. Will rescale if needed. 
+@@ -657,8 +332,11 @@ artwork_rescale(struct evbuffer *evbuf, + static int + artwork_get(struct evbuffer *evbuf, char *path, int max_w, int max_h) + { +- AVFormatContext *src_ctx; +- int s; ++ struct decode_ctx *xcode_decode; ++ struct encode_ctx *xcode_encode; ++ void *frame; ++ int width; ++ int height; + int target_w; + int target_h; + int format_ok; +@@ -666,71 +344,71 @@ artwork_get(struct evbuffer *evbuf, char + + DPRINTF(E_SPAM, L_ART, "Getting artwork (max destination width %d height %d)\n", max_w, max_h); + +- src_ctx = NULL; +- +- ret = avformat_open_input(&src_ctx, path, NULL, NULL); +- if (ret < 0) ++ xcode_decode = transcode_decode_setup(XCODE_PNG, DATA_KIND_FILE, path, 0); // Good for XCODE_JPEG too ++ if (!xcode_decode) + { +- DPRINTF(E_WARN, L_ART, "Cannot open artwork file '%s': %s\n", path, strerror(AVUNERROR(ret))); +- +- return ART_E_ERROR; ++ DPRINTF(E_DBG, L_ART, "No artwork found in '%s'\n", path); ++ return ART_E_NONE; + } + +- ret = avformat_find_stream_info(src_ctx, NULL); +- if (ret < 0) ++ if (transcode_decode_query(xcode_decode, "is_jpeg")) ++ format_ok = ART_FMT_JPEG; ++ else if (transcode_decode_query(xcode_decode, "is_png")) ++ format_ok = ART_FMT_PNG; ++ else + { +- DPRINTF(E_WARN, L_ART, "Cannot get stream info: %s\n", strerror(AVUNERROR(ret))); +- +- avformat_close_input(&src_ctx); +- return ART_E_ERROR; ++ DPRINTF(E_LOG, L_ART, "Artwork file '%s' not a PNG or JPEG file\n", path); ++ goto fail_free_decode; + } + +- format_ok = 0; +- for (s = 0; s < src_ctx->nb_streams; s++) +- { +- if (src_ctx->streams[s]->codec->codec_id == AV_CODEC_ID_PNG) +- { +- format_ok = ART_FMT_PNG; +- break; +- } +- else if (src_ctx->streams[s]->codec->codec_id == AV_CODEC_ID_MJPEG) +- { +- format_ok = ART_FMT_JPEG; +- break; +- } +- } ++ width = transcode_decode_query(xcode_decode, "width"); ++ height = transcode_decode_query(xcode_decode, "height"); + +- if (s == src_ctx->nb_streams) ++ ret = rescale_calculate(&target_w, &target_h, width, height, max_w, max_h); ++ if (ret < 0) + { +- DPRINTF(E_LOG, L_ART, "Artwork file '%s' not a PNG or JPEG file\n", path); ++ // No rescaling required, just read the raw file into the evbuf ++ if (artwork_read(evbuf, path) != 0) ++ goto fail_free_decode; + +- avformat_close_input(&src_ctx); +- return ART_E_ERROR; ++ transcode_decode_cleanup(&xcode_decode); ++ return format_ok; + } + +- ret = rescale_needed(src_ctx->streams[s]->codec, max_w, max_h, &target_w, &target_h); ++ if (format_ok == ART_FMT_JPEG) ++ xcode_encode = transcode_encode_setup(XCODE_JPEG, xcode_decode, NULL, target_w, target_h); ++ else ++ xcode_encode = transcode_encode_setup(XCODE_PNG, xcode_decode, NULL, target_w, target_h); + +- /* Fastpath */ +- if (!ret && format_ok) ++ if (!xcode_encode) + { +- ret = artwork_read(evbuf, path); +- if (ret == 0) +- ret = format_ok; ++ DPRINTF(E_WARN, L_ART, "Cannot open artwork file for rescaling '%s'\n", path); ++ goto fail_free_decode; + } +- else +- ret = artwork_rescale(evbuf, src_ctx, s, target_w, target_h); + +- avformat_close_input(&src_ctx); ++ // We don't use transcode() because we just want to process one frame ++ ret = transcode_decode(&frame, xcode_decode); ++ if (ret < 0) ++ goto fail_free_encode; ++ ++ ret = transcode_encode(evbuf, xcode_encode, frame, 1); ++ ++ transcode_encode_cleanup(&xcode_encode); ++ transcode_decode_cleanup(&xcode_decode); + + if (ret < 0) + { +- if (evbuffer_get_length(evbuf) > 0) +- evbuffer_drain(evbuf, evbuffer_get_length(evbuf)); +- +- ret = ART_E_ERROR; ++ evbuffer_drain(evbuf, 
evbuffer_get_length(evbuf)); ++ return ART_E_ERROR; + } + +- return ret; ++ return format_ok; ++ ++ fail_free_encode: ++ transcode_encode_cleanup(&xcode_encode); ++ fail_free_decode: ++ transcode_decode_cleanup(&xcode_decode); ++ return ART_E_ERROR; + } + + /* Looks for an artwork file in a directory. Will rescale if needed. +@@ -949,92 +627,11 @@ source_item_cache_get(struct artwork_ctx + static int + source_item_embedded_get(struct artwork_ctx *ctx) + { +- AVFormatContext *src_ctx; +- AVStream *src_st; +- int s; +- int target_w; +- int target_h; +- int format; +- int ret; +- + DPRINTF(E_SPAM, L_ART, "Trying embedded artwork in %s\n", ctx->dbmfi->path); + +- src_ctx = NULL; +- +- ret = avformat_open_input(&src_ctx, ctx->dbmfi->path, NULL, NULL); +- if (ret < 0) +- { +- DPRINTF(E_WARN, L_ART, "Cannot open media file '%s': %s\n", ctx->dbmfi->path, strerror(AVUNERROR(ret))); +- return ART_E_ERROR; +- } +- +- ret = avformat_find_stream_info(src_ctx, NULL); +- if (ret < 0) +- { +- DPRINTF(E_WARN, L_ART, "Cannot get stream info: %s\n", strerror(AVUNERROR(ret))); +- avformat_close_input(&src_ctx); +- return ART_E_ERROR; +- } +- +- format = 0; +- for (s = 0; s < src_ctx->nb_streams; s++) +- { +- if (src_ctx->streams[s]->disposition & AV_DISPOSITION_ATTACHED_PIC) +- { +- if (src_ctx->streams[s]->codec->codec_id == AV_CODEC_ID_PNG) +- { +- format = ART_FMT_PNG; +- break; +- } +- else if (src_ctx->streams[s]->codec->codec_id == AV_CODEC_ID_MJPEG) +- { +- format = ART_FMT_JPEG; +- break; +- } +- } +- } +- +- if (s == src_ctx->nb_streams) +- { +- avformat_close_input(&src_ctx); +- return ART_E_NONE; +- } +- +- src_st = src_ctx->streams[s]; +- +- ret = rescale_needed(src_st->codec, ctx->max_w, ctx->max_h, &target_w, &target_h); +- +- /* Fastpath */ +- if (!ret && format) +- { +- DPRINTF(E_SPAM, L_ART, "Artwork not too large, using original image\n"); +- +- ret = evbuffer_add(ctx->evbuf, src_st->attached_pic.data, src_st->attached_pic.size); +- if (ret < 0) +- DPRINTF(E_LOG, L_ART, "Could not add embedded image to event buffer\n"); +- else +- ret = format; +- } +- else +- { +- DPRINTF(E_SPAM, L_ART, "Artwork too large, rescaling image\n"); +- +- ret = artwork_rescale(ctx->evbuf, src_ctx, s, target_w, target_h); +- } +- +- avformat_close_input(&src_ctx); +- +- if (ret < 0) +- { +- if (evbuffer_get_length(ctx->evbuf) > 0) +- evbuffer_drain(ctx->evbuf, evbuffer_get_length(ctx->evbuf)); +- +- ret = ART_E_ERROR; +- } +- else +- snprintf(ctx->path, sizeof(ctx->path), "%s", ctx->dbmfi->path); ++ snprintf(ctx->path, sizeof(ctx->path), "%s", ctx->dbmfi->path); + +- return ret; ++ return artwork_get(ctx->evbuf, ctx->path, ctx->max_w, ctx->max_h); + } + + /* Looks for basename(in_path).{png,jpg}, so if in_path is /foo/bar.mp3 it +Index: forked-daapd-25.0.new/src/httpd.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/httpd.c ++++ forked-daapd-25.0.new/src/httpd.c +@@ -309,11 +309,10 @@ stream_chunk_xcode_cb(int fd, short even + struct timeval tv; + int xcoded; + int ret; +- int dummy; + + st = (struct stream_ctx *)arg; + +- xcoded = transcode(st->evbuf, STREAM_CHUNK_SIZE, st->xcode, &dummy); ++ xcoded = transcode(st->evbuf, NULL, st->xcode, STREAM_CHUNK_SIZE); + if (xcoded <= 0) + { + if (xcoded == 0) +Index: forked-daapd-25.0.new/src/httpd_streaming.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/httpd_streaming.c ++++ forked-daapd-25.0.new/src/httpd_streaming.c +@@ -123,7 +123,7 @@ 
streaming_send_cb(evutil_socket_t fd, sh + { + struct streaming_session *session; + struct evbuffer *evbuf; +- struct decoded_frame *decoded; ++ void *frame; + uint8_t *buf; + int len; + int ret; +@@ -138,15 +138,15 @@ streaming_send_cb(evutil_socket_t fd, sh + if (!streaming_sessions) + return; + +- decoded = transcode_raw2frame(streaming_rawbuf, STREAMING_RAWBUF_SIZE); +- if (!decoded) ++ frame = transcode_frame_new(XCODE_MP3, streaming_rawbuf, STREAMING_RAWBUF_SIZE); ++ if (!frame) + { + DPRINTF(E_LOG, L_STREAMING, "Could not convert raw PCM to frame\n"); + return; + } + +- ret = transcode_encode(streaming_encoded_data, decoded, streaming_encode_ctx); +- transcode_decoded_free(decoded); ++ ret = transcode_encode(streaming_encoded_data, streaming_encode_ctx, frame, 0); ++ transcode_frame_free(frame); + if (ret < 0) + return; + } +@@ -288,7 +288,7 @@ int + streaming_init(void) + { + struct decode_ctx *decode_ctx; +- struct decoded_frame *decoded; ++ void *frame; + int remaining; + int ret; + +@@ -299,7 +299,7 @@ streaming_init(void) + return -1; + } + +- streaming_encode_ctx = transcode_encode_setup(XCODE_MP3, decode_ctx, NULL); ++ streaming_encode_ctx = transcode_encode_setup(XCODE_MP3, decode_ctx, NULL, 0, 0); + transcode_decode_cleanup(&decode_ctx); + if (!streaming_encode_ctx) + { +@@ -345,15 +345,15 @@ streaming_init(void) + remaining = STREAMING_SILENCE_INTERVAL * STOB(44100); + while (remaining > STREAMING_RAWBUF_SIZE) + { +- decoded = transcode_raw2frame(streaming_rawbuf, STREAMING_RAWBUF_SIZE); +- if (!decoded) ++ frame = transcode_frame_new(XCODE_MP3, streaming_rawbuf, STREAMING_RAWBUF_SIZE); ++ if (!frame) + { + DPRINTF(E_LOG, L_STREAMING, "Could not convert raw PCM to frame\n"); + goto silence_fail; + } + +- ret = transcode_encode(streaming_encoded_data, decoded, streaming_encode_ctx); +- transcode_decoded_free(decoded); ++ ret = transcode_encode(streaming_encoded_data, streaming_encode_ctx, frame, 0); ++ transcode_frame_free(frame); + if (ret < 0) + { + DPRINTF(E_LOG, L_STREAMING, "Could not encode silence buffer\n"); +Index: forked-daapd-25.0.new/src/inputs/file_http.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/inputs/file_http.c ++++ forked-daapd-25.0.new/src/inputs/file_http.c +@@ -70,7 +70,7 @@ start(struct player_source *ps) + { + // We set "wanted" to 1 because the read size doesn't matter to us + // TODO optimize? 
+- ret = transcode(evbuf, 1, ps->input_ctx, &icy_timer); ++ ret = transcode(evbuf, &icy_timer, ps->input_ctx, 1); + if (ret < 0) + break; + +Index: forked-daapd-25.0.new/src/logger.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/logger.c ++++ forked-daapd-25.0.new/src/logger.c +@@ -179,9 +179,9 @@ logger_ffmpeg(void *ptr, int level, cons + else if (level <= AV_LOG_WARNING) + severity = E_WARN; + else if (level <= AV_LOG_VERBOSE) +- severity = E_INFO; +- else if (level <= AV_LOG_DEBUG) + severity = E_DBG; ++ else if (level <= AV_LOG_DEBUG) ++ severity = E_SPAM; + else + severity = E_SPAM; + +Index: forked-daapd-25.0.new/src/transcode.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.c ++++ forked-daapd-25.0.new/src/transcode.c +@@ -64,6 +64,9 @@ struct settings_ctx + bool encode_video; + bool encode_audio; + ++ // Silence some log messages ++ bool silent; ++ + // Output format (for the muxer) + const char *format; + +@@ -76,6 +79,7 @@ struct settings_ctx + enum AVSampleFormat sample_format; + int byte_depth; + bool wavheader; ++ bool icy; + + // Video settings + enum AVCodecID video_codec; +@@ -123,6 +127,9 @@ struct decode_ctx + // Set to true if we have reached eof + bool eof; + ++ // Set to true if avcodec_receive_frame() gave us a frame ++ bool got_frame; ++ + // Contains the most recent packet from av_read_frame() + AVPacket *packet; + +@@ -171,12 +178,6 @@ struct transcode_ctx + struct encode_ctx *encode_ctx; + }; + +-struct decoded_frame +-{ +- AVFrame *frame; +- enum AVMediaType type; +-}; +- + + /* -------------------------- PROFILE CONFIGURATION ------------------------ */ + +@@ -200,6 +201,7 @@ init_settings(struct settings_ctx *setti + settings->channels = 2; + settings->sample_format = AV_SAMPLE_FMT_S16; + settings->byte_depth = 2; // Bytes per sample = 16/8 ++ settings->icy = 1; + break; + + case XCODE_MP3: +@@ -215,12 +217,14 @@ init_settings(struct settings_ctx *setti + + case XCODE_JPEG: + settings->encode_video = 1; ++ settings->silent = 1; + settings->format = "image2"; + settings->video_codec = AV_CODEC_ID_MJPEG; + break; + + case XCODE_PNG: + settings->encode_video = 1; ++ settings->silent = 1; + settings->format = "image2"; + settings->video_codec = AV_CODEC_ID_PNG; + break; +@@ -256,7 +260,7 @@ stream_settings_set(struct stream_ctx *s + s->codec->sample_fmt = settings->sample_format; + s->codec->time_base = (AVRational){1, settings->sample_rate}; + } +- else if (type == AVMEDIA_TYPE_AUDIO) ++ else if (type == AVMEDIA_TYPE_VIDEO) + { + s->codec->height = settings->height; + s->codec->width = settings->width; +@@ -366,6 +370,12 @@ stream_add(struct encode_ctx *ctx, struc + + stream_settings_set(s, &ctx->settings, encoder->type); + ++ if (!s->codec->pix_fmt) ++ { ++ s->codec->pix_fmt = avcodec_default_get_format(s->codec, encoder->pix_fmts); ++ DPRINTF(E_DBG, L_XCODE, "Pixel format set to %d (encoder is %s)\n", s->codec->pix_fmt, codec_name); ++ } ++ + if (ctx->ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) + s->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; + +@@ -590,6 +600,8 @@ decode_filter_encode_write(struct transc + break; + } + ++ dec_ctx->got_frame = 1; ++ + if (!out_stream) + break; + +@@ -649,6 +661,51 @@ read_decode_filter_encode_write(struct t + + /* --------------------------- INPUT/OUTPUT INIT --------------------------- */ + ++static AVCodecContext * ++open_decoder(unsigned int *stream_index, struct decode_ctx *ctx, enum AVMediaType 
type) ++{ ++ AVCodecContext *dec_ctx; ++ AVCodec *decoder; ++ int ret; ++ ++ *stream_index = av_find_best_stream(ctx->ifmt_ctx, type, -1, -1, &decoder, 0); ++ if ((*stream_index < 0) || (!decoder)) ++ { ++ if (!ctx->settings.silent) ++ DPRINTF(E_LOG, L_XCODE, "No stream data or decoder for '%s'\n", ctx->ifmt_ctx->filename); ++ return NULL; ++ } ++ ++ CHECK_NULL(L_XCODE, dec_ctx = avcodec_alloc_context3(decoder)); ++ ++ // In open_filter() we need to tell the sample rate and format that the decoder ++ // is giving us - however sample rate of dec_ctx will be 0 if we don't prime it ++ // with the streams codecpar data. ++ ret = avcodec_parameters_to_context(dec_ctx, ctx->ifmt_ctx->streams[*stream_index]->codecpar); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Failed to copy codecpar for stream #%d: %s\n", *stream_index, err2str(ret)); ++ avcodec_free_context(&dec_ctx); ++ return NULL; ++ } ++ ++ if (type == AVMEDIA_TYPE_AUDIO) ++ { ++ dec_ctx->request_sample_fmt = ctx->settings.sample_format; ++ dec_ctx->request_channel_layout = ctx->settings.channel_layout; ++ } ++ ++ ret = avcodec_open2(dec_ctx, NULL, NULL); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", *stream_index, err2str(ret)); ++ avcodec_free_context(&dec_ctx); ++ return NULL; ++ } ++ ++ return dec_ctx; ++} ++ + /* + * Part 3 of the conversion chain: read -> decode -> filter -> encode -> write + * +@@ -659,9 +716,8 @@ static int + open_input(struct decode_ctx *ctx, const char *path) + { + AVDictionary *options = NULL; +- AVCodec *decoder; + AVCodecContext *dec_ctx; +- int stream_index; ++ unsigned int stream_index; + int ret; + + CHECK_NULL(L_XCODE, ctx->ifmt_ctx = avformat_alloc_context()); +@@ -706,35 +762,9 @@ open_input(struct decode_ctx *ctx, const + + if (ctx->settings.encode_audio) + { +- // Find audio stream and open decoder +- stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &decoder, 0); +- if ((stream_index < 0) || (!decoder)) +- { +- DPRINTF(E_LOG, L_XCODE, "Did not find audio stream or suitable decoder for %s\n", path); +- goto out_fail; +- } +- +- CHECK_NULL(L_XCODE, dec_ctx = avcodec_alloc_context3(decoder)); +- +- // In open_filter() we need to tell the sample rate and format that the decoder +- // is giving us - however sample rate of dec_ctx will be 0 if we don't prime it +- // with the streams codecpar data. 
+- ret = avcodec_parameters_to_context(dec_ctx, ctx->ifmt_ctx->streams[stream_index]->codecpar); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Failed to copy codecpar for stream #%d: %s\n", stream_index, err2str(ret)); +- goto out_fail; +- } +- +- dec_ctx->request_sample_fmt = ctx->settings.sample_format; +- dec_ctx->request_channel_layout = ctx->settings.channel_layout; +- +- ret = avcodec_open2(dec_ctx, NULL, NULL); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", stream_index, err2str(ret)); +- goto out_fail; +- } ++ dec_ctx = open_decoder(&stream_index, ctx, AVMEDIA_TYPE_AUDIO); ++ if (!dec_ctx) ++ goto out_fail; + + ctx->audio_stream.codec = dec_ctx; + ctx->audio_stream.stream = ctx->ifmt_ctx->streams[stream_index]; +@@ -742,22 +772,9 @@ open_input(struct decode_ctx *ctx, const + + if (ctx->settings.encode_video) + { +- // Find video stream and open decoder +- stream_index = av_find_best_stream(ctx->ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0); +- if ((stream_index < 0) || (!decoder)) +- { +- DPRINTF(E_LOG, L_XCODE, "Did not find video stream or suitable decoder for '%s': %s\n", path, err2str(ret)); +- goto out_fail; +- } +- +- CHECK_NULL(L_XCODE, dec_ctx = avcodec_alloc_context3(decoder)); +- +- ret = avcodec_open2(dec_ctx, NULL, NULL); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_XCODE, "Failed to open decoder for stream #%d: %s\n", stream_index, err2str(ret)); +- goto out_fail; +- } ++ dec_ctx = open_decoder(&stream_index, ctx, AVMEDIA_TYPE_VIDEO); ++ if (!dec_ctx) ++ goto out_fail; + + ctx->video_stream.codec = dec_ctx; + ctx->video_stream.stream = ctx->ifmt_ctx->streams[stream_index]; +@@ -794,6 +811,9 @@ open_output(struct encode_ctx *ctx, stru + return -1; + } + ++ // Clear AVFMT_NOFILE bit, it is not allowed as we will set our own AVIOContext ++ ctx->ofmt_ctx->oformat->flags = ~AVFMT_NOFILE; ++ + ctx->obuf = evbuffer_new(); + if (!ctx->obuf) + { +@@ -902,7 +922,7 @@ open_filter(struct stream_ctx *out_strea + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create audio buffer source: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create audio buffer source (%s): %s\n", args, err2str(ret)); + goto out_fail; + } + +@@ -914,7 +934,7 @@ open_filter(struct stream_ctx *out_strea + ret = avfilter_graph_create_filter(&format_ctx, format, "format", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create audio format filter: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create audio format filter (%s): %s\n", args, err2str(ret)); + goto out_fail; + } + +@@ -953,17 +973,17 @@ open_filter(struct stream_ctx *out_strea + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create buffer source: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create buffer source (%s): %s\n", args, err2str(ret)); + goto out_fail; + } + + snprintf(args, sizeof(args), +- "pix_fmt=%d", out_stream->codec->pix_fmt); ++ "pix_fmts=%s", av_get_pix_fmt_name(out_stream->codec->pix_fmt)); + + ret = avfilter_graph_create_filter(&format_ctx, format, "format", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create format filter: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create format filter (%s): %s\n", args, err2str(ret)); + goto out_fail; + } + +@@ -973,7 +993,7 @@ 
open_filter(struct stream_ctx *out_strea + ret = avfilter_graph_create_filter(&scale_ctx, scale, "scale", args, NULL, filter_graph); + if (ret < 0) + { +- DPRINTF(E_LOG, L_XCODE, "Cannot create scale filter: %s\n", err2str(ret)); ++ DPRINTF(E_LOG, L_XCODE, "Cannot create scale filter (%s): %s\n", args, err2str(ret)); + goto out_fail; + } + +@@ -1081,7 +1101,7 @@ transcode_decode_setup(enum transcode_pr + } + + struct encode_ctx * +-transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size) ++transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size, int width, int height) + { + struct encode_ctx *ctx; + +@@ -1092,6 +1112,9 @@ transcode_encode_setup(enum transcode_pr + if (init_settings(&ctx->settings, profile) < 0) + goto fail_free; + ++ ctx->settings.width = width; ++ ctx->settings.height = height; ++ + if (ctx->settings.wavheader) + make_wav_header(ctx, src_ctx, est_size); + +@@ -1101,7 +1124,7 @@ transcode_encode_setup(enum transcode_pr + if (open_filters(ctx, src_ctx) < 0) + goto fail_close; + +- if (src_ctx->data_kind == DATA_KIND_HTTP) ++ if (ctx->settings.icy && src_ctx->data_kind == DATA_KIND_HTTP) + ctx->icy_interval = METADATA_ICY_INTERVAL * ctx->settings.channels * ctx->settings.byte_depth * ctx->settings.sample_rate; + + return ctx; +@@ -1129,7 +1152,7 @@ transcode_setup(enum transcode_profile p + return NULL; + } + +- ctx->encode_ctx = transcode_encode_setup(profile, ctx->decode_ctx, est_size); ++ ctx->encode_ctx = transcode_encode_setup(profile, ctx->decode_ctx, est_size, 0, 0); + if (!ctx->encode_ctx) + { + transcode_decode_cleanup(&ctx->decode_ctx); +@@ -1311,62 +1334,92 @@ transcode_cleanup(struct transcode_ctx * + *ctx = NULL; + } + +-void +-transcode_decoded_free(struct decoded_frame *decoded) +-{ +- av_frame_free(&decoded->frame); +- free(decoded); +-} +- +- + /* Encoding, decoding and transcoding */ + + int +-transcode_decode(struct decoded_frame **decoded, struct decode_ctx *dec_ctx) ++transcode_decode(void **frame, struct decode_ctx *dec_ctx) + { +- DPRINTF(E_LOG, L_XCODE, "Bug! Call to transcode_decode(), but the lazy programmer didn't implement it\n"); +- return -1; ++ struct transcode_ctx ctx; ++ int ret; ++ ++ if (dec_ctx->got_frame) ++ DPRINTF(E_LOG, L_XCODE, "Bug! Currently no support for multiple calls to transcode_decode()\n"); ++ ++ ctx.decode_ctx = dec_ctx; ++ ctx.encode_ctx = NULL; ++ ++ do ++ { ++ // This function stops after decoding because ctx->encode_ctx is NULL ++ ret = read_decode_filter_encode_write(&ctx); ++ } ++ while ((ret == 0) && (!dec_ctx->got_frame)); ++ ++ if (ret < 0) ++ return -1; ++ ++ *frame = dec_ctx->decoded_frame; ++ ++ if (dec_ctx->eof) ++ return 0; ++ ++ return 1; + } + + // Filters and encodes + int +-transcode_encode(struct evbuffer *evbuf, struct decoded_frame *decoded, struct encode_ctx *ctx) ++transcode_encode(struct evbuffer *evbuf, struct encode_ctx *ctx, void *frame, int eof) + { ++ AVFrame *f = frame; + struct stream_ctx *s; + size_t start_length; + int ret; + + start_length = evbuffer_get_length(ctx->obuf); + +- if (decoded->type == AVMEDIA_TYPE_AUDIO) ++ // Really crappy way of detecting if frame is audio, video or something else ++ if (f->channel_layout && f->sample_rate) + s = &ctx->audio_stream; +- else if (decoded->type == AVMEDIA_TYPE_VIDEO) ++ else if (f->width && f->height) + s = &ctx->video_stream; + else +- return -1; ++ { ++ DPRINTF(E_LOG, L_XCODE, "Bug! 
Encoder could not detect frame type\n"); ++ return -1; ++ } + +- ret = filter_encode_write(ctx, s, decoded->frame); ++ ret = filter_encode_write(ctx, s, f); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error occurred while encoding: %s\n", err2str(ret)); + return ret; + } + ++ // Flush ++ if (eof) ++ { ++ filter_encode_write(ctx, s, NULL); ++ av_write_trailer(ctx->ofmt_ctx); ++ } ++ + ret = evbuffer_get_length(ctx->obuf) - start_length; + ++ // TODO Shouldn't we flush and let the muxer write the trailer now? ++ + evbuffer_add_buffer(evbuf, ctx->obuf); + + return ret; + } + + int +-transcode(struct evbuffer *evbuf, int want_bytes, struct transcode_ctx *ctx, int *icy_timer) ++transcode(struct evbuffer *evbuf, int *icy_timer, struct transcode_ctx *ctx, int want_bytes) + { + size_t start_length; + int processed = 0; + int ret; + +- *icy_timer = 0; ++ if (icy_timer) ++ *icy_timer = 0; + + if (ctx->decode_ctx->eof) + return 0; +@@ -1383,7 +1436,7 @@ transcode(struct evbuffer *evbuf, int wa + evbuffer_add_buffer(evbuf, ctx->encode_ctx->obuf); + + ctx->encode_ctx->total_bytes += processed; +- if (ctx->encode_ctx->icy_interval) ++ if (icy_timer && ctx->encode_ctx->icy_interval) + *icy_timer = (ctx->encode_ctx->total_bytes % ctx->encode_ctx->icy_interval < processed); + + if (ret == AVERROR_EOF) +@@ -1394,49 +1447,45 @@ transcode(struct evbuffer *evbuf, int wa + return processed; + } + +-struct decoded_frame * +-transcode_raw2frame(uint8_t *data, size_t size) ++void * ++transcode_frame_new(enum transcode_profile profile, uint8_t *data, size_t size) + { +- struct decoded_frame *decoded; +- AVFrame *frame; ++ AVFrame *f; + int ret; + +- decoded = malloc(sizeof(struct decoded_frame)); +- if (!decoded) +- { +- DPRINTF(E_LOG, L_XCODE, "Out of memory for decoded struct\n"); +- return NULL; +- } +- +- frame = av_frame_alloc(); +- if (!frame) ++ f = av_frame_alloc(); ++ if (!f) + { + DPRINTF(E_LOG, L_XCODE, "Out of memory for frame\n"); +- free(decoded); + return NULL; + } + +- decoded->type = AVMEDIA_TYPE_AUDIO; +- decoded->frame = frame; +- +- frame->nb_samples = size / 4; +- frame->format = AV_SAMPLE_FMT_S16; +- frame->channel_layout = AV_CH_LAYOUT_STEREO; ++ f->nb_samples = size / 4; ++ f->format = AV_SAMPLE_FMT_S16; ++ f->channel_layout = AV_CH_LAYOUT_STEREO; + #ifdef HAVE_FFMPEG +- frame->channels = 2; ++ f->channels = 2; + #endif +- frame->pts = AV_NOPTS_VALUE; +- frame->sample_rate = 44100; ++ f->pts = AV_NOPTS_VALUE; ++ f->sample_rate = 44100; + +- ret = avcodec_fill_audio_frame(frame, 2, frame->format, data, size, 0); ++ ret = avcodec_fill_audio_frame(f, 2, f->format, data, size, 0); + if (ret < 0) + { + DPRINTF(E_LOG, L_XCODE, "Error filling frame with rawbuf: %s\n", err2str(ret)); +- transcode_decoded_free(decoded); ++ av_frame_free(&f); + return NULL; + } + +- return decoded; ++ return f; ++} ++ ++void ++transcode_frame_free(void *frame) ++{ ++ AVFrame *f = frame; ++ ++ av_frame_free(&f); + } + + +@@ -1525,6 +1574,34 @@ transcode_seek(struct transcode_ctx *ctx + return got_ms; + } + ++/* Querying */ ++ ++int ++transcode_decode_query(struct decode_ctx *ctx, const char *query) ++{ ++ if (strcmp(query, "width") == 0) ++ { ++ if (ctx->video_stream.stream) ++ return ctx->video_stream.stream->codecpar->width; ++ } ++ else if (strcmp(query, "height") == 0) ++ { ++ if (ctx->video_stream.stream) ++ return ctx->video_stream.stream->codecpar->height; ++ } ++ else if (strcmp(query, "is_png") == 0) ++ { ++ if (ctx->video_stream.stream) ++ return (ctx->video_stream.stream->codecpar->codec_id == 
AV_CODEC_ID_PNG); ++ } ++ else if (strcmp(query, "is_jpeg") == 0) ++ { ++ if (ctx->video_stream.stream) ++ return (ctx->video_stream.stream->codecpar->codec_id == AV_CODEC_ID_MJPEG); ++ } ++ ++ return -1; ++} + + /* Metadata */ + +Index: forked-daapd-25.0.new/src/transcode.h +=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.h ++++ forked-daapd-25.0.new/src/transcode.h +@@ -22,14 +22,13 @@ enum transcode_profile + struct decode_ctx; + struct encode_ctx; + struct transcode_ctx; +-struct decoded_frame; + + // Setting up + struct decode_ctx * + transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length); + + struct encode_ctx * +-transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size); ++transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size, int width, int height); + + struct transcode_ctx * + transcode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length, off_t *est_size); +@@ -50,51 +49,74 @@ transcode_encode_cleanup(struct encode_c + void + transcode_cleanup(struct transcode_ctx **ctx); + +-void +-transcode_decoded_free(struct decoded_frame *decoded); +- + // Transcoding + + /* Demuxes and decodes the next packet from the input. + * +- * @out decoded A newly allocated struct with a pointer to the frame and the +- * stream. Must be freed with transcode_decoded_free(). ++ * @out frame A pointer to the frame. Caller should not free it, that will ++ * be done by the next call to the function or by the cleanup ++ * function. + * @in ctx Decode context + * @return Positive if OK, negative if error, 0 if EOF + */ + int +-transcode_decode(struct decoded_frame **decoded, struct decode_ctx *ctx); ++transcode_decode(void **frame, struct decode_ctx *ctx); + + /* Encodes and remuxes a frame. Also resamples if needed. + * + * @out evbuf An evbuffer filled with remuxed data +- * @in frame The frame to encode, e.g. from transcode_decode + * @in ctx Encode context ++ * @in frame The decoded frame to encode, e.g. from transcode_decode ++ * @in eof If true the muxer will write a trailer to the output + * @return Bytes added if OK, negative if error + */ + int +-transcode_encode(struct evbuffer *evbuf, struct decoded_frame *decoded, struct encode_ctx *ctx); ++transcode_encode(struct evbuffer *evbuf, struct encode_ctx *ctx, void *frame, int eof); + + /* Demuxes, decodes, encodes and remuxes from the input. 
+ * + * @out evbuf An evbuffer filled with remuxed data ++ * @out icy_timer True if METADATA_ICY_INTERVAL has elapsed ++ * @in ctx Transcode context + * @in want_bytes Minimum number of bytes the caller wants added to the evbuffer + * - set want_bytes to 0 to transcode everything until EOF/error + * - set want_bytes to 1 to get one encoded packet +- * @in ctx Transcode context +- * @out icy_timer True if METADATA_ICY_INTERVAL has elapsed + * @return Bytes added if OK, negative if error, 0 if EOF + */ + int +-transcode(struct evbuffer *evbuf, int want_bytes, struct transcode_ctx *ctx, int *icy_timer); ++transcode(struct evbuffer *evbuf, int *icy_timer, struct transcode_ctx *ctx, int want_bytes); + +-struct decoded_frame * +-transcode_raw2frame(uint8_t *data, size_t size); ++/* Converts a buffer with raw data to a frame that can be passed directly to the ++ * transcode_encode() function ++ * ++ * @in profile Tells the function what kind of frame to create ++ * @in data Buffer with raw data ++ * @in size Size of buffer ++ * @return Opaque pointer to frame if OK, otherwise NULL ++ */ ++void * ++transcode_frame_new(enum transcode_profile profile, uint8_t *data, size_t size); ++void ++transcode_frame_free(void *frame); + +-// Seeking ++/* Seek to the specified position - next transcode() will return this packet ++ * ++ * @in ctx Transcode context ++ * @in seek Requested seek position in ms ++ * @return Negative if error, otherwise actual seek position ++ */ + int + transcode_seek(struct transcode_ctx *ctx, int ms); + ++/* Query for information about a media file opened by transcode_decode_setup() ++ * ++ * @in ctx Decode context ++ * @in query Query - see implementation for supported queries ++ * @return Negative if error, otherwise query dependent ++ */ ++int ++transcode_decode_query(struct decode_ctx *ctx, const char *query); ++ + // Metadata + struct http_icy_metadata * + transcode_metadata(struct transcode_ctx *ctx, int *changed); diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-7.patch forked-daapd-25.0/debian/patches/ffmpeg4-7.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-7.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-7.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,324 @@ +Refreshed by Peter Michael Green to remove fuzz. + +commit 441ad006a6003e33b981470e07c2bacd2c066cb7 +Author: ejurgensen +Date: Wed Mar 1 21:29:08 2017 +0100 + + [artwork/transcode] Also let transcode.c handle rescaling of non-file Spotify artwork + +Index: forked-daapd-25.0.new/src/artwork.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/artwork.c ++++ forked-daapd-25.0.new/src/artwork.c +@@ -324,13 +324,14 @@ rescale_calculate(int *target_w, int *ta + /* Get an artwork file from the filesystem. Will rescale if needed. 
+ * + * @out evbuf Image data +- * @in path Path to the artwork ++ * @in path Path to the artwork (alternative to inbuf) ++ * @in inbuf Buffer with the artwork (alternative to path) + * @in max_w Requested width + * @in max_h Requested height + * @return ART_FMT_* on success, ART_E_ERROR on error + */ + static int +-artwork_get(struct evbuffer *evbuf, char *path, int max_w, int max_h) ++artwork_get(struct evbuffer *evbuf, char *path, struct evbuffer *inbuf, int max_w, int max_h) + { + struct decode_ctx *xcode_decode; + struct encode_ctx *xcode_encode; +@@ -344,7 +345,7 @@ artwork_get(struct evbuffer *evbuf, char + + DPRINTF(E_SPAM, L_ART, "Getting artwork (max destination width %d height %d)\n", max_w, max_h); + +- xcode_decode = transcode_decode_setup(XCODE_PNG, DATA_KIND_FILE, path, 0); // Good for XCODE_JPEG too ++ xcode_decode = transcode_decode_setup(XCODE_JPEG, DATA_KIND_FILE, path, inbuf, 0); // Covers XCODE_PNG too + if (!xcode_decode) + { + DPRINTF(E_DBG, L_ART, "No artwork found in '%s'\n", path); +@@ -368,7 +369,7 @@ artwork_get(struct evbuffer *evbuf, char + if (ret < 0) + { + // No rescaling required, just read the raw file into the evbuf +- if (artwork_read(evbuf, path) != 0) ++ if (!path || artwork_read(evbuf, path) != 0) + goto fail_free_decode; + + transcode_decode_cleanup(&xcode_decode); +@@ -518,7 +519,7 @@ artwork_get_dir_image(struct evbuffer *e + + snprintf(out_path, PATH_MAX, "%s", path); + +- return artwork_get(evbuf, path, max_w, max_h); ++ return artwork_get(evbuf, path, NULL, max_w, max_h); + } + + +@@ -631,7 +632,7 @@ source_item_embedded_get(struct artwork_ + + snprintf(ctx->path, sizeof(ctx->path), "%s", ctx->dbmfi->path); + +- return artwork_get(ctx->evbuf, ctx->path, ctx->max_w, ctx->max_h); ++ return artwork_get(ctx->evbuf, ctx->path, NULL, ctx->max_w, ctx->max_h); + } + + /* Looks for basename(in_path).{png,jpg}, so if in_path is /foo/bar.mp3 it +@@ -685,7 +686,7 @@ source_item_own_get(struct artwork_ctx * + + snprintf(ctx->path, sizeof(ctx->path), "%s", path); + +- return artwork_get(ctx->evbuf, path, ctx->max_w, ctx->max_h); ++ return artwork_get(ctx->evbuf, path, NULL, ctx->max_w, ctx->max_h); + } + + /* +@@ -774,13 +775,8 @@ source_item_stream_get(struct artwork_ct + static int + source_item_spotify_get(struct artwork_ctx *ctx) + { +- AVFormatContext *src_ctx; +- AVIOContext *avio; +- AVInputFormat *ifmt; + struct evbuffer *raw; + struct evbuffer *evbuf; +- int target_w; +- int target_h; + int ret; + + raw = evbuffer_new(); +@@ -820,75 +816,29 @@ source_item_spotify_get(struct artwork_c + goto out_free_evbuf; + } + +- // Now evbuf will be processed by ffmpeg, since it probably needs to be rescaled +- src_ctx = avformat_alloc_context(); +- if (!src_ctx) +- { +- DPRINTF(E_LOG, L_ART, "Out of memory for source context\n"); +- goto out_free_evbuf; +- } +- +- avio = avio_input_evbuffer_open(evbuf); +- if (!avio) +- { +- DPRINTF(E_LOG, L_ART, "Could not alloc input evbuffer\n"); +- goto out_free_ctx; +- } +- +- src_ctx->pb = avio; +- +- ifmt = av_find_input_format("mjpeg"); +- if (!ifmt) +- { +- DPRINTF(E_LOG, L_ART, "Could not find mjpeg input format\n"); +- goto out_close_avio; +- } +- +- ret = avformat_open_input(&src_ctx, NULL, ifmt, NULL); +- if (ret < 0) ++ // For non-file input, artwork_get() will also fail if no rescaling is required ++ ret = artwork_get(ctx->evbuf, NULL, evbuf, ctx->max_w, ctx->max_h); ++ if (ret == ART_E_ERROR) + { +- DPRINTF(E_LOG, L_ART, "Could not open input\n"); +- goto out_close_avio; +- } +- +- ret = 
avformat_find_stream_info(src_ctx, NULL); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not find stream info\n"); +- goto out_close_input; +- } +- +- ret = rescale_needed(src_ctx->streams[0]->codec, ctx->max_w, ctx->max_h, &target_w, &target_h); +- if (!ret) +- ret = evbuffer_add_buffer(ctx->evbuf, raw); +- else +- ret = artwork_rescale(ctx->evbuf, src_ctx, 0, target_w, target_h); +- if (ret < 0) +- { +- DPRINTF(E_LOG, L_ART, "Could not add or rescale image to output evbuf\n"); +- goto out_close_input; ++ DPRINTF(E_DBG, L_ART, "Not rescaling Spotify image\n"); ++ ret = evbuffer_add_buffer(ctx->evbuf, raw); ++ if (ret < 0) ++ { ++ DPRINTF(E_LOG, L_ART, "Could not add or rescale image to output evbuf\n"); ++ goto out_free_evbuf; ++ } + } + +- avformat_close_input(&src_ctx); +- avio_evbuffer_close(avio); + evbuffer_free(evbuf); + evbuffer_free(raw); + + return ART_FMT_JPEG; + +- out_close_input: +- avformat_close_input(&src_ctx); +- out_close_avio: +- avio_evbuffer_close(avio); +- out_free_ctx: +- if (src_ctx) +- avformat_free_context(src_ctx); + out_free_evbuf: + evbuffer_free(evbuf); + evbuffer_free(raw); + + return ART_E_ERROR; +- + } + #else + static int +Index: forked-daapd-25.0.new/src/avio_evbuffer.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/avio_evbuffer.c ++++ forked-daapd-25.0.new/src/avio_evbuffer.c +@@ -125,6 +125,9 @@ avio_evbuffer_close(AVIOContext *s) + { + struct avio_evbuffer *ae; + ++ if (!s) ++ return; ++ + ae = (struct avio_evbuffer *)s->opaque; + + avio_flush(s); +Index: forked-daapd-25.0.new/src/transcode.c +=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.c ++++ forked-daapd-25.0.new/src/transcode.c +@@ -70,6 +70,9 @@ struct settings_ctx + // Output format (for the muxer) + const char *format; + ++ // Input format (for the demuxer) ++ const char *in_format; ++ + // Audio settings + enum AVCodecID audio_codec; + const char *audio_codec_name; +@@ -111,6 +114,9 @@ struct decode_ctx + // Input format context + AVFormatContext *ifmt_ctx; + ++ // IO Context for non-file input ++ AVIOContext *avio; ++ + // Stream and decoder data + struct stream_ctx audio_stream; + struct stream_ctx video_stream; +@@ -219,6 +225,7 @@ init_settings(struct settings_ctx *setti + settings->encode_video = 1; + settings->silent = 1; + settings->format = "image2"; ++ settings->in_format = "mjpeg"; + settings->video_codec = AV_CODEC_ID_MJPEG; + break; + +@@ -713,10 +720,11 @@ open_decoder(unsigned int *stream_index, + * + */ + static int +-open_input(struct decode_ctx *ctx, const char *path) ++open_input(struct decode_ctx *ctx, const char *path, struct evbuffer *evbuf) + { + AVDictionary *options = NULL; + AVCodecContext *dec_ctx; ++ AVInputFormat *ifmt; + unsigned int stream_index; + int ret; + +@@ -736,7 +744,24 @@ open_input(struct decode_ctx *ctx, const + ctx->ifmt_ctx->interrupt_callback.opaque = ctx; + ctx->timestamp = av_gettime(); + +- ret = avformat_open_input(&ctx->ifmt_ctx, path, NULL, &options); ++ if (evbuf) ++ { ++ ifmt = av_find_input_format(ctx->settings.in_format); ++ if (!ifmt) ++ { ++ DPRINTF(E_LOG, L_XCODE, "Could not find input format: '%s'\n", ctx->settings.in_format); ++ return -1; ++ } ++ ++ CHECK_NULL(L_XCODE, ctx->avio = avio_input_evbuffer_open(evbuf)); ++ ++ ctx->ifmt_ctx->pb = ctx->avio; ++ ret = avformat_open_input(&ctx->ifmt_ctx, NULL, ifmt, &options); ++ } ++ else ++ { ++ ret = avformat_open_input(&ctx->ifmt_ctx, path, NULL, &options); 
++ } + + if (options) + av_dict_free(&options); +@@ -783,6 +808,7 @@ open_input(struct decode_ctx *ctx, const + return 0; + + out_fail: ++ avio_evbuffer_close(ctx->avio); + avcodec_free_context(&ctx->audio_stream.codec); + avcodec_free_context(&ctx->video_stream.codec); + avformat_close_input(&ctx->ifmt_ctx); +@@ -793,6 +819,7 @@ open_input(struct decode_ctx *ctx, const + static void + close_input(struct decode_ctx *ctx) + { ++ avio_evbuffer_close(ctx->avio); + avcodec_free_context(&ctx->audio_stream.codec); + avcodec_free_context(&ctx->video_stream.codec); + avformat_close_input(&ctx->ifmt_ctx); +@@ -1076,7 +1103,7 @@ close_filters(struct encode_ctx *ctx) + /* Setup */ + + struct decode_ctx * +-transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length) ++transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, struct evbuffer *evbuf, uint32_t song_length) + { + struct decode_ctx *ctx; + +@@ -1088,7 +1115,7 @@ transcode_decode_setup(enum transcode_pr + ctx->duration = song_length; + ctx->data_kind = data_kind; + +- if ((init_settings(&ctx->settings, profile) < 0) || (open_input(ctx, path) < 0)) ++ if ((init_settings(&ctx->settings, profile) < 0) || (open_input(ctx, path, evbuf) < 0)) + goto fail_free; + + return ctx; +@@ -1145,7 +1172,7 @@ transcode_setup(enum transcode_profile p + + CHECK_NULL(L_XCODE, ctx = calloc(1, sizeof(struct transcode_ctx))); + +- ctx->decode_ctx = transcode_decode_setup(profile, data_kind, path, song_length); ++ ctx->decode_ctx = transcode_decode_setup(profile, data_kind, path, NULL, song_length); + if (!ctx->decode_ctx) + { + free(ctx); +@@ -1405,8 +1432,6 @@ transcode_encode(struct evbuffer *evbuf, + + ret = evbuffer_get_length(ctx->obuf) - start_length; + +- // TODO Shouldn't we flush and let the muxer write the trailer now? 
+- + evbuffer_add_buffer(evbuf, ctx->obuf); + + return ret; +Index: forked-daapd-25.0.new/src/transcode.h +=================================================================== +--- forked-daapd-25.0.new.orig/src/transcode.h ++++ forked-daapd-25.0.new/src/transcode.h +@@ -25,7 +25,7 @@ struct transcode_ctx; + + // Setting up + struct decode_ctx * +-transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, uint32_t song_length); ++transcode_decode_setup(enum transcode_profile profile, enum data_kind data_kind, const char *path, struct evbuffer *evbuf, uint32_t song_length); + + struct encode_ctx * + transcode_encode_setup(enum transcode_profile profile, struct decode_ctx *src_ctx, off_t *est_size, int width, int height); diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-8.patch forked-daapd-25.0/debian/patches/ffmpeg4-8.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-8.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-8.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,30 @@ +commit 6951639d24b1dcb0a1d9c48fdd44032f3acbf2f4 +Author: ejurgensen +Date: Wed Mar 1 22:32:41 2017 +0100 + + [transcode] Adjustments for libav 12 + +diff --git a/src/transcode.c b/src/transcode.c +index bbf59cee..dd6c322e 100644 +--- a/src/transcode.c ++++ b/src/transcode.c +@@ -991,8 +991,8 @@ open_filter(struct stream_ctx *out_stream, struct stream_ctx *in_stream) + } + + snprintf(args, sizeof(args), +- "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", +- in_stream->codec->width, in_stream->codec->height, in_stream->codec->pix_fmt, ++ "width=%d:height=%d:pix_fmt=%s:time_base=%d/%d:sar=%d/%d", ++ in_stream->codec->width, in_stream->codec->height, av_get_pix_fmt_name(in_stream->codec->pix_fmt), + in_stream->stream->time_base.num, in_stream->stream->time_base.den, + in_stream->codec->sample_aspect_ratio.num, in_stream->codec->sample_aspect_ratio.den); + +@@ -1014,7 +1014,7 @@ open_filter(struct stream_ctx *out_stream, struct stream_ctx *in_stream) + } + + snprintf(args, sizeof(args), +- "width=%d:height=%d", out_stream->codec->width, out_stream->codec->height); ++ "w=%d:h=%d", out_stream->codec->width, out_stream->codec->height); + + ret = avfilter_graph_create_filter(&scale_ctx, scale, "scale", args, NULL, filter_graph); + if (ret < 0) diff -Nru forked-daapd-25.0/debian/patches/ffmpeg4-9.patch forked-daapd-25.0/debian/patches/ffmpeg4-9.patch --- forked-daapd-25.0/debian/patches/ffmpeg4-9.patch 1970-01-01 00:00:00.000000000 +0000 +++ forked-daapd-25.0/debian/patches/ffmpeg4-9.patch 2018-07-26 13:42:48.000000000 +0000 @@ -0,0 +1,49 @@ +commit f9375ef915fb44a926c772fa9e4c6fcfb59135ac +Author: ejurgensen +Date: Sat Mar 4 09:40:29 2017 +0100 + + [transcode] More adjustments for libav 12 + +diff --git a/src/transcode.c b/src/transcode.c +index dd6c322e..4df09e5a 100644 +--- a/src/transcode.c ++++ b/src/transcode.c +@@ -33,8 +33,8 @@ + #include + #include + #include +- +-#include "ffmpeg-compat.h" ++#include ++#include + + #include "logger.h" + #include "conffile.h" +@@ -827,18 +827,22 @@ close_input(struct decode_ctx *ctx) + static int + open_output(struct encode_ctx *ctx, struct decode_ctx *src_ctx) + { ++ AVOutputFormat *oformat; + int ret; + +- ctx->ofmt_ctx = NULL; +- avformat_alloc_output_context2(&ctx->ofmt_ctx, NULL, ctx->settings.format, NULL); +- if (!ctx->ofmt_ctx) ++ oformat = av_guess_format(ctx->settings.format, NULL, NULL); ++ if (!oformat) + { +- DPRINTF(E_LOG, L_XCODE, "Could not create output context\n"); ++ 
DPRINTF(E_LOG, L_XCODE, "ffmpeg/libav could not find the '%s' output format\n", ctx->settings.format); + return -1; + } + + // Clear AVFMT_NOFILE bit, it is not allowed as we will set our own AVIOContext +- ctx->ofmt_ctx->oformat->flags = ~AVFMT_NOFILE; ++ oformat->flags = ~AVFMT_NOFILE; ++ ++ CHECK_NULL(L_XCODE, ctx->ofmt_ctx = avformat_alloc_context()); ++ ++ ctx->ofmt_ctx->oformat = oformat; + + ctx->obuf = evbuffer_new(); + if (!ctx->obuf) diff -Nru forked-daapd-25.0/debian/patches/series forked-daapd-25.0/debian/patches/series --- forked-daapd-25.0/debian/patches/series 2017-10-22 22:46:13.000000000 +0000 +++ forked-daapd-25.0/debian/patches/series 2018-07-26 13:42:48.000000000 +0000 @@ -1 +1,12 @@ 0001-raop-Restore-ATV4-tvOS11-support-by-removing-needles.patch +ffmpeg4-1.patch +ffmpeg4-2.patch +ffmpeg4-3.patch +ffmpeg4-4.patch +ffmpeg4-5.patch +ffmpeg4-6.patch +ffmpeg4-7.patch +ffmpeg4-8.patch +ffmpeg4-9.patch +ffmpeg4-10.patch +ffmpeg4-11.patch diff -Nru forked-daapd-25.0/debian/rules forked-daapd-25.0/debian/rules --- forked-daapd-25.0/debian/rules 2017-10-22 22:46:13.000000000 +0000 +++ forked-daapd-25.0/debian/rules 2018-07-26 13:42:48.000000000 +0000 @@ -35,3 +35,4 @@ override_dh_clean: dh_clean rm -f debian/forked-daapd.service + rm -f config.log src/RSP*.h src/RSP*.c src/RSP*.tokens src/RSP*.u src/DAAP*.h src/DAAP*.c src/DAAP*.tokens src/DAAP*.u src/SMARTPL*.h src/SMARTPL*.c src/SMARTPL*.tokens src/SMARTPL*.u src/*hash.h build-aux/config.sub build-aux/config.guess
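
Note (not part of the patch set): taken together, ffmpeg4-1.patch and ffmpeg4-7.patch replace the direct libavformat/libswscale artwork code with the transcode_* API whose signatures appear in the transcode.h hunks above. The sketch below is a condensed, illustrative restatement of the call sequence the patched artwork_get() follows; it assumes the patched transcode.h and the surrounding forked-daapd headers (for DATA_KIND_FILE and struct evbuffer via libevent) and simplifies the size calculation that rescale_calculate() performs.

/* Illustrative only: condensed flow of the patched artwork_get().
 * Decode one frame of cover art, query its properties through the new
 * query interface, then re-encode it at a reduced size. Error handling
 * and the aspect-ratio math of rescale_calculate() are simplified.
 */
#include <event2/buffer.h>   /* struct evbuffer (libevent) */
#include "transcode.h"       /* patched API from ffmpeg4-1/-7 */

static int
artwork_rescale_sketch(struct evbuffer *out, const char *path, int max_w, int max_h)
{
  struct decode_ctx *decode;
  struct encode_ctx *encode;
  void *frame;
  int profile;
  int w, h;
  int ret;

  /* XCODE_JPEG also covers PNG input; NULL evbuf means "read from path" */
  decode = transcode_decode_setup(XCODE_JPEG, DATA_KIND_FILE, path, NULL, 0);
  if (!decode)
    return -1;

  /* The query interface replaces direct access to AVStream.codec */
  profile = transcode_decode_query(decode, "is_jpeg") ? XCODE_JPEG : XCODE_PNG;
  w = transcode_decode_query(decode, "width");
  h = transcode_decode_query(decode, "height");

  /* Crude clamp; the real code keeps the aspect ratio via rescale_calculate() */
  if (w > max_w) w = max_w;
  if (h > max_h) h = max_h;

  encode = transcode_encode_setup(profile, decode, NULL, w, h);
  if (!encode)
    goto fail_decode;

  /* One frame is enough for artwork; eof=1 makes the muxer write the trailer */
  ret = transcode_decode(&frame, decode);
  if (ret < 0)
    goto fail_encode;

  ret = transcode_encode(out, encode, frame, 1);

  transcode_encode_cleanup(&encode);
  transcode_decode_cleanup(&decode);
  return ret;

 fail_encode:
  transcode_encode_cleanup(&encode);
 fail_decode:
  transcode_decode_cleanup(&decode);
  return -1;
}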