Browse Source

add multi-decoder support; modify wasm build for ffmpeg 4.11

m7s
xiangxud 3 years ago
parent
commit
7eec81de7f
  1. 1
      a.out.js
  2. BIN
      a.out.wasm
  3. 8
      build_decoder_wasm.sh
  4. 2
      dist/libffmpeg_265.js
  5. BIN
      dist/libffmpeg_265.wasm
  6. 1200
      ffmpeg/include/libavcodec/avcodec.h
  7. 69
      ffmpeg/include/libavcodec/bsf.h
  8. 157
      ffmpeg/include/libavcodec/codec.h
  9. 70
      ffmpeg/include/libavcodec/codec_id.h
  10. 17
      ffmpeg/include/libavcodec/codec_par.h
  11. 1
      ffmpeg/include/libavcodec/dv_profile.h
  12. 115
      ffmpeg/include/libavcodec/packet.h
  13. 4
      ffmpeg/include/libavcodec/qsv.h
  14. 86
      ffmpeg/include/libavcodec/vaapi.h
  15. 19
      ffmpeg/include/libavcodec/vdpau.h
  16. 135
      ffmpeg/include/libavcodec/version.h
  17. 3
      ffmpeg/include/libavcodec/xvmc.h
  18. 2907
      ffmpeg/include/libavformat/avformat.h
  19. 842
      ffmpeg/include/libavformat/avio.h
  20. 47
      ffmpeg/include/libavformat/version.h
  21. 53
      ffmpeg/include/libavformat/version_major.h
  22. 9
      ffmpeg/include/libavutil/adler32.h
  23. 1
      ffmpeg/include/libavutil/aes.h
  24. 1
      ffmpeg/include/libavutil/aes_ctr.h
  25. 2
      ffmpeg/include/libavutil/attributes.h
  26. 3
      ffmpeg/include/libavutil/audio_fifo.h
  27. 2
      ffmpeg/include/libavutil/avassert.h
  28. 6
      ffmpeg/include/libavutil/avutil.h
  29. 37
      ffmpeg/include/libavutil/buffer.h
  30. 575
      ffmpeg/include/libavutil/channel_layout.h
  31. 42
      ffmpeg/include/libavutil/common.h
  32. 33
      ffmpeg/include/libavutil/cpu.h
  33. 1
      ffmpeg/include/libavutil/crc.h
  34. 2
      ffmpeg/include/libavutil/dict.h
  35. 3
      ffmpeg/include/libavutil/display.h
  36. 166
      ffmpeg/include/libavutil/dovi_meta.h
  37. 2
      ffmpeg/include/libavutil/error.h
  38. 2
      ffmpeg/include/libavutil/eval.h
  39. 2
      ffmpeg/include/libavutil/ffversion.h
  40. 251
      ffmpeg/include/libavutil/fifo.h
  41. 3
      ffmpeg/include/libavutil/file.h
  42. 92
      ffmpeg/include/libavutil/film_grain_params.h
  43. 217
      ffmpeg/include/libavutil/frame.h
  44. 6
      ffmpeg/include/libavutil/hash.h
  45. 1
      ffmpeg/include/libavutil/hmac.h
  46. 4
      ffmpeg/include/libavutil/hwcontext.h
  47. 9
      ffmpeg/include/libavutil/hwcontext_d3d11va.h
  48. 13
      ffmpeg/include/libavutil/hwcontext_qsv.h
  49. 40
      ffmpeg/include/libavutil/hwcontext_videotoolbox.h
  50. 151
      ffmpeg/include/libavutil/hwcontext_vulkan.h
  51. 22
      ffmpeg/include/libavutil/imgutils.h
  52. 24
      ffmpeg/include/libavutil/log.h
  53. 30
      ffmpeg/include/libavutil/macros.h
  54. 9
      ffmpeg/include/libavutil/md5.h
  55. 46
      ffmpeg/include/libavutil/mem.h
  56. 6
      ffmpeg/include/libavutil/murmur3.h
  57. 33
      ffmpeg/include/libavutil/opt.h
  58. 32
      ffmpeg/include/libavutil/pixdesc.h
  59. 1
      ffmpeg/include/libavutil/pixelutils.h
  60. 76
      ffmpeg/include/libavutil/pixfmt.h
  61. 5
      ffmpeg/include/libavutil/ripemd.h
  62. 8
      ffmpeg/include/libavutil/samplefmt.h
  63. 5
      ffmpeg/include/libavutil/sha.h
  64. 5
      ffmpeg/include/libavutil/sha512.h
  65. 1
      ffmpeg/include/libavutil/tree.h
  66. 84
      ffmpeg/include/libavutil/tx.h
  67. 48
      ffmpeg/include/libavutil/version.h
  68. 117
      ffmpeg/include/libswscale/swscale.h
  69. 17
      ffmpeg/include/libswscale/version.h
  70. BIN
      ffmpeg/lib/libavcodec.a
  71. BIN
      ffmpeg/lib/libavformat.a
  72. BIN
      ffmpeg/lib/libavutil.a
  73. BIN
      ffmpeg/lib/libswscale.a
  74. 10
      ffmpeg/lib/pkgconfig/libavcodec.pc
  75. 14
      ffmpeg/lib/pkgconfig/libavformat.pc
  76. 10
      ffmpeg/lib/pkgconfig/libavutil.pc
  77. 10
      ffmpeg/lib/pkgconfig/libswscale.pc
  78. 4
      ffmpeg/share/ffmpeg/examples/decode_audio.c
  79. 12
      ffmpeg/share/ffmpeg/examples/decode_video.c
  80. 8
      ffmpeg/share/ffmpeg/examples/demuxing_decoding.c
  81. 30
      ffmpeg/share/ffmpeg/examples/encode_audio.c
  82. 24
      ffmpeg/share/ffmpeg/examples/encode_video.c
  83. 25
      ffmpeg/share/ffmpeg/examples/extract_mvs.c
  84. 13
      ffmpeg/share/ffmpeg/examples/filter_audio.c
  85. 36
      ffmpeg/share/ffmpeg/examples/filtering_audio.c
  86. 18
      ffmpeg/share/ffmpeg/examples/filtering_video.c
  87. 24
      ffmpeg/share/ffmpeg/examples/hw_decode.c
  88. 2
      ffmpeg/share/ffmpeg/examples/metadata.c
  89. 73
      ffmpeg/share/ffmpeg/examples/muxing.c
  90. 59
      ffmpeg/share/ffmpeg/examples/qsvdec.c
  91. 45
      ffmpeg/share/ffmpeg/examples/remuxing.c
  92. 16
      ffmpeg/share/ffmpeg/examples/resampling_audio.c
  93. 64
      ffmpeg/share/ffmpeg/examples/transcode_aac.c
  94. 29
      ffmpeg/share/ffmpeg/examples/transcoding.c
  95. 2
      ffmpeg/share/ffmpeg/examples/vaapi_encode.c
  96. 6
      ffmpeg/share/ffmpeg/examples/vaapi_transcode.c
  97. 1
      libffmpeg_265_js
  98. BIN
      libffmpeg_265_wasm
  99. 1
      node_modules/.bin/mime
  100. 1
      node_modules/.bin/uglifyjs

1
a.out.js

File diff suppressed because one or more lines are too long

BIN
a.out.wasm

Binary file not shown.

8
build_decoder_wasm.sh

@ -16,8 +16,14 @@ emcc decode_video.c ffmpeg/lib/libavcodec.a ffmpeg/lib/libavutil.a ffmpeg/lib/li
-s TOTAL_MEMORY=${TOTAL_MEMORY} \ -s TOTAL_MEMORY=${TOTAL_MEMORY} \
-s EXPORTED_FUNCTIONS="${EXPORTED_FUNCTIONS}" \ -s EXPORTED_FUNCTIONS="${EXPORTED_FUNCTIONS}" \
-s EXPORTED_RUNTIME_METHODS="['addFunction']" \ -s EXPORTED_RUNTIME_METHODS="['addFunction']" \
-s RESERVED_FUNCTION_POINTERS=14 \ -s RESERVED_FUNCTION_POINTERS=18 \
-s FORCE_FILESYSTEM=1 \ -s FORCE_FILESYSTEM=1 \
-o dist/libffmpeg_$1.js -o dist/libffmpeg_$1.js
echo "Finished Build" echo "Finished Build"
rm test/libffmpeg_$1.wasm
rm test/libffmpeg_$1.js
cp dist/libffmpeg_$1.wasm ./test/
cp dist/libffmpeg_$1.js ./test/

2
dist/libffmpeg_265.js

File diff suppressed because one or more lines are too long

BIN
dist/libffmpeg_265.wasm

Binary file not shown.

1200
ffmpeg/include/libavcodec/avcodec.h

File diff suppressed because it is too large

69
ffmpeg/include/libavcodec/bsf.h

@ -30,31 +30,12 @@
#include "packet.h" #include "packet.h"
/** /**
* @defgroup lavc_bsf Bitstream filters * @addtogroup lavc_core
* @ingroup libavc
*
* Bitstream filters transform encoded media data without decoding it. This
* allows e.g. manipulating various header values. Bitstream filters operate on
* @ref AVPacket "AVPackets".
*
* The bitstream filtering API is centered around two structures:
* AVBitStreamFilter and AVBSFContext. The former represents a bitstream filter
* in abstract, the latter a specific filtering process. Obtain an
* AVBitStreamFilter using av_bsf_get_by_name() or av_bsf_iterate(), then pass
* it to av_bsf_alloc() to create an AVBSFContext. Fill in the user-settable
* AVBSFContext fields, as described in its documentation, then call
* av_bsf_init() to prepare the filter context for use.
*
* Submit packets for filtering using av_bsf_send_packet(), obtain filtered
* results with av_bsf_receive_packet(). When no more input packets will be
* sent, submit a NULL AVPacket to signal the end of the stream to the filter.
* av_bsf_receive_packet() will then return trailing packets, if any are
* produced by the filter.
*
* Finally, free the filter context with av_bsf_free().
* @{ * @{
*/ */
typedef struct AVBSFInternal AVBSFInternal;
/** /**
* The bitstream filter state. * The bitstream filter state.
* *
@ -76,6 +57,12 @@ typedef struct AVBSFContext {
*/ */
const struct AVBitStreamFilter *filter; const struct AVBitStreamFilter *filter;
/**
* Opaque libavcodec internal data. Must not be touched by the caller in any
* way.
*/
AVBSFInternal *internal;
/** /**
* Opaque filter-specific private data. If filter->priv_class is non-NULL, * Opaque filter-specific private data. If filter->priv_class is non-NULL,
* this is an AVOptions-enabled struct. * this is an AVOptions-enabled struct.
@ -128,6 +115,20 @@ typedef struct AVBitStreamFilter {
* code to this class. * code to this class.
*/ */
const AVClass *priv_class; const AVClass *priv_class;
/*****************************************************************
* No fields below this line are part of the public API. They
* may not be used outside of libavcodec and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
int priv_data_size;
int (*init)(AVBSFContext *ctx);
int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
void (*close)(AVBSFContext *ctx);
void (*flush)(AVBSFContext *ctx);
} AVBitStreamFilter; } AVBitStreamFilter;
/** /**
@ -153,9 +154,9 @@ const AVBitStreamFilter *av_bsf_iterate(void **opaque);
* av_bsf_init() before sending any data to the filter. * av_bsf_init() before sending any data to the filter.
* *
* @param filter the filter for which to allocate an instance. * @param filter the filter for which to allocate an instance.
* @param[out] ctx a pointer into which the pointer to the newly-allocated context * @param ctx a pointer into which the pointer to the newly-allocated context
* will be written. It must be freed with av_bsf_free() after the * will be written. It must be freed with av_bsf_free() after the
* filtering is done. * filtering is done.
* *
* @return 0 on success, a negative AVERROR code on failure * @return 0 on success, a negative AVERROR code on failure
*/ */
@ -181,11 +182,9 @@ int av_bsf_init(AVBSFContext *ctx);
* sending more empty packets does nothing) and will cause the filter to output * sending more empty packets does nothing) and will cause the filter to output
* any packets it may have buffered internally. * any packets it may have buffered internally.
* *
* @return * @return 0 on success. AVERROR(EAGAIN) if packets need to be retrieved from the
* - 0 on success. * filter (using av_bsf_receive_packet()) before new input can be consumed. Another
* - AVERROR(EAGAIN) if packets need to be retrieved from the filter (using * negative AVERROR value if an error occurs.
* av_bsf_receive_packet()) before new input can be consumed.
* - Another negative AVERROR value if an error occurs.
*/ */
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt); int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
@ -202,12 +201,10 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
* overwritten by the returned data. On failure, pkt is not * overwritten by the returned data. On failure, pkt is not
* touched. * touched.
* *
* @return * @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the
* - 0 on success. * filter (using av_bsf_send_packet()) to get more output. AVERROR_EOF if there
* - AVERROR(EAGAIN) if more packets need to be sent to the filter (using * will be no further output from the filter. Another negative AVERROR value if
* av_bsf_send_packet()) to get more output. * an error occurs.
* - AVERROR_EOF if there will be no further output from the filter.
* - Another negative AVERROR value if an error occurs.
* *
* @note one input packet may result in several output packets, so after sending * @note one input packet may result in several output packets, so after sending
* a packet with av_bsf_send_packet(), this function needs to be called * a packet with av_bsf_send_packet(), this function needs to be called

157
ffmpeg/include/libavcodec/codec.h

@ -31,7 +31,7 @@
#include "libavutil/samplefmt.h" #include "libavutil/samplefmt.h"
#include "libavcodec/codec_id.h" #include "libavcodec/codec_id.h"
#include "libavcodec/version_major.h" #include "libavcodec/version.h"
/** /**
* @addtogroup lavc_core * @addtogroup lavc_core
@ -50,12 +50,7 @@
* avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer. * avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
*/ */
#define AV_CODEC_CAP_DR1 (1 << 1) #define AV_CODEC_CAP_DR1 (1 << 1)
#if FF_API_FLAG_TRUNCATED
/**
* @deprecated Use parsers to always send proper frames.
*/
#define AV_CODEC_CAP_TRUNCATED (1 << 3) #define AV_CODEC_CAP_TRUNCATED (1 << 3)
#endif
/** /**
* Encoder or decoder requires flushing with NULL input at the end in order to * Encoder or decoder requires flushing with NULL input at the end in order to
* give the complete and correct output. * give the complete and correct output.
@ -182,14 +177,6 @@
*/ */
#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21) #define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
/**
* The encoder is able to output reconstructed frame data, i.e. raw frames that
* would be produced by decoding the encoded bitstream.
*
* Reconstructed frame output is enabled by the AV_CODEC_FLAG_RECON_FRAME flag.
*/
#define AV_CODEC_CAP_ENCODER_RECON_FRAME (1 << 22)
/** /**
* AVProfile. * AVProfile.
*/ */
@ -198,6 +185,12 @@ typedef struct AVProfile {
const char *name; ///< short name for the profile const char *name; ///< short name for the profile
} AVProfile; } AVProfile;
typedef struct AVCodecDefault AVCodecDefault;
struct AVCodecContext;
struct AVSubtitle;
struct AVPacket;
/** /**
* AVCodec. * AVCodec.
*/ */
@ -221,18 +214,12 @@ typedef struct AVCodec {
* see AV_CODEC_CAP_* * see AV_CODEC_CAP_*
*/ */
int capabilities; int capabilities;
uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
#if FF_API_OLD_CHANNEL_LAYOUT
/**
* @deprecated use ch_layouts instead
*/
attribute_deprecated
const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
#endif uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
const AVClass *priv_class; ///< AVClass for the private context const AVClass *priv_class; ///< AVClass for the private context
const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
@ -248,10 +235,117 @@ typedef struct AVCodec {
*/ */
const char *wrapper_name; const char *wrapper_name;
/*****************************************************************
* No fields below this line are part of the public API. They
* may not be used outside of libavcodec and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
int priv_data_size;
#if FF_API_NEXT
struct AVCodec *next;
#endif
/**
* @name Frame-level threading support functions
* @{
*/
/**
* Copy necessary context variables from a previous thread context to the current one.
* If not defined, the next thread will start automatically; otherwise, the codec
* must call ff_thread_finish_setup().
*
* dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
*/
int (*update_thread_context)(struct AVCodecContext *dst, const struct AVCodecContext *src);
/** @} */
/**
* Private codec-specific defaults.
*/
const AVCodecDefault *defaults;
/**
* Initialize codec static data, called from av_codec_iterate().
*
* This is not intended for time consuming operations as it is
* run for every codec regardless of that codec being used.
*/
void (*init_static_data)(struct AVCodec *codec);
int (*init)(struct AVCodecContext *);
int (*encode_sub)(struct AVCodecContext *, uint8_t *buf, int buf_size,
const struct AVSubtitle *sub);
/**
* Encode data to an AVPacket.
*
* @param avctx codec context
* @param avpkt output AVPacket
* @param[in] frame AVFrame containing the raw data to be encoded
* @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
* non-empty packet was returned in avpkt.
* @return 0 on success, negative error code on failure
*/
int (*encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
const struct AVFrame *frame, int *got_packet_ptr);
/**
* Decode picture or subtitle data.
*
* @param avctx codec context
* @param outdata codec type dependent output struct
* @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that a
* non-empty frame or subtitle was returned in
* outdata.
* @param[in] avpkt AVPacket containing the data to be decoded
* @return amount of bytes read from the packet on success, negative error
* code on failure
*/
int (*decode)(struct AVCodecContext *avctx, void *outdata,
int *got_frame_ptr, struct AVPacket *avpkt);
int (*close)(struct AVCodecContext *);
/**
* Encode API with decoupled frame/packet dataflow. This function is called
* to get one output packet. It should call ff_encode_get_frame() to obtain
* input data.
*/
int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
/**
* Decode API with decoupled packet/frame dataflow. This function is called
* to get one output frame. It should call ff_decode_get_packet() to obtain
* input data.
*/
int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
/**
* Flush buffers.
* Will be called when seeking
*/
void (*flush)(struct AVCodecContext *);
/**
* Internal codec capabilities.
* See FF_CODEC_CAP_* in internal.h
*/
int caps_internal;
/**
* Decoding only, a comma-separated list of bitstream filters to apply to
* packets before decoding.
*/
const char *bsfs;
/**
* Array of pointers to hardware configurations supported by the codec,
* or NULL if no hardware supported. The array is terminated by a NULL
* pointer.
*
* The user can only access this field via avcodec_get_hw_config().
*/
const struct AVCodecHWConfigInternal *const *hw_configs;
/** /**
* Array of supported channel layouts, terminated with a zeroed layout. * List of supported codec_tags, terminated by FF_CODEC_TAGS_END.
*/ */
const AVChannelLayout *ch_layouts; const uint32_t *codec_tags;
} AVCodec; } AVCodec;
/** /**
@ -271,7 +365,7 @@ const AVCodec *av_codec_iterate(void **opaque);
* @param id AVCodecID of the requested decoder * @param id AVCodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise. * @return A decoder if one was found, NULL otherwise.
*/ */
const AVCodec *avcodec_find_decoder(enum AVCodecID id); AVCodec *avcodec_find_decoder(enum AVCodecID id);
/** /**
* Find a registered decoder with the specified name. * Find a registered decoder with the specified name.
@ -279,7 +373,7 @@ const AVCodec *avcodec_find_decoder(enum AVCodecID id);
* @param name name of the requested decoder * @param name name of the requested decoder
* @return A decoder if one was found, NULL otherwise. * @return A decoder if one was found, NULL otherwise.
*/ */
const AVCodec *avcodec_find_decoder_by_name(const char *name); AVCodec *avcodec_find_decoder_by_name(const char *name);
/** /**
* Find a registered encoder with a matching codec ID. * Find a registered encoder with a matching codec ID.
@ -287,7 +381,7 @@ const AVCodec *avcodec_find_decoder_by_name(const char *name);
* @param id AVCodecID of the requested encoder * @param id AVCodecID of the requested encoder
* @return An encoder if one was found, NULL otherwise. * @return An encoder if one was found, NULL otherwise.
*/ */
const AVCodec *avcodec_find_encoder(enum AVCodecID id); AVCodec *avcodec_find_encoder(enum AVCodecID id);
/** /**
* Find a registered encoder with the specified name. * Find a registered encoder with the specified name.
@ -295,7 +389,7 @@ const AVCodec *avcodec_find_encoder(enum AVCodecID id);
* @param name name of the requested encoder * @param name name of the requested encoder
* @return An encoder if one was found, NULL otherwise. * @return An encoder if one was found, NULL otherwise.
*/ */
const AVCodec *avcodec_find_encoder_by_name(const char *name); AVCodec *avcodec_find_encoder_by_name(const char *name);
/** /**
* @return a non-zero number if codec is an encoder, zero otherwise * @return a non-zero number if codec is an encoder, zero otherwise
*/ */
@ -306,15 +400,6 @@ int av_codec_is_encoder(const AVCodec *codec);
*/ */
int av_codec_is_decoder(const AVCodec *codec); int av_codec_is_decoder(const AVCodec *codec);
/**
* Return a name for the specified profile, if available.
*
* @param codec the codec that is searched for the given profile
* @param profile the profile value for which a name is requested
* @return A name for the profile if found, NULL otherwise.
*/
const char *av_get_profile_name(const AVCodec *codec, int profile);
enum { enum {
/** /**
* The codec supports this format via the hw_device_ctx interface. * The codec supports this format via the hw_device_ctx interface.

70
ffmpeg/include/libavcodec/codec_id.h

@ -22,7 +22,6 @@
#define AVCODEC_CODEC_ID_H #define AVCODEC_CODEC_ID_H
#include "libavutil/avutil.h" #include "libavutil/avutil.h"
#include "libavutil/samplefmt.h"
/** /**
* @addtogroup lavc_core * @addtogroup lavc_core
@ -247,7 +246,8 @@ enum AVCodecID {
AV_CODEC_ID_MSP2, AV_CODEC_ID_MSP2,
AV_CODEC_ID_VVC, AV_CODEC_ID_VVC,
#define AV_CODEC_ID_H266 AV_CODEC_ID_VVC #define AV_CODEC_ID_H266 AV_CODEC_ID_VVC
AV_CODEC_ID_Y41P,
AV_CODEC_ID_Y41P = 0x8000,
AV_CODEC_ID_AVRP, AV_CODEC_ID_AVRP,
AV_CODEC_ID_012V, AV_CODEC_ID_012V,
AV_CODEC_ID_AVUI, AV_CODEC_ID_AVUI,
@ -307,13 +307,6 @@ enum AVCodecID {
AV_CODEC_ID_CRI, AV_CODEC_ID_CRI,
AV_CODEC_ID_SIMBIOSIS_IMX, AV_CODEC_ID_SIMBIOSIS_IMX,
AV_CODEC_ID_SGA_VIDEO, AV_CODEC_ID_SGA_VIDEO,
AV_CODEC_ID_GEM,
AV_CODEC_ID_VBN,
AV_CODEC_ID_JPEGXL,
AV_CODEC_ID_QOI,
AV_CODEC_ID_PHM,
AV_CODEC_ID_RADIANCE_HDR,
AV_CODEC_ID_WBMP,
/* various PCM "codecs" */ /* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -348,7 +341,8 @@ enum AVCodecID {
AV_CODEC_ID_PCM_S24LE_PLANAR, AV_CODEC_ID_PCM_S24LE_PLANAR,
AV_CODEC_ID_PCM_S32LE_PLANAR, AV_CODEC_ID_PCM_S32LE_PLANAR,
AV_CODEC_ID_PCM_S16BE_PLANAR, AV_CODEC_ID_PCM_S16BE_PLANAR,
AV_CODEC_ID_PCM_S64LE,
AV_CODEC_ID_PCM_S64LE = 0x10800,
AV_CODEC_ID_PCM_S64BE, AV_CODEC_ID_PCM_S64BE,
AV_CODEC_ID_PCM_F16LE, AV_CODEC_ID_PCM_F16LE,
AV_CODEC_ID_PCM_F24LE, AV_CODEC_ID_PCM_F24LE,
@ -387,7 +381,8 @@ enum AVCodecID {
AV_CODEC_ID_ADPCM_G722, AV_CODEC_ID_ADPCM_G722,
AV_CODEC_ID_ADPCM_IMA_APC, AV_CODEC_ID_ADPCM_IMA_APC,
AV_CODEC_ID_ADPCM_VIMA, AV_CODEC_ID_ADPCM_VIMA,
AV_CODEC_ID_ADPCM_AFC,
AV_CODEC_ID_ADPCM_AFC = 0x11800,
AV_CODEC_ID_ADPCM_IMA_OKI, AV_CODEC_ID_ADPCM_IMA_OKI,
AV_CODEC_ID_ADPCM_DTK, AV_CODEC_ID_ADPCM_DTK,
AV_CODEC_ID_ADPCM_IMA_RAD, AV_CODEC_ID_ADPCM_IMA_RAD,
@ -406,7 +401,6 @@ enum AVCodecID {
AV_CODEC_ID_ADPCM_IMA_MTF, AV_CODEC_ID_ADPCM_IMA_MTF,
AV_CODEC_ID_ADPCM_IMA_CUNNING, AV_CODEC_ID_ADPCM_IMA_CUNNING,
AV_CODEC_ID_ADPCM_IMA_MOFLEX, AV_CODEC_ID_ADPCM_IMA_MOFLEX,
AV_CODEC_ID_ADPCM_IMA_ACORN,
/* AMR */ /* AMR */
AV_CODEC_ID_AMR_NB = 0x12000, AV_CODEC_ID_AMR_NB = 0x12000,
@ -421,7 +415,8 @@ enum AVCodecID {
AV_CODEC_ID_INTERPLAY_DPCM, AV_CODEC_ID_INTERPLAY_DPCM,
AV_CODEC_ID_XAN_DPCM, AV_CODEC_ID_XAN_DPCM,
AV_CODEC_ID_SOL_DPCM, AV_CODEC_ID_SOL_DPCM,
AV_CODEC_ID_SDX2_DPCM,
AV_CODEC_ID_SDX2_DPCM = 0x14800,
AV_CODEC_ID_GREMLIN_DPCM, AV_CODEC_ID_GREMLIN_DPCM,
AV_CODEC_ID_DERF_DPCM, AV_CODEC_ID_DERF_DPCM,
@ -494,7 +489,8 @@ enum AVCodecID {
AV_CODEC_ID_ON2AVC, AV_CODEC_ID_ON2AVC,
AV_CODEC_ID_DSS_SP, AV_CODEC_ID_DSS_SP,
AV_CODEC_ID_CODEC2, AV_CODEC_ID_CODEC2,
AV_CODEC_ID_FFWAVESYNTH,
AV_CODEC_ID_FFWAVESYNTH = 0x15800,
AV_CODEC_ID_SONIC, AV_CODEC_ID_SONIC,
AV_CODEC_ID_SONIC_LS, AV_CODEC_ID_SONIC_LS,
AV_CODEC_ID_EVRC, AV_CODEC_ID_EVRC,
@ -521,8 +517,6 @@ enum AVCodecID {
AV_CODEC_ID_SIREN, AV_CODEC_ID_SIREN,
AV_CODEC_ID_HCA, AV_CODEC_ID_HCA,
AV_CODEC_ID_FASTAUDIO, AV_CODEC_ID_FASTAUDIO,
AV_CODEC_ID_MSNSIREN,
AV_CODEC_ID_DFPWM,
/* subtitle codecs */ /* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@ -535,7 +529,8 @@ enum AVCodecID {
AV_CODEC_ID_HDMV_PGS_SUBTITLE, AV_CODEC_ID_HDMV_PGS_SUBTITLE,
AV_CODEC_ID_DVB_TELETEXT, AV_CODEC_ID_DVB_TELETEXT,
AV_CODEC_ID_SRT, AV_CODEC_ID_SRT,
AV_CODEC_ID_MICRODVD,
AV_CODEC_ID_MICRODVD = 0x17800,
AV_CODEC_ID_EIA_608, AV_CODEC_ID_EIA_608,
AV_CODEC_ID_JACOSUB, AV_CODEC_ID_JACOSUB,
AV_CODEC_ID_SAMI, AV_CODEC_ID_SAMI,
@ -559,7 +554,7 @@ enum AVCodecID {
AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream. AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream.
AV_CODEC_ID_EPG, AV_CODEC_ID_EPG,
AV_CODEC_ID_BINTEXT, AV_CODEC_ID_BINTEXT = 0x18800,
AV_CODEC_ID_XBIN, AV_CODEC_ID_XBIN,
AV_CODEC_ID_IDF, AV_CODEC_ID_IDF,
AV_CODEC_ID_OTF, AV_CODEC_ID_OTF,
@ -590,45 +585,6 @@ enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
*/ */
const char *avcodec_get_name(enum AVCodecID id); const char *avcodec_get_name(enum AVCodecID id);
/**
* Return codec bits per sample.
*
* @param[in] codec_id the codec
* @return Number of bits per sample or zero if unknown for the given codec.
*/
int av_get_bits_per_sample(enum AVCodecID codec_id);
/**
* Return codec bits per sample.
* Only return non-zero if the bits per sample is exactly correct, not an
* approximation.
*
* @param[in] codec_id the codec
* @return Number of bits per sample or zero if unknown for the given codec.
*/
int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
/**
* Return a name for the specified profile, if available.
*
* @param codec_id the ID of the codec to which the requested profile belongs
* @param profile the profile value for which a name is requested
* @return A name for the profile if found, NULL otherwise.
*
* @note unlike av_get_profile_name(), which searches a list of profiles
* supported by a specific decoder or encoder implementation, this
* function searches the list of profiles from the AVCodecDescriptor
*/
const char *avcodec_profile_name(enum AVCodecID codec_id, int profile);
/**
* Return the PCM codec associated with a sample format.
* @param be endianness, 0 for little, 1 for big,
* -1 (or anything else) for native
* @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
*/
enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
/** /**
* @} * @}
*/ */

17
ffmpeg/include/libavcodec/codec_par.h

@ -24,7 +24,6 @@
#include <stdint.h> #include <stdint.h>
#include "libavutil/avutil.h" #include "libavutil/avutil.h"
#include "libavutil/channel_layout.h"
#include "libavutil/rational.h" #include "libavutil/rational.h"
#include "libavutil/pixfmt.h" #include "libavutil/pixfmt.h"
@ -155,22 +154,16 @@ typedef struct AVCodecParameters {
*/ */
int video_delay; int video_delay;
#if FF_API_OLD_CHANNEL_LAYOUT
/** /**
* Audio only. The channel layout bitmask. May be 0 if the channel layout is * Audio only. The channel layout bitmask. May be 0 if the channel layout is
* unknown or unspecified, otherwise the number of bits set must be equal to * unknown or unspecified, otherwise the number of bits set must be equal to
* the channels field. * the channels field.
* @deprecated use ch_layout
*/ */
attribute_deprecated
uint64_t channel_layout; uint64_t channel_layout;
/** /**
* Audio only. The number of audio channels. * Audio only. The number of audio channels.
* @deprecated use ch_layout.nb_channels
*/ */
attribute_deprecated
int channels; int channels;
#endif
/** /**
* Audio only. The number of audio samples per second. * Audio only. The number of audio samples per second.
*/ */
@ -205,11 +198,6 @@ typedef struct AVCodecParameters {
* Audio only. Number of samples to skip after a discontinuity. * Audio only. Number of samples to skip after a discontinuity.
*/ */
int seek_preroll; int seek_preroll;
/**
* Audio only. The channel layout and number of channels.
*/
AVChannelLayout ch_layout;
} AVCodecParameters; } AVCodecParameters;
/** /**
@ -233,11 +221,6 @@ void avcodec_parameters_free(AVCodecParameters **par);
*/ */
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src); int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);
/**
* This function is the same as av_get_audio_frame_duration(), except it works
* with AVCodecParameters instead of an AVCodecContext.
*/
int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
/** /**
* @} * @}

1
ffmpeg/include/libavcodec/dv_profile.h

@ -23,6 +23,7 @@
#include "libavutil/pixfmt.h" #include "libavutil/pixfmt.h"
#include "libavutil/rational.h" #include "libavutil/rational.h"
#include "avcodec.h"
/* minimum number of bytes to read from a DV stream in order to /* minimum number of bytes to read from a DV stream in order to
* determine the profile */ * determine the profile */

115
ffmpeg/include/libavcodec/packet.h

@ -28,9 +28,8 @@
#include "libavutil/buffer.h" #include "libavutil/buffer.h"
#include "libavutil/dict.h" #include "libavutil/dict.h"
#include "libavutil/rational.h" #include "libavutil/rational.h"
#include "libavutil/version.h"
#include "libavcodec/version_major.h" #include "libavcodec/version.h"
/** /**
* @defgroup lavc_packet AVPacket * @defgroup lavc_packet AVPacket
@ -291,14 +290,6 @@ enum AVPacketSideDataType {
*/ */
AV_PKT_DATA_S12M_TIMECODE, AV_PKT_DATA_S12M_TIMECODE,
/**
* HDR10+ dynamic metadata associated with a video frame. The metadata is in
* the form of the AVDynamicHDRPlus struct and contains
* information for color volume transform - application 4 of
* SMPTE 2094-40:2016 standard.
*/
AV_PKT_DATA_DYNAMIC_HDR10_PLUS,
/** /**
* The number of side data types. * The number of side data types.
* This is not part of the public API/ABI in the sense that it may * This is not part of the public API/ABI in the sense that it may
@ -314,7 +305,11 @@ enum AVPacketSideDataType {
typedef struct AVPacketSideData { typedef struct AVPacketSideData {
uint8_t *data; uint8_t *data;
#if FF_API_BUFFER_SIZE_T
int size;
#else
size_t size; size_t size;
#endif
enum AVPacketSideDataType type; enum AVPacketSideDataType type;
} AVPacketSideData; } AVPacketSideData;
@ -393,29 +388,15 @@ typedef struct AVPacket {
int64_t pos; ///< byte position in stream, -1 if unknown int64_t pos; ///< byte position in stream, -1 if unknown
#if FF_API_CONVERGENCE_DURATION
/** /**
* for some private data of the user * @deprecated Same as the duration field, but as int64_t. This was required
*/ * for Matroska subtitles, whose duration values could overflow when the
void *opaque; * duration field was still an int.
/**
* AVBufferRef for free use by the API user. FFmpeg will never check the
* contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
* the packet is unreferenced. av_packet_copy_props() calls create a new
* reference with av_buffer_ref() for the target packet's opaque_ref field.
*
* This is unrelated to the opaque field, although it serves a similar
* purpose.
*/
AVBufferRef *opaque_ref;
/**
* Time base of the packet's timestamps.
* In the future, this field may be set on packets output by encoders or
* demuxers, but its value will be by default ignored on input to decoders
* or muxers.
*/ */
AVRational time_base; attribute_deprecated
int64_t convergence_duration;
#endif
} AVPacket; } AVPacket;
#if FF_API_INIT_PACKET #if FF_API_INIT_PACKET
@ -448,13 +429,8 @@ typedef struct AVPacketList {
#define AV_PKT_FLAG_DISPOSABLE 0x0010 #define AV_PKT_FLAG_DISPOSABLE 0x0010
enum AVSideDataParamChangeFlags { enum AVSideDataParamChangeFlags {
#if FF_API_OLD_CHANNEL_LAYOUT
/**
* @deprecated those are not used by any decoder
*/
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
#endif
AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
}; };
@ -553,6 +529,45 @@ int av_grow_packet(AVPacket *pkt, int grow_by);
*/ */
int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
#if FF_API_AVPACKET_OLD_API
/**
* @warning This is a hack - the packet memory allocation stuff is broken. The
* packet is allocated if it was not really allocated.
*
* @deprecated Use av_packet_ref or av_packet_make_refcounted
*/
attribute_deprecated
int av_dup_packet(AVPacket *pkt);
/**
* Copy packet, including contents
*
* @return 0 on success, negative AVERROR on fail
*
* @deprecated Use av_packet_ref
*/
attribute_deprecated
int av_copy_packet(AVPacket *dst, const AVPacket *src);
/**
* Copy packet side data
*
* @return 0 on success, negative AVERROR on fail
*
* @deprecated Use av_packet_copy_props
*/
attribute_deprecated
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);
/**
* Free a packet.
*
* @deprecated Use av_packet_unref
*
* @param pkt packet to free
*/
attribute_deprecated
void av_free_packet(AVPacket *pkt);
#endif
/** /**
* Allocate new information of a packet. * Allocate new information of a packet.
* *
@ -562,7 +577,11 @@ int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
* @return pointer to fresh allocated data or NULL otherwise * @return pointer to fresh allocated data or NULL otherwise
*/ */
uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
#if FF_API_BUFFER_SIZE_T
int size);
#else
size_t size); size_t size);
#endif
/** /**
* Wrap an existing array as a packet side data. * Wrap an existing array as a packet side data.
@ -589,7 +608,11 @@ int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
* @return 0 on success, < 0 on failure * @return 0 on success, < 0 on failure
*/ */
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
#if FF_API_BUFFER_SIZE_T
int size);
#else
size_t size); size_t size);
#endif
/** /**
* Get side information from packet. * Get side information from packet.
@ -601,7 +624,19 @@ int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
* @return pointer to data if present or NULL otherwise * @return pointer to data if present or NULL otherwise
*/ */
uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
#if FF_API_BUFFER_SIZE_T
int *size);
#else
size_t *size); size_t *size);
#endif
#if FF_API_MERGE_SD_API
attribute_deprecated
int av_packet_merge_side_data(AVPacket *pkt);
attribute_deprecated
int av_packet_split_side_data(AVPacket *pkt);
#endif
const char *av_packet_side_data_name(enum AVPacketSideDataType type); const char *av_packet_side_data_name(enum AVPacketSideDataType type);
@ -612,7 +647,11 @@ const char *av_packet_side_data_name(enum AVPacketSideDataType type);
* @param size pointer to store the size of the returned data * @param size pointer to store the size of the returned data
* @return pointer to data if successful, NULL otherwise * @return pointer to data if successful, NULL otherwise
*/ */
#if FF_API_BUFFER_SIZE_T
uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);
#else
uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size); uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size);
#endif
/** /**
* Unpack a dictionary from side_data. * Unpack a dictionary from side_data.
* *
@ -621,8 +660,12 @@ uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size);
* @param dict the metadata storage dictionary * @param dict the metadata storage dictionary
* @return 0 on success, < 0 on failure * @return 0 on success, < 0 on failure
*/ */
#if FF_API_BUFFER_SIZE_T
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);
#else
int av_packet_unpack_dictionary(const uint8_t *data, size_t size, int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
AVDictionary **dict); AVDictionary **dict);
#endif
/** /**
* Convenience function to free all the side data stored. * Convenience function to free all the side data stored.

4
ffmpeg/include/libavcodec/qsv.h

@ -21,7 +21,7 @@
#ifndef AVCODEC_QSV_H #ifndef AVCODEC_QSV_H
#define AVCODEC_QSV_H #define AVCODEC_QSV_H
#include <mfxvideo.h> #include <mfx/mfxvideo.h>
#include "libavutil/buffer.h" #include "libavutil/buffer.h"
@ -61,8 +61,6 @@ typedef struct AVQSVContext {
* required by the encoder and the user-provided value nb_opaque_surfaces. * required by the encoder and the user-provided value nb_opaque_surfaces.
* The array of the opaque surfaces will be exported to the caller through * The array of the opaque surfaces will be exported to the caller through
* the opaque_surfaces field. * the opaque_surfaces field.
*
* The caller must set this field to zero for oneVPL (MFX_VERSION >= 2.0)
*/ */
int opaque_alloc; int opaque_alloc;

86
ffmpeg/include/libavcodec/vaapi.h

@ -0,0 +1,86 @@
/*
* Video Acceleration API (shared data between FFmpeg and the video player)
* HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
*
* Copyright (C) 2008-2009 Splitted-Desktop Systems
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VAAPI_H
#define AVCODEC_VAAPI_H
/**
* @file
* @ingroup lavc_codec_hwaccel_vaapi
* Public libavcodec VA API header.
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "version.h"
#if FF_API_STRUCT_VAAPI_CONTEXT
/**
* @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
* @ingroup lavc_codec_hwaccel
* @{
*/
/**
* This structure is used to share data between the FFmpeg library and
* the client video application.
* This shall be zero-allocated and available as
* AVCodecContext.hwaccel_context. All user members can be set once
* during initialization or through each AVCodecContext.get_buffer()
* function call. In any case, they must be valid prior to calling
* decoding functions.
*
* Deprecated: use AVCodecContext.hw_frames_ctx instead.
*/
struct attribute_deprecated vaapi_context {
/**
* Window system dependent data
*
* - encoding: unused
* - decoding: Set by user
*/
void *display;
/**
* Configuration ID
*
* - encoding: unused
* - decoding: Set by user
*/
uint32_t config_id;
/**
* Context ID (video decode pipeline)
*
* - encoding: unused
* - decoding: Set by user
*/
uint32_t context_id;
};
/* @} */
#endif /* FF_API_STRUCT_VAAPI_CONTEXT */
#endif /* AVCODEC_VAAPI_H */

19
ffmpeg/include/libavcodec/vdpau.h

@ -55,6 +55,7 @@
#include "libavutil/attributes.h" #include "libavutil/attributes.h"
#include "avcodec.h" #include "avcodec.h"
#include "version.h"
struct AVCodecContext; struct AVCodecContext;
struct AVFrame; struct AVFrame;
@ -152,6 +153,24 @@ int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type,
*/ */
AVVDPAUContext *av_vdpau_alloc_context(void); AVVDPAUContext *av_vdpau_alloc_context(void);
#if FF_API_VDPAU_PROFILE
/**
* Get a decoder profile that should be used for initializing a VDPAU decoder.
* Should be called from the AVCodecContext.get_format() callback.
*
* @deprecated Use av_vdpau_bind_context() instead.
*
* @param avctx the codec context being used for decoding the stream
* @param profile a pointer into which the result will be written on success.
* The contents of profile are undefined if this function returns
* an error.
*
* @return 0 on success (non-negative), a negative AVERROR on failure.
*/
attribute_deprecated
int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
#endif
/* @}*/ /* @}*/
#endif /* AVCODEC_VDPAU_H */ #endif /* AVCODEC_VDPAU_H */

135
ffmpeg/include/libavcodec/version.h

@ -27,10 +27,9 @@
#include "libavutil/version.h" #include "libavutil/version.h"
#include "version_major.h" #define LIBAVCODEC_VERSION_MAJOR 58
#define LIBAVCODEC_VERSION_MINOR 134
#define LIBAVCODEC_VERSION_MINOR 42 #define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \ LIBAVCODEC_VERSION_MINOR, \
@ -42,4 +41,132 @@
#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) #define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*/
#ifndef FF_API_AVCTX_TIMEBASE
#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CODED_FRAME
#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_SIDEDATA_ONLY_PKT
#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_VDPAU_PROFILE
#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CONVERGENCE_DURATION
#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVPICTURE
#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVPACKET_OLD_API
#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_RTP_CALLBACK
#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_VBV_DELAY
#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CODER_TYPE
#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_STAT_BITS
#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_PRIVATE_OPT
#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_ASS_TIMING
#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_BSF
#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_COPY_CONTEXT
#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_GET_CONTEXT_DEFAULTS
#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_NVENC_OLD_NAME
#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_STRUCT_VAAPI_CONTEXT
#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_MERGE_SD_API
#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_TAG_STRING
#define FF_API_TAG_STRING (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_GETCHROMA
#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_CODEC_GET_SET
#define FF_API_CODEC_GET_SET (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_USER_VISIBLE_AVHWACCEL
#define FF_API_USER_VISIBLE_AVHWACCEL (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LOCKMGR
#define FF_API_LOCKMGR (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_UNSANITIZED_BITRATES
#define FF_API_UNSANITIZED_BITRATES (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OPENH264_SLICE_MODE
#define FF_API_OPENH264_SLICE_MODE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OPENH264_CABAC
#define FF_API_OPENH264_CABAC (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_UNUSED_CODEC_CAPS
#define FF_API_UNUSED_CODEC_CAPS (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVPRIV_PUT_BITS
#define FF_API_AVPRIV_PUT_BITS (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_OLD_ENCDEC
#define FF_API_OLD_ENCDEC (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_AVCODEC_PIX_FMT
#define FF_API_AVCODEC_PIX_FMT (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_MPV_RC_STRATEGY
#define FF_API_MPV_RC_STRATEGY (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_PARSER_CHANGE
#define FF_API_PARSER_CHANGE (LIBAVCODEC_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_THREAD_SAFE_CALLBACKS
#define FF_API_THREAD_SAFE_CALLBACKS (LIBAVCODEC_VERSION_MAJOR < 60)
#endif
#ifndef FF_API_DEBUG_MV
#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 60)
#endif
#ifndef FF_API_GET_FRAME_CLASS
#define FF_API_GET_FRAME_CLASS (LIBAVCODEC_VERSION_MAJOR < 60)
#endif
#ifndef FF_API_AUTO_THREADS
#define FF_API_AUTO_THREADS (LIBAVCODEC_VERSION_MAJOR < 60)
#endif
#ifndef FF_API_INIT_PACKET
#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 60)
#endif
#endif /* AVCODEC_VERSION_H */ #endif /* AVCODEC_VERSION_H */

3
ffmpeg/include/libavcodec/xvmc.h

@ -27,11 +27,10 @@
* Public libavcodec XvMC header. * Public libavcodec XvMC header.
*/ */
#pragma message("XvMC is no longer supported; this header is deprecated and will be removed")
#include <X11/extensions/XvMC.h> #include <X11/extensions/XvMC.h>
#include "libavutil/attributes.h" #include "libavutil/attributes.h"
#include "version.h"
#include "avcodec.h" #include "avcodec.h"
/** /**

2907
ffmpeg/include/libavformat/avformat.h

File diff suppressed because it is too large

842
ffmpeg/include/libavformat/avio.h

@ -1,842 +0,0 @@
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_AVIO_H
#define AVFORMAT_AVIO_H
/**
* @file
* @ingroup lavf_io
* Buffered I/O operations
*/
#include <stdint.h>
#include <stdio.h>
#include "libavutil/attributes.h"
#include "libavutil/dict.h"
#include "libavutil/log.h"
#include "libavformat/version_major.h"
/**
* Seeking works like for a local file.
*/
#define AVIO_SEEKABLE_NORMAL (1 << 0)
/**
* Seeking by timestamp with avio_seek_time() is possible.
*/
#define AVIO_SEEKABLE_TIME (1 << 1)
/**
* Callback for checking whether to abort blocking functions.
* AVERROR_EXIT is returned in this case by the interrupted
* function. During blocking operations, callback is called with
* opaque as parameter. If the callback returns 1, the
* blocking operation will be aborted.
*
* No members can be added to this struct without a major bump, if
* new elements have been added after this struct in AVFormatContext
* or AVIOContext.
*/
typedef struct AVIOInterruptCB {
int (*callback)(void*);
void *opaque;
} AVIOInterruptCB;
/**
* Directory entry types.
*/
enum AVIODirEntryType {
AVIO_ENTRY_UNKNOWN,
AVIO_ENTRY_BLOCK_DEVICE,
AVIO_ENTRY_CHARACTER_DEVICE,
AVIO_ENTRY_DIRECTORY,
AVIO_ENTRY_NAMED_PIPE,
AVIO_ENTRY_SYMBOLIC_LINK,
AVIO_ENTRY_SOCKET,
AVIO_ENTRY_FILE,
AVIO_ENTRY_SERVER,
AVIO_ENTRY_SHARE,
AVIO_ENTRY_WORKGROUP,
};
/**
* Describes single entry of the directory.
*
* Only name and type fields are guaranteed be set.
* Rest of fields are protocol or/and platform dependent and might be unknown.
*/
typedef struct AVIODirEntry {
char *name; /**< Filename */
int type; /**< Type of the entry */
int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise.
Name can be encoded with UTF-8 even though 0 is set. */
int64_t size; /**< File size in bytes, -1 if unknown. */
int64_t modification_timestamp; /**< Time of last modification in microseconds since unix
epoch, -1 if unknown. */
int64_t access_timestamp; /**< Time of last access in microseconds since unix epoch,
-1 if unknown. */
int64_t status_change_timestamp; /**< Time of last status change in microseconds since unix
epoch, -1 if unknown. */
int64_t user_id; /**< User ID of owner, -1 if unknown. */
int64_t group_id; /**< Group ID of owner, -1 if unknown. */
int64_t filemode; /**< Unix file mode, -1 if unknown. */
} AVIODirEntry;
typedef struct AVIODirContext {
struct URLContext *url_context;
} AVIODirContext;
/**
* Different data types that can be returned via the AVIO
* write_data_type callback.
*/
enum AVIODataMarkerType {
/**
* Header data; this needs to be present for the stream to be decodeable.
*/
AVIO_DATA_MARKER_HEADER,
/**
* A point in the output bytestream where a decoder can start decoding
* (i.e. a keyframe). A demuxer/decoder given the data flagged with
* AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT,
* should give decodeable results.
*/
AVIO_DATA_MARKER_SYNC_POINT,
/**
* A point in the output bytestream where a demuxer can start parsing
* (for non self synchronizing bytestream formats). That is, any
* non-keyframe packet start point.
*/
AVIO_DATA_MARKER_BOUNDARY_POINT,
/**
* This is any, unlabelled data. It can either be a muxer not marking
* any positions at all, it can be an actual boundary/sync point
* that the muxer chooses not to mark, or a later part of a packet/fragment
* that is cut into multiple write callbacks due to limited IO buffer size.
*/
AVIO_DATA_MARKER_UNKNOWN,
/**
* Trailer data, which doesn't contain actual content, but only for
* finalizing the output file.
*/
AVIO_DATA_MARKER_TRAILER,
/**
* A point in the output bytestream where the underlying AVIOContext might
* flush the buffer depending on latency or buffering requirements. Typically
* means the end of a packet.
*/
AVIO_DATA_MARKER_FLUSH_POINT,
};
/**
* Bytestream IO Context.
* New public fields can be added with minor version bumps.
* Removal, reordering and changes to existing public fields require
* a major version bump.
* sizeof(AVIOContext) must not be used outside libav*.
*
* @note None of the function pointers in AVIOContext should be called
* directly, they should only be set by the client application
* when implementing custom I/O. Normally these are set to the
* function pointers specified in avio_alloc_context()
*/
typedef struct AVIOContext {
/**
* A class for private options.
*
* If this AVIOContext is created by avio_open2(), av_class is set and
* passes the options down to protocols.
*
* If this AVIOContext is manually allocated, then av_class may be set by
* the caller.
*
* warning -- this field can be NULL, be sure to not pass this AVIOContext
* to any av_opt_* functions in that case.
*/
const AVClass *av_class;
/*
* The following shows the relationship between buffer, buf_ptr,
* buf_ptr_max, buf_end, buf_size, and pos, when reading and when writing
* (since AVIOContext is used for both):
*
**********************************************************************************
* READING
**********************************************************************************
*
* | buffer_size |
* |---------------------------------------|
* | |
*
* buffer buf_ptr buf_end
* +---------------+-----------------------+
* |/ / / / / / / /|/ / / / / / /| |
* read buffer: |/ / consumed / | to be read /| |
* |/ / / / / / / /|/ / / / / / /| |
* +---------------+-----------------------+
*
* pos
* +-------------------------------------------+-----------------+
* input file: | | |
* +-------------------------------------------+-----------------+
*
*
**********************************************************************************
* WRITING
**********************************************************************************
*
* | buffer_size |
* |--------------------------------------|
* | |
*
* buf_ptr_max
* buffer (buf_ptr) buf_end
* +-----------------------+--------------+
* |/ / / / / / / / / / / /| |
* write buffer: | / / to be flushed / / | |
* |/ / / / / / / / / / / /| |
* +-----------------------+--------------+
* buf_ptr can be in this
* due to a backward seek
*
* pos
* +-------------+----------------------------------------------+
* output file: | | |
* +-------------+----------------------------------------------+
*
*/
unsigned char *buffer; /**< Start of the buffer. */
int buffer_size; /**< Maximum buffer size */
unsigned char *buf_ptr; /**< Current position in the buffer */
unsigned char *buf_end; /**< End of the data, may be less than
buffer+buffer_size if the read function returned
less data than requested, e.g. for streams where
no more data has been received yet. */
void *opaque; /**< A private pointer, passed to the read/write/seek/...
functions. */
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
int64_t (*seek)(void *opaque, int64_t offset, int whence);
int64_t pos; /**< position in the file of the current buffer */
int eof_reached; /**< true if was unable to read due to error or eof */
int error; /**< contains the error code or 0 if no error happened */
int write_flag; /**< true if open for writing */
int max_packet_size;
int min_packet_size; /**< Try to buffer at least this amount of data
before flushing it. */
unsigned long checksum;
unsigned char *checksum_ptr;
unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
/**
* Pause or resume playback for network streaming protocols - e.g. MMS.
*/
int (*read_pause)(void *opaque, int pause);
/**
* Seek to a given timestamp in stream with the specified stream_index.
* Needed for some network streaming protocols which don't support seeking
* to byte position.
*/
int64_t (*read_seek)(void *opaque, int stream_index,
int64_t timestamp, int flags);
/**
* A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
*/
int seekable;
/**
* avio_read and avio_write should if possible be satisfied directly
* instead of going through a buffer, and avio_seek will always
* call the underlying seek function directly.
*/
int direct;
/**
* ',' separated list of allowed protocols.
*/
const char *protocol_whitelist;
/**
* ',' separated list of disallowed protocols.
*/
const char *protocol_blacklist;
/**
* A callback that is used instead of write_packet.
*/
int (*write_data_type)(void *opaque, uint8_t *buf, int buf_size,
enum AVIODataMarkerType type, int64_t time);
/**
* If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT,
* but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly
* small chunks of data returned from the callback).
*/
int ignore_boundary_point;
#if FF_API_AVIOCONTEXT_WRITTEN
/**
* @deprecated field utilized privately by libavformat. For a public
* statistic of how many bytes were written out, see
* AVIOContext::bytes_written.
*/
attribute_deprecated
int64_t written;
#endif
/**
* Maximum reached position before a backward seek in the write buffer,
* used keeping track of already written data for a later flush.
*/
unsigned char *buf_ptr_max;
/**
* Read-only statistic of bytes read for this AVIOContext.
*/
int64_t bytes_read;
/**
* Read-only statistic of bytes written for this AVIOContext.
*/
int64_t bytes_written;
} AVIOContext;
/**
* Return the name of the protocol that will handle the passed URL.
*
* NULL is returned if no protocol could be found for the given URL.
*
* @return Name of the protocol or NULL.
*/
const char *avio_find_protocol_name(const char *url);
/**
* Return AVIO_FLAG_* access flags corresponding to the access permissions
* of the resource in url, or a negative value corresponding to an
* AVERROR code in case of failure. The returned access flags are
* masked by the value in flags.
*
* @note This function is intrinsically unsafe, in the sense that the
* checked resource may change its existence or permission status from
* one call to another. Thus you should not trust the returned value,
* unless you are sure that no other processes are accessing the
* checked resource.
*/
int avio_check(const char *url, int flags);
/**
* Open directory for reading.
*
* @param s directory read context. Pointer to a NULL pointer must be passed.
* @param url directory to be listed.
* @param options A dictionary filled with protocol-private options. On return
* this parameter will be destroyed and replaced with a dictionary
* containing options that were not found. May be NULL.
* @return >=0 on success or negative on error.
*/
int avio_open_dir(AVIODirContext **s, const char *url, AVDictionary **options);
/**
* Get next directory entry.
*
* Returned entry must be freed with avio_free_directory_entry(). In particular
* it may outlive AVIODirContext.
*
* @param s directory read context.
* @param[out] next next entry or NULL when no more entries.
* @return >=0 on success or negative on error. End of list is not considered an
* error.
*/
int avio_read_dir(AVIODirContext *s, AVIODirEntry **next);
/**
* Close directory.
*
* @note Entries created using avio_read_dir() are not deleted and must be
* freeded with avio_free_directory_entry().
*
* @param s directory read context.
* @return >=0 on success or negative on error.
*/
int avio_close_dir(AVIODirContext **s);
/**
* Free entry allocated by avio_read_dir().
*
* @param entry entry to be freed.
*/
void avio_free_directory_entry(AVIODirEntry **entry);
/**
* Allocate and initialize an AVIOContext for buffered I/O. It must be later
* freed with avio_context_free().
*
* @param buffer Memory block for input/output operations via AVIOContext.
* The buffer must be allocated with av_malloc() and friends.
* It may be freed and replaced with a new buffer by libavformat.
* AVIOContext.buffer holds the buffer currently in use,
* which must be later freed with av_free().
* @param buffer_size The buffer size is very important for performance.
* For protocols with fixed blocksize it should be set to this blocksize.
* For others a typical size is a cache page, e.g. 4kb.
* @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.
* @param opaque An opaque pointer to user-specific data.
* @param read_packet A function for refilling the buffer, may be NULL.
* For stream protocols, must never return 0 but rather
* a proper AVERROR code.
* @param write_packet A function for writing the buffer contents, may be NULL.
* The function may not change the input buffers content.
* @param seek A function for seeking to specified byte position, may be NULL.
*
* @return Allocated AVIOContext or NULL on failure.
*/
AVIOContext *avio_alloc_context(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
/**
* Free the supplied IO context and everything associated with it.
*
* @param s Double pointer to the IO context. This function will write NULL
* into s.
*/
void avio_context_free(AVIOContext **s);
void avio_w8(AVIOContext *s, int b);
void avio_write(AVIOContext *s, const unsigned char *buf, int size);
void avio_wl64(AVIOContext *s, uint64_t val);
void avio_wb64(AVIOContext *s, uint64_t val);
void avio_wl32(AVIOContext *s, unsigned int val);
void avio_wb32(AVIOContext *s, unsigned int val);
void avio_wl24(AVIOContext *s, unsigned int val);
void avio_wb24(AVIOContext *s, unsigned int val);
void avio_wl16(AVIOContext *s, unsigned int val);
void avio_wb16(AVIOContext *s, unsigned int val);
/**
* Write a NULL-terminated string.
* @return number of bytes written.
*/
int avio_put_str(AVIOContext *s, const char *str);
/**
* Convert an UTF-8 string to UTF-16LE and write it.
* @param s the AVIOContext
* @param str NULL-terminated UTF-8 string
*
* @return number of bytes written.
*/
int avio_put_str16le(AVIOContext *s, const char *str);
/**
* Convert an UTF-8 string to UTF-16BE and write it.
* @param s the AVIOContext
* @param str NULL-terminated UTF-8 string
*
* @return number of bytes written.
*/
int avio_put_str16be(AVIOContext *s, const char *str);
/**
* Mark the written bytestream as a specific type.
*
* Zero-length ranges are omitted from the output.
*
* @param time the stream time the current bytestream pos corresponds to
* (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not
* applicable
* @param type the kind of data written starting at the current pos
*/
void avio_write_marker(AVIOContext *s, int64_t time, enum AVIODataMarkerType type);
/**
* ORing this as the "whence" parameter to a seek function causes it to
* return the filesize without seeking anywhere. Supporting this is optional.
* If it is not supported then the seek function will return <0.
*/
#define AVSEEK_SIZE 0x10000
/**
* Passing this flag as the "whence" parameter to a seek function causes it to
* seek by any means (like reopening and linear reading) or other normally unreasonable
* means that can be extremely slow.
* This may be ignored by the seek code.
*/
#define AVSEEK_FORCE 0x20000
/**
* fseek() equivalent for AVIOContext.
* @return new position or AVERROR.
*/
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence);
/**
* Skip given number of bytes forward
* @return new position or AVERROR.
*/
int64_t avio_skip(AVIOContext *s, int64_t offset);
/**
* ftell() equivalent for AVIOContext.
* @return position or AVERROR.
*/
static av_always_inline int64_t avio_tell(AVIOContext *s)
{
return avio_seek(s, 0, SEEK_CUR);
}
/**
* Get the filesize.
* @return filesize or AVERROR
*/
int64_t avio_size(AVIOContext *s);
/**
* Similar to feof() but also returns nonzero on read errors.
* @return non zero if and only if at end of file or a read error happened when reading.
*/
int avio_feof(AVIOContext *s);
/**
* Writes a formatted string to the context taking a va_list.
* @return number of bytes written, < 0 on error.
*/
int avio_vprintf(AVIOContext *s, const char *fmt, va_list ap);
/**
* Writes a formatted string to the context.
* @return number of bytes written, < 0 on error.
*/
int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3);
/**
* Write a NULL terminated array of strings to the context.
* Usually you don't need to use this function directly but its macro wrapper,
* avio_print.
*/
void avio_print_string_array(AVIOContext *s, const char *strings[]);
/**
* Write strings (const char *) to the context.
* This is a convenience macro around avio_print_string_array and it
* automatically creates the string array from the variable argument list.
* For simple string concatenations this function is more performant than using
* avio_printf since it does not need a temporary buffer.
*/
#define avio_print(s, ...) \
avio_print_string_array(s, (const char*[]){__VA_ARGS__, NULL})
/**
* Force flushing of buffered data.
*
 * For write streams, force the buffered data to be immediately written to the output,
 * without waiting to fill the internal buffer.
*
* For read streams, discard all currently buffered data, and advance the
* reported file position to that of the underlying stream. This does not
* read new data, and does not perform any seeks.
*/
void avio_flush(AVIOContext *s);
/**
* Read size bytes from AVIOContext into buf.
* @return number of bytes read or AVERROR
*/
int avio_read(AVIOContext *s, unsigned char *buf, int size);
/**
* Read size bytes from AVIOContext into buf. Unlike avio_read(), this is allowed
* to read fewer bytes than requested. The missing bytes can be read in the next
* call. This always tries to read at least 1 byte.
* Useful to reduce latency in certain cases.
* @return number of bytes read or AVERROR
*/
int avio_read_partial(AVIOContext *s, unsigned char *buf, int size);
/**
* @name Functions for reading from AVIOContext
* @{
*
* @note return 0 if EOF, so you cannot use it if EOF handling is
* necessary
*/
int avio_r8 (AVIOContext *s);
unsigned int avio_rl16(AVIOContext *s);
unsigned int avio_rl24(AVIOContext *s);
unsigned int avio_rl32(AVIOContext *s);
uint64_t avio_rl64(AVIOContext *s);
unsigned int avio_rb16(AVIOContext *s);
unsigned int avio_rb24(AVIOContext *s);
unsigned int avio_rb32(AVIOContext *s);
uint64_t avio_rb64(AVIOContext *s);
/**
* @}
*/
/**
* Read a string from pb into buf. The reading will terminate when either
* a NULL character was encountered, maxlen bytes have been read, or nothing
* more can be read from pb. The result is guaranteed to be NULL-terminated, it
* will be truncated if buf is too small.
* Note that the string is not interpreted or validated in any way, it
* might get truncated in the middle of a sequence for multi-byte encodings.
*
* @return number of bytes read (is always <= maxlen).
* If reading ends on EOF or error, the return value will be one more than
* bytes actually read.
*/
int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen);
/**
* Read a UTF-16 string from pb and convert it to UTF-8.
* The reading will terminate when either a null or invalid character was
* encountered or maxlen bytes have been read.
* @return number of bytes read (is always <= maxlen)
*/
int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);
int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);
/**
* @name URL open modes
* The flags argument to avio_open must be one of the following
* constants, optionally ORed with other flags.
* @{
*/
#define AVIO_FLAG_READ 1 /**< read-only */
#define AVIO_FLAG_WRITE 2 /**< write-only */
#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */
/**
* @}
*/
/**
* Use non-blocking mode.
* If this flag is set, operations on the context will return
* AVERROR(EAGAIN) if they can not be performed immediately.
* If this flag is not set, operations on the context will never return
* AVERROR(EAGAIN).
* Note that this flag does not affect the opening/connecting of the
* context. Connecting a protocol will always block if necessary (e.g. on
* network protocols) but never hang (e.g. on busy devices).
 * Warning: non-blocking protocol support is work-in-progress; this flag may be
 * silently ignored.
*/
#define AVIO_FLAG_NONBLOCK 8
/**
* Use direct mode.
* avio_read and avio_write should if possible be satisfied directly
* instead of going through a buffer, and avio_seek will always
* call the underlying seek function directly.
*/
#define AVIO_FLAG_DIRECT 0x8000
/**
 * Create and initialize an AVIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @param url resource to access
* @param flags flags which control how the resource indicated by url
* is to be opened
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int avio_open(AVIOContext **s, const char *url, int flags);
/**
 * Create and initialize an AVIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @param url resource to access
* @param flags flags which control how the resource indicated by url
* is to be opened
* @param int_cb an interrupt callback to be used at the protocols level
* @param options A dictionary filled with protocol-private options. On return
* this parameter will be destroyed and replaced with a dict containing options
* that were not found. May be NULL.
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int avio_open2(AVIOContext **s, const char *url, int flags,
const AVIOInterruptCB *int_cb, AVDictionary **options);
/**
* Close the resource accessed by the AVIOContext s and free it.
* This function can only be used if s was opened by avio_open().
*
* The internal buffer is automatically flushed before closing the
* resource.
*
* @return 0 on success, an AVERROR < 0 on error.
* @see avio_closep
*/
int avio_close(AVIOContext *s);
/**
* Close the resource accessed by the AVIOContext *s, free it
* and set the pointer pointing to it to NULL.
* This function can only be used if s was opened by avio_open().
*
* The internal buffer is automatically flushed before closing the
* resource.
*
* @return 0 on success, an AVERROR < 0 on error.
* @see avio_close
*/
int avio_closep(AVIOContext **s);
/**
* Open a write only memory stream.
*
* @param s new IO context
* @return zero if no error.
*/
int avio_open_dyn_buf(AVIOContext **s);
/**
* Return the written size and a pointer to the buffer.
* The AVIOContext stream is left intact.
* The buffer must NOT be freed.
* No padding is added to the buffer.
*
* @param s IO context
* @param pbuffer pointer to a byte buffer
* @return the length of the byte buffer
*/
int avio_get_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
/**
* Return the written size and a pointer to the buffer. The buffer
* must be freed with av_free().
* Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer.
*
* @param s IO context
* @param pbuffer pointer to a byte buffer
* @return the length of the byte buffer
*/
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
/**
* Iterate through names of available protocols.
*
* @param opaque A private pointer representing current protocol.
* It must be a pointer to NULL on first iteration and will
* be updated by successive calls to avio_enum_protocols.
* @param output If set to 1, iterate over output protocols,
* otherwise over input protocols.
*
* @return A static string containing the name of current protocol or NULL
*/
const char *avio_enum_protocols(void **opaque, int output);
/**
 * Get the AVClass associated with the named protocol.
 *
 * @return the AVClass for the given input protocol name, or NULL
*/
const AVClass *avio_protocol_get_class(const char *name);
/**
* Pause and resume playing - only meaningful if using a network streaming
* protocol (e.g. MMS).
*
* @param h IO context from which to call the read_pause function pointer
* @param pause 1 for pause, 0 for resume
*/
int avio_pause(AVIOContext *h, int pause);
/**
* Seek to a given timestamp relative to some component stream.
* Only meaningful if using a network streaming protocol (e.g. MMS.).
*
* @param h IO context from which to call the seek function pointers
* @param stream_index The stream index that the timestamp is relative to.
* If stream_index is (-1) the timestamp should be in AV_TIME_BASE
* units from the beginning of the presentation.
* If a stream_index >= 0 is used and the protocol does not support
* seeking based on component streams, the call will fail.
* @param timestamp timestamp in AVStream.time_base units
* or if there is no stream specified then in AV_TIME_BASE units.
* @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE
* and AVSEEK_FLAG_ANY. The protocol may silently ignore
* AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will
* fail if used and not supported.
* @return >= 0 on success
* @see AVInputFormat::read_seek
*/
int64_t avio_seek_time(AVIOContext *h, int stream_index,
int64_t timestamp, int flags);
/* Avoid a warning. The header can not be included because it breaks c++. */
struct AVBPrint;
/**
* Read contents of h into print buffer, up to max_size bytes, or up to EOF.
*
* @return 0 for success (max_size bytes read or EOF reached), negative error
* code otherwise
*/
int avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size);
/**
* Accept and allocate a client context on a server context.
* @param s the server context
* @param c the client context, must be unallocated
* @return >= 0 on success or a negative value corresponding
* to an AVERROR on failure
*/
int avio_accept(AVIOContext *s, AVIOContext **c);
/**
* Perform one step of the protocol handshake to accept a new client.
* This function must be called on a client returned by avio_accept() before
* using it as a read/write context.
* It is separate from avio_accept() because it may block.
* A step of the handshake is defined by places where the application may
* decide to change the proceedings.
* For example, on a protocol with a request header and a reply header, each
* one can constitute a step because the application may use the parameters
* from the request to change parameters in the reply; or each individual
* chunk of the request can constitute a step.
* If the handshake is already finished, avio_handshake() does nothing and
* returns 0 immediately.
*
* @param c the client context to perform the handshake on
* @return 0 on a complete and successful handshake
* > 0 if the handshake progressed, but is not complete
* < 0 for an AVERROR code
*/
int avio_handshake(AVIOContext *c);
#endif /* AVFORMAT_AVIO_H */

47
ffmpeg/include/libavformat/version.h

@ -1,47 +0,0 @@
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_VERSION_H
#define AVFORMAT_VERSION_H
/**
* @file
* @ingroup libavf
* Libavformat version macros
*/
#include "libavutil/version.h"
#include "version_major.h"
#define LIBAVFORMAT_VERSION_MINOR 30
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \
LIBAVFORMAT_VERSION_MICRO)
#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \
LIBAVFORMAT_VERSION_MICRO)
#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT
#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)
#endif /* AVFORMAT_VERSION_H */

53
ffmpeg/include/libavformat/version_major.h

@ -1,53 +0,0 @@
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_VERSION_MAJOR_H
#define AVFORMAT_VERSION_MAJOR_H
/**
* @file
* @ingroup libavf
* Libavformat version macros
*/
// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium)
// Also please add any ticket numbers that you believe might be affected here
#define LIBAVFORMAT_VERSION_MAJOR 59
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*
*/
#define FF_API_LAVF_PRIV_OPT (LIBAVFORMAT_VERSION_MAJOR < 60)
#define FF_API_COMPUTE_PKT_FIELDS2 (LIBAVFORMAT_VERSION_MAJOR < 60)
#define FF_API_AVIOCONTEXT_WRITTEN (LIBAVFORMAT_VERSION_MAJOR < 60)
#define FF_HLS_TS_OPTIONS (LIBAVFORMAT_VERSION_MAJOR < 60)
#define FF_API_AVSTREAM_CLASS (LIBAVFORMAT_VERSION_MAJOR > 59)
#define FF_API_R_FRAME_RATE 1
#endif /* AVFORMAT_VERSION_MAJOR_H */

9
ffmpeg/include/libavutil/adler32.h

@ -30,6 +30,7 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_adler32 Adler-32 * @defgroup lavu_adler32 Adler-32
@ -39,7 +40,11 @@
* @{ * @{
*/ */
#if FF_API_CRYPTO_SIZE_T
typedef unsigned long AVAdler;
#else
typedef uint32_t AVAdler; typedef uint32_t AVAdler;
#endif
/** /**
* Calculate the Adler32 checksum of a buffer. * Calculate the Adler32 checksum of a buffer.
@ -54,7 +59,11 @@ typedef uint32_t AVAdler;
* @return updated checksum * @return updated checksum
*/ */
AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf, AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf,
#if FF_API_CRYPTO_SIZE_T
unsigned int len) av_pure;
#else
size_t len) av_pure; size_t len) av_pure;
#endif
/** /**
* @} * @}

1
ffmpeg/include/libavutil/aes.h

@ -24,6 +24,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_aes AES * @defgroup lavu_aes AES

1
ffmpeg/include/libavutil/aes_ctr.h

@ -25,6 +25,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
#define AES_CTR_KEY_SIZE (16) #define AES_CTR_KEY_SIZE (16)
#define AES_CTR_IV_SIZE (8) #define AES_CTR_IV_SIZE (8)

2
ffmpeg/include/libavutil/attributes.h

@ -110,7 +110,7 @@
* scheduled for removal. * scheduled for removal.
*/ */
#ifndef AV_NOWARN_DEPRECATED #ifndef AV_NOWARN_DEPRECATED
#if AV_GCC_VERSION_AT_LEAST(4,6) || defined(__clang__) #if AV_GCC_VERSION_AT_LEAST(4,6)
# define AV_NOWARN_DEPRECATED(code) \ # define AV_NOWARN_DEPRECATED(code) \
_Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \

3
ffmpeg/include/libavutil/audio_fifo.h

@ -27,7 +27,8 @@
#ifndef AVUTIL_AUDIO_FIFO_H #ifndef AVUTIL_AUDIO_FIFO_H
#define AVUTIL_AUDIO_FIFO_H #define AVUTIL_AUDIO_FIFO_H
#include "attributes.h" #include "avutil.h"
#include "fifo.h"
#include "samplefmt.h" #include "samplefmt.h"
/** /**

2
ffmpeg/include/libavutil/avassert.h

@ -28,8 +28,8 @@
#define AVUTIL_AVASSERT_H #define AVUTIL_AVASSERT_H
#include <stdlib.h> #include <stdlib.h>
#include "avutil.h"
#include "log.h" #include "log.h"
#include "macros.h"
/** /**
* assert() equivalent, that is always enabled. * assert() equivalent, that is always enabled.

6
ffmpeg/include/libavutil/avutil.h

@ -331,18 +331,12 @@ unsigned av_int_list_length_for_size(unsigned elsize,
#define av_int_list_length(list, term) \ #define av_int_list_length(list, term) \
av_int_list_length_for_size(sizeof(*(list)), list, term) av_int_list_length_for_size(sizeof(*(list)), list, term)
#if FF_API_AV_FOPEN_UTF8
/** /**
* Open a file using a UTF-8 filename. * Open a file using a UTF-8 filename.
* The API of this function matches POSIX fopen(), errors are returned through * The API of this function matches POSIX fopen(), errors are returned through
* errno. * errno.
* @deprecated Avoid using it, as on Windows, the FILE* allocated by this
* function may be allocated with a different CRT than the caller
* who uses the FILE*. No replacement provided in public API.
*/ */
attribute_deprecated
FILE *av_fopen_utf8(const char *path, const char *mode); FILE *av_fopen_utf8(const char *path, const char *mode);
#endif
/** /**
* Return the fractional representation of the internal time base. * Return the fractional representation of the internal time base.

37
ffmpeg/include/libavutil/buffer.h

@ -28,6 +28,8 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "version.h"
/** /**
* @defgroup lavu_buffer AVBuffer * @defgroup lavu_buffer AVBuffer
* @ingroup lavu_data * @ingroup lavu_data
@ -91,7 +93,11 @@ typedef struct AVBufferRef {
/** /**
* Size of data in bytes. * Size of data in bytes.
*/ */
#if FF_API_BUFFER_SIZE_T
int size;
#else
size_t size; size_t size;
#endif
} AVBufferRef; } AVBufferRef;
/** /**
@ -99,13 +105,21 @@ typedef struct AVBufferRef {
* *
* @return an AVBufferRef of given size or NULL when out of memory * @return an AVBufferRef of given size or NULL when out of memory
*/ */
#if FF_API_BUFFER_SIZE_T
AVBufferRef *av_buffer_alloc(int size);
#else
AVBufferRef *av_buffer_alloc(size_t size); AVBufferRef *av_buffer_alloc(size_t size);
#endif
/** /**
* Same as av_buffer_alloc(), except the returned buffer will be initialized * Same as av_buffer_alloc(), except the returned buffer will be initialized
* to zero. * to zero.
*/ */
#if FF_API_BUFFER_SIZE_T
AVBufferRef *av_buffer_allocz(int size);
#else
AVBufferRef *av_buffer_allocz(size_t size); AVBufferRef *av_buffer_allocz(size_t size);
#endif
/** /**
* Always treat the buffer as read-only, even when it has only one * Always treat the buffer as read-only, even when it has only one
@ -128,7 +142,11 @@ AVBufferRef *av_buffer_allocz(size_t size);
* *
* @return an AVBufferRef referring to data on success, NULL on failure. * @return an AVBufferRef referring to data on success, NULL on failure.
*/ */
#if FF_API_BUFFER_SIZE_T
AVBufferRef *av_buffer_create(uint8_t *data, int size,
#else
AVBufferRef *av_buffer_create(uint8_t *data, size_t size, AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
#endif
void (*free)(void *opaque, uint8_t *data), void (*free)(void *opaque, uint8_t *data),
void *opaque, int flags); void *opaque, int flags);
@ -145,7 +163,7 @@ void av_buffer_default_free(void *opaque, uint8_t *data);
* @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
* failure. * failure.
*/ */
AVBufferRef *av_buffer_ref(const AVBufferRef *buf); AVBufferRef *av_buffer_ref(AVBufferRef *buf);
/** /**
* Free a given reference and automatically free the buffer if there are no more * Free a given reference and automatically free the buffer if there are no more
@ -196,7 +214,11 @@ int av_buffer_make_writable(AVBufferRef **buf);
* reference to it (i.e. the one passed to this function). In all other cases * reference to it (i.e. the one passed to this function). In all other cases
* a new buffer is allocated and the data is copied. * a new buffer is allocated and the data is copied.
*/ */
#if FF_API_BUFFER_SIZE_T
int av_buffer_realloc(AVBufferRef **buf, int size);
#else
int av_buffer_realloc(AVBufferRef **buf, size_t size); int av_buffer_realloc(AVBufferRef **buf, size_t size);
#endif
/** /**
* Ensure dst refers to the same data as src. * Ensure dst refers to the same data as src.
@ -212,7 +234,7 @@ int av_buffer_realloc(AVBufferRef **buf, size_t size);
* @return 0 on success * @return 0 on success
* AVERROR(ENOMEM) on memory allocation failure. * AVERROR(ENOMEM) on memory allocation failure.
*/ */
int av_buffer_replace(AVBufferRef **dst, const AVBufferRef *src); int av_buffer_replace(AVBufferRef **dst, AVBufferRef *src);
/** /**
* @} * @}
@ -263,7 +285,11 @@ typedef struct AVBufferPool AVBufferPool;
* (av_buffer_alloc()). * (av_buffer_alloc()).
* @return newly created buffer pool on success, NULL on error. * @return newly created buffer pool on success, NULL on error.
*/ */
#if FF_API_BUFFER_SIZE_T
AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
#else
AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size)); AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size));
#endif
/** /**
* Allocate and initialize a buffer pool with a more complex allocator. * Allocate and initialize a buffer pool with a more complex allocator.
@ -280,8 +306,13 @@ AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size
* data. May be NULL. * data. May be NULL.
* @return newly created buffer pool on success, NULL on error. * @return newly created buffer pool on success, NULL on error.
*/ */
#if FF_API_BUFFER_SIZE_T
AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
AVBufferRef* (*alloc)(void *opaque, int size),
#else
AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque, AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
AVBufferRef* (*alloc)(void *opaque, size_t size), AVBufferRef* (*alloc)(void *opaque, size_t size),
#endif
void (*pool_free)(void *opaque)); void (*pool_free)(void *opaque));
/** /**
@ -313,7 +344,7 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
* therefore you have to use this function to access the original opaque * therefore you have to use this function to access the original opaque
* parameter of an allocated buffer. * parameter of an allocated buffer.
*/ */
void *av_buffer_pool_buffer_get_opaque(const AVBufferRef *ref); void *av_buffer_pool_buffer_get_opaque(AVBufferRef *ref);
/** /**
* @} * @}

575
ffmpeg/include/libavutil/channel_layout.h

@ -23,10 +23,6 @@
#define AVUTIL_CHANNEL_LAYOUT_H #define AVUTIL_CHANNEL_LAYOUT_H
#include <stdint.h> #include <stdint.h>
#include <stdlib.h>
#include "version.h"
#include "attributes.h"
/** /**
* @file * @file
@ -38,111 +34,6 @@
* @{ * @{
*/ */
enum AVChannel {
///< Invalid channel index
AV_CHAN_NONE = -1,
AV_CHAN_FRONT_LEFT,
AV_CHAN_FRONT_RIGHT,
AV_CHAN_FRONT_CENTER,
AV_CHAN_LOW_FREQUENCY,
AV_CHAN_BACK_LEFT,
AV_CHAN_BACK_RIGHT,
AV_CHAN_FRONT_LEFT_OF_CENTER,
AV_CHAN_FRONT_RIGHT_OF_CENTER,
AV_CHAN_BACK_CENTER,
AV_CHAN_SIDE_LEFT,
AV_CHAN_SIDE_RIGHT,
AV_CHAN_TOP_CENTER,
AV_CHAN_TOP_FRONT_LEFT,
AV_CHAN_TOP_FRONT_CENTER,
AV_CHAN_TOP_FRONT_RIGHT,
AV_CHAN_TOP_BACK_LEFT,
AV_CHAN_TOP_BACK_CENTER,
AV_CHAN_TOP_BACK_RIGHT,
/** Stereo downmix. */
AV_CHAN_STEREO_LEFT = 29,
/** See above. */
AV_CHAN_STEREO_RIGHT,
AV_CHAN_WIDE_LEFT,
AV_CHAN_WIDE_RIGHT,
AV_CHAN_SURROUND_DIRECT_LEFT,
AV_CHAN_SURROUND_DIRECT_RIGHT,
AV_CHAN_LOW_FREQUENCY_2,
AV_CHAN_TOP_SIDE_LEFT,
AV_CHAN_TOP_SIDE_RIGHT,
AV_CHAN_BOTTOM_FRONT_CENTER,
AV_CHAN_BOTTOM_FRONT_LEFT,
AV_CHAN_BOTTOM_FRONT_RIGHT,
/** Channel is empty can be safely skipped. */
AV_CHAN_UNUSED = 0x200,
/** Channel contains data, but its position is unknown. */
AV_CHAN_UNKNOWN = 0x300,
/**
* Range of channels between AV_CHAN_AMBISONIC_BASE and
* AV_CHAN_AMBISONIC_END represent Ambisonic components using the ACN system.
*
* Given a channel id <i> between AV_CHAN_AMBISONIC_BASE and
* AV_CHAN_AMBISONIC_END (inclusive), the ACN index of the channel <n> is
* <n> = <i> - AV_CHAN_AMBISONIC_BASE.
*
* @note these values are only used for AV_CHANNEL_ORDER_CUSTOM channel
* orderings, the AV_CHANNEL_ORDER_AMBISONIC ordering orders the channels
* implicitly by their position in the stream.
*/
AV_CHAN_AMBISONIC_BASE = 0x400,
// leave space for 1024 ids, which correspond to maximum order-32 harmonics,
// which should be enough for the foreseeable use cases
AV_CHAN_AMBISONIC_END = 0x7ff,
};
enum AVChannelOrder {
/**
* Only the channel count is specified, without any further information
* about the channel order.
*/
AV_CHANNEL_ORDER_UNSPEC,
/**
* The native channel order, i.e. the channels are in the same order in
* which they are defined in the AVChannel enum. This supports up to 63
* different channels.
*/
AV_CHANNEL_ORDER_NATIVE,
/**
* The channel order does not correspond to any other predefined order and
* is stored as an explicit map. For example, this could be used to support
* layouts with 64 or more channels, or with empty/skipped (AV_CHAN_SILENCE)
* channels at arbitrary positions.
*/
AV_CHANNEL_ORDER_CUSTOM,
/**
* The audio is represented as the decomposition of the sound field into
* spherical harmonics. Each channel corresponds to a single expansion
* component. Channels are ordered according to ACN (Ambisonic Channel
* Number).
*
* The channel with the index n in the stream contains the spherical
* harmonic of degree l and order m given by
* @code{.unparsed}
* l = floor(sqrt(n)),
* m = n - l * (l + 1).
* @endcode
*
* Conversely given a spherical harmonic of degree l and order m, the
* corresponding channel index n is given by
* @code{.unparsed}
* n = l * (l + 1) + m.
* @endcode
*
* Normalization is assumed to be SN3D (Schmidt Semi-Normalization)
* as defined in AmbiX format $ 2.1.
*/
AV_CHANNEL_ORDER_AMBISONIC,
};
/** /**
* @defgroup channel_masks Audio channel masks * @defgroup channel_masks Audio channel masks
* *
@ -155,46 +46,41 @@ enum AVChannelOrder {
* *
* @{ * @{
*/ */
#define AV_CH_FRONT_LEFT (1ULL << AV_CHAN_FRONT_LEFT ) #define AV_CH_FRONT_LEFT 0x00000001
#define AV_CH_FRONT_RIGHT (1ULL << AV_CHAN_FRONT_RIGHT ) #define AV_CH_FRONT_RIGHT 0x00000002
#define AV_CH_FRONT_CENTER (1ULL << AV_CHAN_FRONT_CENTER ) #define AV_CH_FRONT_CENTER 0x00000004
#define AV_CH_LOW_FREQUENCY (1ULL << AV_CHAN_LOW_FREQUENCY ) #define AV_CH_LOW_FREQUENCY 0x00000008
#define AV_CH_BACK_LEFT (1ULL << AV_CHAN_BACK_LEFT ) #define AV_CH_BACK_LEFT 0x00000010
#define AV_CH_BACK_RIGHT (1ULL << AV_CHAN_BACK_RIGHT ) #define AV_CH_BACK_RIGHT 0x00000020
#define AV_CH_FRONT_LEFT_OF_CENTER (1ULL << AV_CHAN_FRONT_LEFT_OF_CENTER ) #define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
#define AV_CH_FRONT_RIGHT_OF_CENTER (1ULL << AV_CHAN_FRONT_RIGHT_OF_CENTER) #define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
#define AV_CH_BACK_CENTER (1ULL << AV_CHAN_BACK_CENTER ) #define AV_CH_BACK_CENTER 0x00000100
#define AV_CH_SIDE_LEFT (1ULL << AV_CHAN_SIDE_LEFT ) #define AV_CH_SIDE_LEFT 0x00000200
#define AV_CH_SIDE_RIGHT (1ULL << AV_CHAN_SIDE_RIGHT ) #define AV_CH_SIDE_RIGHT 0x00000400
#define AV_CH_TOP_CENTER (1ULL << AV_CHAN_TOP_CENTER ) #define AV_CH_TOP_CENTER 0x00000800
#define AV_CH_TOP_FRONT_LEFT (1ULL << AV_CHAN_TOP_FRONT_LEFT ) #define AV_CH_TOP_FRONT_LEFT 0x00001000
#define AV_CH_TOP_FRONT_CENTER (1ULL << AV_CHAN_TOP_FRONT_CENTER ) #define AV_CH_TOP_FRONT_CENTER 0x00002000
#define AV_CH_TOP_FRONT_RIGHT (1ULL << AV_CHAN_TOP_FRONT_RIGHT ) #define AV_CH_TOP_FRONT_RIGHT 0x00004000
#define AV_CH_TOP_BACK_LEFT (1ULL << AV_CHAN_TOP_BACK_LEFT ) #define AV_CH_TOP_BACK_LEFT 0x00008000
#define AV_CH_TOP_BACK_CENTER (1ULL << AV_CHAN_TOP_BACK_CENTER ) #define AV_CH_TOP_BACK_CENTER 0x00010000
#define AV_CH_TOP_BACK_RIGHT (1ULL << AV_CHAN_TOP_BACK_RIGHT ) #define AV_CH_TOP_BACK_RIGHT 0x00020000
#define AV_CH_STEREO_LEFT (1ULL << AV_CHAN_STEREO_LEFT ) #define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
#define AV_CH_STEREO_RIGHT (1ULL << AV_CHAN_STEREO_RIGHT ) #define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
#define AV_CH_WIDE_LEFT (1ULL << AV_CHAN_WIDE_LEFT ) #define AV_CH_WIDE_LEFT 0x0000000080000000ULL
#define AV_CH_WIDE_RIGHT (1ULL << AV_CHAN_WIDE_RIGHT ) #define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
#define AV_CH_SURROUND_DIRECT_LEFT (1ULL << AV_CHAN_SURROUND_DIRECT_LEFT ) #define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
#define AV_CH_SURROUND_DIRECT_RIGHT (1ULL << AV_CHAN_SURROUND_DIRECT_RIGHT) #define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
#define AV_CH_LOW_FREQUENCY_2 (1ULL << AV_CHAN_LOW_FREQUENCY_2 ) #define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
#define AV_CH_TOP_SIDE_LEFT (1ULL << AV_CHAN_TOP_SIDE_LEFT ) #define AV_CH_TOP_SIDE_LEFT 0x0000001000000000ULL
#define AV_CH_TOP_SIDE_RIGHT (1ULL << AV_CHAN_TOP_SIDE_RIGHT ) #define AV_CH_TOP_SIDE_RIGHT 0x0000002000000000ULL
#define AV_CH_BOTTOM_FRONT_CENTER (1ULL << AV_CHAN_BOTTOM_FRONT_CENTER ) #define AV_CH_BOTTOM_FRONT_CENTER 0x0000004000000000ULL
#define AV_CH_BOTTOM_FRONT_LEFT (1ULL << AV_CHAN_BOTTOM_FRONT_LEFT ) #define AV_CH_BOTTOM_FRONT_LEFT 0x0000008000000000ULL
#define AV_CH_BOTTOM_FRONT_RIGHT (1ULL << AV_CHAN_BOTTOM_FRONT_RIGHT ) #define AV_CH_BOTTOM_FRONT_RIGHT 0x0000010000000000ULL
#if FF_API_OLD_CHANNEL_LAYOUT
/** Channel mask value used for AVCodecContext.request_channel_layout /** Channel mask value used for AVCodecContext.request_channel_layout
to indicate that the user requests the channel order of the decoder output to indicate that the user requests the channel order of the decoder output
to be the native codec channel order. to be the native codec channel order. */
@deprecated channel order is now indicated in a special field in
AVChannelLayout
*/
#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL #define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
#endif
/** /**
* @} * @}
@ -242,149 +128,6 @@ enum AVMatrixEncoding {
AV_MATRIX_ENCODING_NB AV_MATRIX_ENCODING_NB
}; };
/**
* @}
*/
/**
* An AVChannelCustom defines a single channel within a custom order layout
*
* Unlike most structures in FFmpeg, sizeof(AVChannelCustom) is a part of the
* public ABI.
*
* No new fields may be added to it without a major version bump.
*/
typedef struct AVChannelCustom {
enum AVChannel id;
char name[16];
void *opaque;
} AVChannelCustom;
/**
* An AVChannelLayout holds information about the channel layout of audio data.
*
* A channel layout here is defined as a set of channels ordered in a specific
* way (unless the channel order is AV_CHANNEL_ORDER_UNSPEC, in which case an
* AVChannelLayout carries only the channel count).
*
* Unlike most structures in Libav, sizeof(AVChannelLayout) is a part of the
* public ABI and may be used by the caller. E.g. it may be allocated on stack
* or embedded in caller-defined structs.
*
* AVChannelLayout can be initialized as follows:
* - default initialization with {0}, followed by setting all used fields
* correctly;
* - by assigning one of the predefined AV_CHANNEL_LAYOUT_* initializers;
* - with a constructor function, such as av_channel_layout_default(),
* av_channel_layout_from_mask() or av_channel_layout_from_string().
*
* The channel layout must be unitialized with av_channel_layout_uninit()
*
* Copying an AVChannelLayout via assigning is forbidden,
* av_channel_layout_copy() must be used instead (and its return value should
* be checked)
*
* No new fields may be added to it without a major version bump, except for
* new elements of the union fitting in sizeof(uint64_t).
*/
typedef struct AVChannelLayout {
/**
* Channel order used in this layout.
* This is a mandatory field.
*/
enum AVChannelOrder order;
/**
* Number of channels in this layout. Mandatory field.
*/
int nb_channels;
/**
* Details about which channels are present in this layout.
* For AV_CHANNEL_ORDER_UNSPEC, this field is undefined and must not be
* used.
*/
union {
/**
* This member must be used for AV_CHANNEL_ORDER_NATIVE, and may be used
* for AV_CHANNEL_ORDER_AMBISONIC to signal non-diegetic channels.
* It is a bitmask, where the position of each set bit means that the
* AVChannel with the corresponding value is present.
*
* I.e. when (mask & (1 << AV_CHAN_FOO)) is non-zero, then AV_CHAN_FOO
* is present in the layout. Otherwise it is not present.
*
* @note when a channel layout using a bitmask is constructed or
* modified manually (i.e. not using any of the av_channel_layout_*
* functions), the code doing it must ensure that the number of set bits
* is equal to nb_channels.
*/
uint64_t mask;
/**
* This member must be used when the channel order is
* AV_CHANNEL_ORDER_CUSTOM. It is a nb_channels-sized array, with each
* element signalling the presence of the AVChannel with the
* corresponding value in map[i].id.
*
* I.e. when map[i].id is equal to AV_CHAN_FOO, then AV_CH_FOO is the
* i-th channel in the audio data.
*
* When map[i].id is in the range between AV_CHAN_AMBISONIC_BASE and
* AV_CHAN_AMBISONIC_END (inclusive), the channel contains an ambisonic
* component with ACN index (as defined above)
* n = map[i].id - AV_CHAN_AMBISONIC_BASE.
*
* map[i].name may be filled with a 0-terminated string, in which case
* it will be used for the purpose of identifying the channel with the
* convenience functions below. Otherise it must be zeroed.
*/
AVChannelCustom *map;
} u;
/**
* For some private data of the user.
*/
void *opaque;
} AVChannelLayout;
#define AV_CHANNEL_LAYOUT_MASK(nb, m) \
{ .order = AV_CHANNEL_ORDER_NATIVE, .nb_channels = (nb), .u = { .mask = (m) }}
#define AV_CHANNEL_LAYOUT_MONO AV_CHANNEL_LAYOUT_MASK(1, AV_CH_LAYOUT_MONO)
#define AV_CHANNEL_LAYOUT_STEREO AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO)
#define AV_CHANNEL_LAYOUT_2POINT1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2POINT1)
#define AV_CHANNEL_LAYOUT_2_1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2_1)
#define AV_CHANNEL_LAYOUT_SURROUND AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_SURROUND)
#define AV_CHANNEL_LAYOUT_3POINT1 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_3POINT1)
#define AV_CHANNEL_LAYOUT_4POINT0 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_4POINT0)
#define AV_CHANNEL_LAYOUT_4POINT1 AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_4POINT1)
#define AV_CHANNEL_LAYOUT_2_2 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_2_2)
#define AV_CHANNEL_LAYOUT_QUAD AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_QUAD)
#define AV_CHANNEL_LAYOUT_5POINT0 AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0)
#define AV_CHANNEL_LAYOUT_5POINT1 AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1)
#define AV_CHANNEL_LAYOUT_5POINT0_BACK AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0_BACK)
#define AV_CHANNEL_LAYOUT_5POINT1_BACK AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1_BACK)
#define AV_CHANNEL_LAYOUT_6POINT0 AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0)
#define AV_CHANNEL_LAYOUT_6POINT0_FRONT AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0_FRONT)
#define AV_CHANNEL_LAYOUT_HEXAGONAL AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_HEXAGONAL)
#define AV_CHANNEL_LAYOUT_6POINT1 AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1)
#define AV_CHANNEL_LAYOUT_6POINT1_BACK AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_BACK)
#define AV_CHANNEL_LAYOUT_6POINT1_FRONT AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_FRONT)
#define AV_CHANNEL_LAYOUT_7POINT0 AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0)
#define AV_CHANNEL_LAYOUT_7POINT0_FRONT AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0_FRONT)
#define AV_CHANNEL_LAYOUT_7POINT1 AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1)
#define AV_CHANNEL_LAYOUT_7POINT1_WIDE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE)
#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE_BACK)
#define AV_CHANNEL_LAYOUT_OCTAGONAL AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_OCTAGONAL)
#define AV_CHANNEL_LAYOUT_HEXADECAGONAL AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO_DOWNMIX)
#define AV_CHANNEL_LAYOUT_22POINT2 AV_CHANNEL_LAYOUT_MASK(24, AV_CH_LAYOUT_22POINT2)
#define AV_CHANNEL_LAYOUT_AMBISONIC_FIRST_ORDER \
{ .order = AV_CHANNEL_ORDER_AMBISONIC, .nb_channels = 4, .u = { .mask = 0 }}
struct AVBPrint;
#if FF_API_OLD_CHANNEL_LAYOUT
/** /**
* Return a channel layout id that matches name, or 0 if no match is found. * Return a channel layout id that matches name, or 0 if no match is found.
* *
@ -401,10 +144,7 @@ struct AVBPrint;
* AV_CH_* macros). * AV_CH_* macros).
* *
* Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
*
* @deprecated use av_channel_layout_from_string()
*/ */
attribute_deprecated
uint64_t av_get_channel_layout(const char *name); uint64_t av_get_channel_layout(const char *name);
/** /**
@ -418,9 +158,7 @@ uint64_t av_get_channel_layout(const char *name);
* @param[out] nb_channels number of channels * @param[out] nb_channels number of channels
* *
* @return 0 on success, AVERROR(EINVAL) if the parsing fails. * @return 0 on success, AVERROR(EINVAL) if the parsing fails.
* @deprecated use av_channel_layout_from_string()
*/ */
attribute_deprecated
int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels); int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels);
/** /**
@ -429,31 +167,23 @@ int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, i
* *
* @param buf put here the string containing the channel layout * @param buf put here the string containing the channel layout
* @param buf_size size in bytes of the buffer * @param buf_size size in bytes of the buffer
* @deprecated use av_channel_layout_describe()
*/ */
attribute_deprecated
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
struct AVBPrint;
/** /**
* Append a description of a channel layout to a bprint buffer. * Append a description of a channel layout to a bprint buffer.
* @deprecated use av_channel_layout_describe()
*/ */
attribute_deprecated
void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
/** /**
* Return the number of channels in the channel layout. * Return the number of channels in the channel layout.
* @deprecated use AVChannelLayout.nb_channels
*/ */
attribute_deprecated
int av_get_channel_layout_nb_channels(uint64_t channel_layout); int av_get_channel_layout_nb_channels(uint64_t channel_layout);
/** /**
* Return default channel layout for a given number of channels. * Return default channel layout for a given number of channels.
*
* @deprecated use av_channel_layout_default()
*/ */
attribute_deprecated
int64_t av_get_default_channel_layout(int nb_channels); int64_t av_get_default_channel_layout(int nb_channels);
/** /**
@ -464,28 +194,20 @@ int64_t av_get_default_channel_layout(int nb_channels);
* *
* @return index of channel in channel_layout on success, a negative AVERROR * @return index of channel in channel_layout on success, a negative AVERROR
* on error. * on error.
*
* @deprecated use av_channel_layout_index_from_channel()
*/ */
attribute_deprecated
int av_get_channel_layout_channel_index(uint64_t channel_layout, int av_get_channel_layout_channel_index(uint64_t channel_layout,
uint64_t channel); uint64_t channel);
/** /**
* Get the channel with the given index in channel_layout. * Get the channel with the given index in channel_layout.
* @deprecated use av_channel_layout_channel_from_index()
*/ */
attribute_deprecated
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
/** /**
* Get the name of a given channel. * Get the name of a given channel.
* *
* @return channel name on success, NULL on error. * @return channel name on success, NULL on error.
*
* @deprecated use av_channel_name()
*/ */
attribute_deprecated
const char *av_get_channel_name(uint64_t channel); const char *av_get_channel_name(uint64_t channel);
/** /**
@ -493,9 +215,7 @@ const char *av_get_channel_name(uint64_t channel);
* *
* @param channel a channel layout with a single channel * @param channel a channel layout with a single channel
* @return channel description on success, NULL on error * @return channel description on success, NULL on error
* @deprecated use av_channel_description()
*/ */
attribute_deprecated
const char *av_get_channel_description(uint64_t channel); const char *av_get_channel_description(uint64_t channel);
/** /**
@ -506,240 +226,9 @@ const char *av_get_channel_description(uint64_t channel);
* @param[out] name name of the layout * @param[out] name name of the layout
* @return 0 if the layout exists, * @return 0 if the layout exists,
* <0 if index is beyond the limits * <0 if index is beyond the limits
* @deprecated use av_channel_layout_standard()
*/ */
attribute_deprecated
int av_get_standard_channel_layout(unsigned index, uint64_t *layout, int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
const char **name); const char **name);
#endif
/**
* Get a human readable string in an abbreviated form describing a given channel.
* This is the inverse function of @ref av_channel_from_string().
*
* @param buf pre-allocated buffer where to put the generated string
* @param buf_size size in bytes of the buffer.
* @return amount of bytes needed to hold the output string, or a negative AVERROR
* on failure. If the returned value is bigger than buf_size, then the
* string was truncated.
*/
int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel);
/**
* bprint variant of av_channel_name().
*
* @note the string will be appended to the bprint buffer.
*/
void av_channel_name_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
/**
* Get a human readable string describing a given channel.
*
* @param buf pre-allocated buffer where to put the generated string
* @param buf_size size in bytes of the buffer.
* @return amount of bytes needed to hold the output string, or a negative AVERROR
* on failure. If the returned value is bigger than buf_size, then the
* string was truncated.
*/
int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel);
/**
* bprint variant of av_channel_description().
*
* @note the string will be appended to the bprint buffer.
*/
void av_channel_description_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
/**
* This is the inverse function of @ref av_channel_name().
*
* @return the channel with the given name
* AV_CHAN_NONE when name does not identify a known channel
*/
enum AVChannel av_channel_from_string(const char *name);
/**
* Initialize a native channel layout from a bitmask indicating which channels
* are present.
*
* @param channel_layout the layout structure to be initialized
* @param mask bitmask describing the channel layout
*
* @return 0 on success
* AVERROR(EINVAL) for invalid mask values
*/
int av_channel_layout_from_mask(AVChannelLayout *channel_layout, uint64_t mask);
/**
* Initialize a channel layout from a given string description.
* The input string can be represented by:
* - the formal channel layout name (returned by av_channel_layout_describe())
* - single or multiple channel names (returned by av_channel_name(), eg. "FL",
* or concatenated with "+", each optionally containing a custom name after
* a "@", eg. "FL@Left+FR@Right+LFE")
* - a decimal or hexadecimal value of a native channel layout (eg. "4" or "0x4")
* - the number of channels with default layout (eg. "4c")
* - the number of unordered channels (eg. "4C" or "4 channels")
* - the ambisonic order followed by optional non-diegetic channels (eg.
* "ambisonic 2+stereo")
*
* @param channel_layout input channel layout
* @param str string describing the channel layout
* @return 0 channel layout was detected, AVERROR_INVALIDATATA otherwise
*/
int av_channel_layout_from_string(AVChannelLayout *channel_layout,
const char *str);
/**
* Get the default channel layout for a given number of channels.
*
* @param channel_layout the layout structure to be initialized
* @param nb_channels number of channels
*/
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels);
/**
* Iterate over all standard channel layouts.
*
* @param opaque a pointer where libavutil will store the iteration state. Must
* point to NULL to start the iteration.
*
* @return the standard channel layout or NULL when the iteration is
* finished
*/
const AVChannelLayout *av_channel_layout_standard(void **opaque);
/**
* Free any allocated data in the channel layout and reset the channel
* count to 0.
*
* @param channel_layout the layout structure to be uninitialized
*/
void av_channel_layout_uninit(AVChannelLayout *channel_layout);
/**
* Make a copy of a channel layout. This differs from just assigning src to dst
* in that it allocates and copies the map for AV_CHANNEL_ORDER_CUSTOM.
*
* @note the destination channel_layout will be always uninitialized before copy.
*
* @param dst destination channel layout
* @param src source channel layout
* @return 0 on success, a negative AVERROR on error.
*/
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src);
/**
* Get a human-readable string describing the channel layout properties.
* The string will be in the same format that is accepted by
* @ref av_channel_layout_from_string(), allowing to rebuild the same
* channel layout, except for opaque pointers.
*
* @param channel_layout channel layout to be described
* @param buf pre-allocated buffer where to put the generated string
* @param buf_size size in bytes of the buffer.
* @return amount of bytes needed to hold the output string, or a negative AVERROR
* on failure. If the returned value is bigger than buf_size, then the
* string was truncated.
*/
int av_channel_layout_describe(const AVChannelLayout *channel_layout,
char *buf, size_t buf_size);
/**
* bprint variant of av_channel_layout_describe().
*
* @note the string will be appended to the bprint buffer.
* @return 0 on success, or a negative AVERROR value on failure.
*/
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout,
struct AVBPrint *bp);
/**
* Get the channel with the given index in a channel layout.
*
* @param channel_layout input channel layout
* @return channel with the index idx in channel_layout on success or
* AV_CHAN_NONE on failure (if idx is not valid or the channel order is
* unspecified)
*/
enum AVChannel
av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx);
/**
* Get the index of a given channel in a channel layout. In case multiple
* channels are found, only the first match will be returned.
*
* @param channel_layout input channel layout
* @return index of channel in channel_layout on success or a negative number if
* channel is not present in channel_layout.
*/
int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout,
enum AVChannel channel);
/**
* Get the index in a channel layout of a channel described by the given string.
* In case multiple channels are found, only the first match will be returned.
*
* This function accepts channel names in the same format as
* @ref av_channel_from_string().
*
* @param channel_layout input channel layout
* @return a channel index described by the given string, or a negative AVERROR
* value.
*/
int av_channel_layout_index_from_string(const AVChannelLayout *channel_layout,
const char *name);
/**
* Get a channel described by the given string.
*
* This function accepts channel names in the same format as
* @ref av_channel_from_string().
*
* @param channel_layout input channel layout
* @return a channel described by the given string in channel_layout on success
* or AV_CHAN_NONE on failure (if the string is not valid or the channel
* order is unspecified)
*/
enum AVChannel
av_channel_layout_channel_from_string(const AVChannelLayout *channel_layout,
const char *name);
/**
* Find out what channels from a given set are present in a channel layout,
* without regard for their positions.
*
* @param channel_layout input channel layout
* @param mask a combination of AV_CH_* representing a set of channels
* @return a bitfield representing all the channels from mask that are present
* in channel_layout
*/
uint64_t av_channel_layout_subset(const AVChannelLayout *channel_layout,
uint64_t mask);
/**
* Check whether a channel layout is valid, i.e. can possibly describe audio
* data.
*
* @param channel_layout input channel layout
* @return 1 if channel_layout is valid, 0 otherwise.
*/
int av_channel_layout_check(const AVChannelLayout *channel_layout);
/**
* Check whether two channel layouts are semantically the same, i.e. the same
* channels are present on the same positions in both.
*
* If one of the channel layouts is AV_CHANNEL_ORDER_UNSPEC, while the other is
* not, they are considered to be unequal. If both are AV_CHANNEL_ORDER_UNSPEC,
* they are considered equal iff the channel counts are the same in both.
*
* @param chl input channel layout
* @param chl1 input channel layout
* @return 0 if chl and chl1 are equal, 1 if they are not equal. A negative
* AVERROR code if one or both are invalid.
*/
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1);
/** /**
* @} * @}

42
ffmpeg/include/libavutil/common.h

@ -41,6 +41,14 @@
#include "attributes.h" #include "attributes.h"
#include "macros.h" #include "macros.h"
#include "version.h"
#include "libavutil/avconfig.h"
#if AV_HAVE_BIGENDIAN
# define AV_NE(be, le) (be)
#else
# define AV_NE(be, le) (le)
#endif
//rounded division & shift //rounded division & shift
#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) #define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
@ -81,6 +89,25 @@
#define FFABSU(a) ((a) <= 0 ? -(unsigned)(a) : (unsigned)(a)) #define FFABSU(a) ((a) <= 0 ? -(unsigned)(a) : (unsigned)(a))
#define FFABS64U(a) ((a) <= 0 ? -(uint64_t)(a) : (uint64_t)(a)) #define FFABS64U(a) ((a) <= 0 ? -(uint64_t)(a) : (uint64_t)(a))
/**
* Comparator.
* For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
* if x == y. This is useful for instance in a qsort comparator callback.
* Furthermore, compilers are able to optimize this to branchless code, and
* there is no risk of overflow with signed types.
* As with many macros, this evaluates its argument multiple times, it thus
* must not have a side-effect.
*/
#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
/* misc math functions */ /* misc math functions */
#ifdef HAVE_AV_CONFIG_H #ifdef HAVE_AV_CONFIG_H
@ -378,8 +405,6 @@ static av_always_inline int64_t av_sat_sub64_c(int64_t a, int64_t b) {
/** /**
* Clip a float value into the amin-amax range. * Clip a float value into the amin-amax range.
* If a is nan or -inf amin will be returned.
* If a is +inf amax will be returned.
* @param a value to clip * @param a value to clip
* @param amin minimum value of the clip range * @param amin minimum value of the clip range
* @param amax maximum value of the clip range * @param amax maximum value of the clip range
@ -390,13 +415,13 @@ static av_always_inline av_const float av_clipf_c(float a, float amin, float ama
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 #if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort(); if (amin > amax) abort();
#endif #endif
return FFMIN(FFMAX(a, amin), amax); if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
} }
/** /**
* Clip a double value into the amin-amax range. * Clip a double value into the amin-amax range.
* If a is nan or -inf amin will be returned.
* If a is +inf amax will be returned.
* @param a value to clip * @param a value to clip
* @param amin minimum value of the clip range * @param amin minimum value of the clip range
* @param amax maximum value of the clip range * @param amax maximum value of the clip range
@ -407,7 +432,9 @@ static av_always_inline av_const double av_clipd_c(double a, double amin, double
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 #if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort(); if (amin > amax) abort();
#endif #endif
return FFMIN(FFMAX(a, amin), amax); if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
} }
/** Compute ceil(log2(x)). /** Compute ceil(log2(x)).
@ -448,6 +475,9 @@ static av_always_inline av_const int av_parity_c(uint32_t v)
return av_popcount(v) & 1; return av_popcount(v) & 1;
} }
#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
/** /**
* Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
* *

33
ffmpeg/include/libavutil/cpu.h

@ -23,6 +23,8 @@
#include <stddef.h> #include <stddef.h>
#include "attributes.h"
#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ #define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
/* lower 16 bits - CPU features */ /* lower 16 bits - CPU features */
@ -54,8 +56,6 @@
#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1 #define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2 #define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
#define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used #define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used
#define AV_CPU_FLAG_AVX512ICL 0x200000 ///< F/CD/BW/DQ/VL/VNNI/IFMA/VBMI/VBMI2/VPOPCNTDQ/BITALG/GFNI/VAES/VPCLMULQDQ
#define AV_CPU_FLAG_SLOW_GATHER 0x2000000 ///< CPU has slow gathers.
#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard #define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06 #define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
@ -74,10 +74,6 @@
#define AV_CPU_FLAG_MMI (1 << 0) #define AV_CPU_FLAG_MMI (1 << 0)
#define AV_CPU_FLAG_MSA (1 << 1) #define AV_CPU_FLAG_MSA (1 << 1)
//Loongarch SIMD extension.
#define AV_CPU_FLAG_LSX (1 << 0)
#define AV_CPU_FLAG_LASX (1 << 1)
/** /**
* Return the flags which specify extensions supported by the CPU. * Return the flags which specify extensions supported by the CPU.
* The returned value is affected by av_force_cpu_flags() if that was used * The returned value is affected by av_force_cpu_flags() if that was used
@ -92,6 +88,25 @@ int av_get_cpu_flags(void);
*/ */
void av_force_cpu_flags(int flags); void av_force_cpu_flags(int flags);
/**
* Set a mask on flags returned by av_get_cpu_flags().
* This function is mainly useful for testing.
* Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible
*/
attribute_deprecated void av_set_cpu_flags_mask(int mask);
/**
* Parse CPU flags from a string.
*
* The returned flags contain the specified flags as well as related unspecified flags.
*
* This function exists only for compatibility with libav.
* Please use av_parse_cpu_caps() when possible.
* @return a combination of AV_CPU_* flags, negative on error.
*/
attribute_deprecated
int av_parse_cpu_flags(const char *s);
/** /**
* Parse CPU caps from a string and update the given AV_CPU_* flags based on that. * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
* *
@ -104,12 +119,6 @@ int av_parse_cpu_caps(unsigned *flags, const char *s);
*/ */
int av_cpu_count(void); int av_cpu_count(void);
/**
* Overrides cpu count detection and forces the specified count.
* Count < 1 disables forcing of specific count.
*/
void av_cpu_force_count(int count);
/** /**
* Get the maximum data alignment that may be required by FFmpeg. * Get the maximum data alignment that may be required by FFmpeg.
* *

1
ffmpeg/include/libavutil/crc.h

@ -30,6 +30,7 @@
#include <stdint.h> #include <stdint.h>
#include <stddef.h> #include <stddef.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_crc32 CRC * @defgroup lavu_crc32 CRC

2
ffmpeg/include/libavutil/dict.h

@ -32,6 +32,8 @@
#include <stdint.h> #include <stdint.h>
#include "version.h"
/** /**
* @addtogroup lavu_dict AVDictionary * @addtogroup lavu_dict AVDictionary
* @ingroup lavu_data * @ingroup lavu_data

3
ffmpeg/include/libavutil/display.h

@ -27,6 +27,7 @@
#define AVUTIL_DISPLAY_H #define AVUTIL_DISPLAY_H
#include <stdint.h> #include <stdint.h>
#include "common.h"
/** /**
* @addtogroup lavu_video * @addtogroup lavu_video
@ -87,7 +88,7 @@
double av_display_rotation_get(const int32_t matrix[9]); double av_display_rotation_get(const int32_t matrix[9]);
/** /**
* Initialize a transformation matrix describing a pure clockwise * Initialize a transformation matrix describing a pure counterclockwise
* rotation by the specified angle (in degrees). * rotation by the specified angle (in degrees).
* *
* @param matrix an allocated transformation matrix (will be fully overwritten * @param matrix an allocated transformation matrix (will be fully overwritten

166
ffmpeg/include/libavutil/dovi_meta.h

@ -29,7 +29,6 @@
#include <stdint.h> #include <stdint.h>
#include <stddef.h> #include <stddef.h>
#include "rational.h"
/* /*
* DOVI configuration * DOVI configuration
@ -68,169 +67,4 @@ typedef struct AVDOVIDecoderConfigurationRecord {
*/ */
AVDOVIDecoderConfigurationRecord *av_dovi_alloc(size_t *size); AVDOVIDecoderConfigurationRecord *av_dovi_alloc(size_t *size);
/**
* Dolby Vision RPU data header.
*
* @note sizeof(AVDOVIRpuDataHeader) is not part of the public ABI.
*/
typedef struct AVDOVIRpuDataHeader {
uint8_t rpu_type;
uint16_t rpu_format;
uint8_t vdr_rpu_profile;
uint8_t vdr_rpu_level;
uint8_t chroma_resampling_explicit_filter_flag;
uint8_t coef_data_type; /* informative, lavc always converts to fixed */
uint8_t coef_log2_denom;
uint8_t vdr_rpu_normalized_idc;
uint8_t bl_video_full_range_flag;
uint8_t bl_bit_depth; /* [8, 16] */
uint8_t el_bit_depth; /* [8, 16] */
uint8_t vdr_bit_depth; /* [8, 16] */
uint8_t spatial_resampling_filter_flag;
uint8_t el_spatial_resampling_filter_flag;
uint8_t disable_residual_flag;
} AVDOVIRpuDataHeader;
enum AVDOVIMappingMethod {
AV_DOVI_MAPPING_POLYNOMIAL = 0,
AV_DOVI_MAPPING_MMR = 1,
};
/**
* Coefficients of a piece-wise function. The pieces of the function span the
* value ranges between two adjacent pivot values.
*/
#define AV_DOVI_MAX_PIECES 8
typedef struct AVDOVIReshapingCurve {
uint8_t num_pivots; /* [2, 9] */
uint16_t pivots[AV_DOVI_MAX_PIECES + 1]; /* sorted ascending */
enum AVDOVIMappingMethod mapping_idc[AV_DOVI_MAX_PIECES];
/* AV_DOVI_MAPPING_POLYNOMIAL */
uint8_t poly_order[AV_DOVI_MAX_PIECES]; /* [1, 2] */
int64_t poly_coef[AV_DOVI_MAX_PIECES][3]; /* x^0, x^1, x^2 */
/* AV_DOVI_MAPPING_MMR */
uint8_t mmr_order[AV_DOVI_MAX_PIECES]; /* [1, 3] */
int64_t mmr_constant[AV_DOVI_MAX_PIECES];
int64_t mmr_coef[AV_DOVI_MAX_PIECES][3/* order - 1 */][7];
} AVDOVIReshapingCurve;
enum AVDOVINLQMethod {
AV_DOVI_NLQ_NONE = -1,
AV_DOVI_NLQ_LINEAR_DZ = 0,
};
/**
* Coefficients of the non-linear inverse quantization. For the interpretation
* of these, see ETSI GS CCM 001.
*/
typedef struct AVDOVINLQParams {
uint16_t nlq_offset;
uint64_t vdr_in_max;
/* AV_DOVI_NLQ_LINEAR_DZ */
uint64_t linear_deadzone_slope;
uint64_t linear_deadzone_threshold;
} AVDOVINLQParams;
/**
* Dolby Vision RPU data mapping parameters.
*
* @note sizeof(AVDOVIDataMapping) is not part of the public ABI.
*/
typedef struct AVDOVIDataMapping {
uint8_t vdr_rpu_id;
uint8_t mapping_color_space;
uint8_t mapping_chroma_format_idc;
AVDOVIReshapingCurve curves[3]; /* per component */
/* Non-linear inverse quantization */
enum AVDOVINLQMethod nlq_method_idc;
uint32_t num_x_partitions;
uint32_t num_y_partitions;
AVDOVINLQParams nlq[3]; /* per component */
} AVDOVIDataMapping;
/**
* Dolby Vision RPU colorspace metadata parameters.
*
* @note sizeof(AVDOVIColorMetadata) is not part of the public ABI.
*/
typedef struct AVDOVIColorMetadata {
uint8_t dm_metadata_id;
uint8_t scene_refresh_flag;
/**
* Coefficients of the custom Dolby Vision IPT-PQ matrices. These are to be
* used instead of the matrices indicated by the frame's colorspace tags.
* The output of rgb_to_lms_matrix is to be fed into a BT.2020 LMS->RGB
* matrix based on a Hunt-Pointer-Estevez transform, but without any
* crosstalk. (See the definition of the ICtCp colorspace for more
* information.)
*/
AVRational ycc_to_rgb_matrix[9]; /* before PQ linearization */
AVRational ycc_to_rgb_offset[3]; /* input offset of neutral value */
AVRational rgb_to_lms_matrix[9]; /* after PQ linearization */
/**
* Extra signal metadata (see Dolby patents for more info).
*/
uint16_t signal_eotf;
uint16_t signal_eotf_param0;
uint16_t signal_eotf_param1;
uint32_t signal_eotf_param2;
uint8_t signal_bit_depth;
uint8_t signal_color_space;
uint8_t signal_chroma_format;
uint8_t signal_full_range_flag; /* [0, 3] */
uint16_t source_min_pq;
uint16_t source_max_pq;
uint16_t source_diagonal;
} AVDOVIColorMetadata;
/**
* Combined struct representing a combination of header, mapping and color
* metadata, for attaching to frames as side data.
*
* @note The struct must be allocated with av_dovi_metadata_alloc() and
* its size is not a part of the public ABI.
*/
typedef struct AVDOVIMetadata {
/**
* Offset in bytes from the beginning of this structure at which the
* respective structs start.
*/
size_t header_offset; /* AVDOVIRpuDataHeader */
size_t mapping_offset; /* AVDOVIDataMapping */
size_t color_offset; /* AVDOVIColorMetadata */
} AVDOVIMetadata;
static av_always_inline AVDOVIRpuDataHeader *
av_dovi_get_header(const AVDOVIMetadata *data)
{
return (AVDOVIRpuDataHeader *)((uint8_t *) data + data->header_offset);
}
static av_always_inline AVDOVIDataMapping *
av_dovi_get_mapping(const AVDOVIMetadata *data)
{
return (AVDOVIDataMapping *)((uint8_t *) data + data->mapping_offset);
}
static av_always_inline AVDOVIColorMetadata *
av_dovi_get_color(const AVDOVIMetadata *data)
{
return (AVDOVIColorMetadata *)((uint8_t *) data + data->color_offset);
}
/**
* Allocate an AVDOVIMetadata structure and initialize its
* fields to default values.
*
* @param size If this parameter is non-NULL, the size in bytes of the
* allocated struct will be written here on success
*
* @return the newly allocated struct or NULL on failure
*/
AVDOVIMetadata *av_dovi_metadata_alloc(size_t *size);
#endif /* AVUTIL_DOVI_META_H */ #endif /* AVUTIL_DOVI_META_H */

2
ffmpeg/include/libavutil/error.h

@ -27,8 +27,6 @@
#include <errno.h> #include <errno.h>
#include <stddef.h> #include <stddef.h>
#include "macros.h"
/** /**
* @addtogroup lavu_error * @addtogroup lavu_error
* *

2
ffmpeg/include/libavutil/eval.h

@ -26,6 +26,8 @@
#ifndef AVUTIL_EVAL_H #ifndef AVUTIL_EVAL_H
#define AVUTIL_EVAL_H #define AVUTIL_EVAL_H
#include "avutil.h"
typedef struct AVExpr AVExpr; typedef struct AVExpr AVExpr;
/** /**

2
ffmpeg/include/libavutil/ffversion.h

@ -1,5 +1,5 @@
/* Automatically generated by version.sh, do not manually edit! */ /* Automatically generated by version.sh, do not manually edit! */
#ifndef AVUTIL_FFVERSION_H #ifndef AVUTIL_FFVERSION_H
#define AVUTIL_FFVERSION_H #define AVUTIL_FFVERSION_H
#define FFMPEG_VERSION "N-107810-gd5544f6457" #define FFMPEG_VERSION "4.4.1"
#endif /* AVUTIL_FFVERSION_H */ #endif /* AVUTIL_FFVERSION_H */

251
ffmpeg/include/libavutil/fifo.h

@ -24,205 +24,10 @@
#ifndef AVUTIL_FIFO_H #ifndef AVUTIL_FIFO_H
#define AVUTIL_FIFO_H #define AVUTIL_FIFO_H
#include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "avutil.h"
#include "attributes.h" #include "attributes.h"
#include "version.h"
typedef struct AVFifo AVFifo;
/**
* Callback for writing or reading from a FIFO, passed to (and invoked from) the
* av_fifo_*_cb() functions. It may be invoked multiple times from a single
* av_fifo_*_cb() call and may process less data than the maximum size indicated
* by nb_elems.
*
* @param opaque the opaque pointer provided to the av_fifo_*_cb() function
* @param buf the buffer for reading or writing the data, depending on which
* av_fifo_*_cb function is called
* @param nb_elems On entry contains the maximum number of elements that can be
* read from / written into buf. On success, the callback should
* update it to contain the number of elements actually written.
*
* @return 0 on success, a negative error code on failure (will be returned from
* the invoking av_fifo_*_cb() function)
*/
typedef int AVFifoCB(void *opaque, void *buf, size_t *nb_elems);
/**
* Automatically resize the FIFO on writes, so that the data fits. This
* automatic resizing happens up to a limit that can be modified with
* av_fifo_auto_grow_limit().
*/
#define AV_FIFO_FLAG_AUTO_GROW (1 << 0)
/**
* Allocate and initialize an AVFifo with a given element size.
*
* @param elems initial number of elements that can be stored in the FIFO
* @param elem_size Size in bytes of a single element. Further operations on
* the returned FIFO will implicitly use this element size.
* @param flags a combination of AV_FIFO_FLAG_*
*
* @return newly-allocated AVFifo on success, a negative error code on failure
*/
AVFifo *av_fifo_alloc2(size_t elems, size_t elem_size,
unsigned int flags);
/**
* @return Element size for FIFO operations. This element size is set at
* FIFO allocation and remains constant during its lifetime
*/
size_t av_fifo_elem_size(const AVFifo *f);
/**
* Set the maximum size (in elements) to which the FIFO can be resized
* automatically. Has no effect unless AV_FIFO_FLAG_AUTO_GROW is used.
*/
void av_fifo_auto_grow_limit(AVFifo *f, size_t max_elems);
/**
* @return number of elements available for reading from the given FIFO.
*/
size_t av_fifo_can_read(const AVFifo *f);
/**
* @return number of elements that can be written into the given FIFO.
*/
size_t av_fifo_can_write(const AVFifo *f);
/**
* Enlarge an AVFifo.
*
* On success, the FIFO will be large enough to hold exactly
* inc + av_fifo_can_read() + av_fifo_can_write()
* elements. In case of failure, the old FIFO is kept unchanged.
*
* @param f AVFifo to resize
* @param inc number of elements to allocate for, in addition to the current
* allocated size
* @return a non-negative number on success, a negative error code on failure
*/
int av_fifo_grow2(AVFifo *f, size_t inc);
/**
* Write data into a FIFO.
*
* In case nb_elems > av_fifo_can_write(f), nothing is written and an error
* is returned.
*
* @param f the FIFO buffer
* @param buf Data to be written. nb_elems * av_fifo_elem_size(f) bytes will be
* read from buf on success.
* @param nb_elems number of elements to write into FIFO
*
* @return a non-negative number on success, a negative error code on failure
*/
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems);
/**
* Write data from a user-provided callback into a FIFO.
*
* @param f the FIFO buffer
* @param read_cb Callback supplying the data to the FIFO. May be called
* multiple times.
* @param opaque opaque user data to be provided to read_cb
* @param nb_elems Should point to the maximum number of elements that can be
* written. Will be updated to contain the number of elements
* actually written.
*
* @return non-negative number on success, a negative error code on failure
*/
int av_fifo_write_from_cb(AVFifo *f, AVFifoCB read_cb,
void *opaque, size_t *nb_elems);
/**
* Read data from a FIFO.
*
* In case nb_elems > av_fifo_can_read(f), nothing is read and an error
* is returned.
*
* @param f the FIFO buffer
* @param buf Buffer to store the data. nb_elems * av_fifo_elem_size(f) bytes
* will be written into buf on success.
* @param nb_elems number of elements to read from FIFO
*
* @return a non-negative number on success, a negative error code on failure
*/
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems);
/**
* Feed data from a FIFO into a user-provided callback.
*
* @param f the FIFO buffer
* @param write_cb Callback the data will be supplied to. May be called
* multiple times.
* @param opaque opaque user data to be provided to write_cb
* @param nb_elems Should point to the maximum number of elements that can be
* read. Will be updated to contain the total number of elements
* actually sent to the callback.
*
* @return non-negative number on success, a negative error code on failure
*/
int av_fifo_read_to_cb(AVFifo *f, AVFifoCB write_cb,
void *opaque, size_t *nb_elems);
/**
* Read data from a FIFO without modifying FIFO state.
*
* Returns an error if an attempt is made to peek to nonexistent elements
* (i.e. if offset + nb_elems is larger than av_fifo_can_read(f)).
*
* @param f the FIFO buffer
* @param buf Buffer to store the data. nb_elems * av_fifo_elem_size(f) bytes
* will be written into buf.
* @param nb_elems number of elements to read from FIFO
* @param offset number of initial elements to skip.
*
* @return a non-negative number on success, a negative error code on failure
*/
int av_fifo_peek(AVFifo *f, void *buf, size_t nb_elems, size_t offset);
/**
* Feed data from a FIFO into a user-provided callback.
*
* @param f the FIFO buffer
* @param write_cb Callback the data will be supplied to. May be called
* multiple times.
* @param opaque opaque user data to be provided to write_cb
* @param nb_elems Should point to the maximum number of elements that can be
* read. Will be updated to contain the total number of elements
* actually sent to the callback.
* @param offset number of initial elements to skip; offset + *nb_elems must not
* be larger than av_fifo_can_read(f).
*
* @return a non-negative number on success, a negative error code on failure
*/
int av_fifo_peek_to_cb(AVFifo *f, AVFifoCB write_cb, void *opaque,
size_t *nb_elems, size_t offset);
/**
* Discard the specified amount of data from an AVFifo.
* @param size number of elements to discard, MUST NOT be larger than
* av_fifo_can_read(f)
*/
void av_fifo_drain2(AVFifo *f, size_t size);
/*
* Empty the AVFifo.
* @param f AVFifo to reset
*/
void av_fifo_reset2(AVFifo *f);
/**
* Free an AVFifo and reset pointer to NULL.
* @param f Pointer to an AVFifo to free. *f == NULL is allowed.
*/
void av_fifo_freep2(AVFifo **f);
#if FF_API_FIFO_OLD_API
typedef struct AVFifoBuffer { typedef struct AVFifoBuffer {
uint8_t *buffer; uint8_t *buffer;
uint8_t *rptr, *wptr, *end; uint8_t *rptr, *wptr, *end;
@ -233,9 +38,7 @@ typedef struct AVFifoBuffer {
* Initialize an AVFifoBuffer. * Initialize an AVFifoBuffer.
* @param size of FIFO * @param size of FIFO
* @return AVFifoBuffer or NULL in case of memory allocation failure * @return AVFifoBuffer or NULL in case of memory allocation failure
* @deprecated use av_fifo_alloc2()
*/ */
attribute_deprecated
AVFifoBuffer *av_fifo_alloc(unsigned int size); AVFifoBuffer *av_fifo_alloc(unsigned int size);
/** /**
@ -243,33 +46,25 @@ AVFifoBuffer *av_fifo_alloc(unsigned int size);
* @param nmemb number of elements * @param nmemb number of elements
* @param size size of the single element * @param size size of the single element
* @return AVFifoBuffer or NULL in case of memory allocation failure * @return AVFifoBuffer or NULL in case of memory allocation failure
* @deprecated use av_fifo_alloc2()
*/ */
attribute_deprecated
AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size); AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
/** /**
* Free an AVFifoBuffer. * Free an AVFifoBuffer.
* @param f AVFifoBuffer to free * @param f AVFifoBuffer to free
* @deprecated use the AVFifo API with av_fifo_freep2()
*/ */
attribute_deprecated
void av_fifo_free(AVFifoBuffer *f); void av_fifo_free(AVFifoBuffer *f);
/** /**
* Free an AVFifoBuffer and reset pointer to NULL. * Free an AVFifoBuffer and reset pointer to NULL.
* @param f AVFifoBuffer to free * @param f AVFifoBuffer to free
* @deprecated use the AVFifo API with av_fifo_freep2()
*/ */
attribute_deprecated
void av_fifo_freep(AVFifoBuffer **f); void av_fifo_freep(AVFifoBuffer **f);
/** /**
* Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
* @param f AVFifoBuffer to reset * @param f AVFifoBuffer to reset
* @deprecated use av_fifo_reset2() with the new AVFifo-API
*/ */
attribute_deprecated
void av_fifo_reset(AVFifoBuffer *f); void av_fifo_reset(AVFifoBuffer *f);
/** /**
@ -277,9 +72,7 @@ void av_fifo_reset(AVFifoBuffer *f);
* amount of data you can read from it. * amount of data you can read from it.
* @param f AVFifoBuffer to read from * @param f AVFifoBuffer to read from
* @return size * @return size
* @deprecated use av_fifo_can_read() with the new AVFifo-API
*/ */
attribute_deprecated
int av_fifo_size(const AVFifoBuffer *f); int av_fifo_size(const AVFifoBuffer *f);
/** /**
@ -287,9 +80,7 @@ int av_fifo_size(const AVFifoBuffer *f);
* amount of data you can write into it. * amount of data you can write into it.
* @param f AVFifoBuffer to write into * @param f AVFifoBuffer to write into
* @return size * @return size
* @deprecated use av_fifo_can_write() with the new AVFifo-API
*/ */
attribute_deprecated
int av_fifo_space(const AVFifoBuffer *f); int av_fifo_space(const AVFifoBuffer *f);
/** /**
@ -300,13 +91,7 @@ int av_fifo_space(const AVFifoBuffer *f);
* @param buf_size number of bytes to read * @param buf_size number of bytes to read
* @param func generic read function * @param func generic read function
* @param dest data destination * @param dest data destination
*
* @return a non-negative number on success, a negative error code on failure
*
* @deprecated use the new AVFifo-API with av_fifo_peek() when func == NULL,
* av_fifo_peek_to_cb() otherwise
*/ */
attribute_deprecated
int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int)); int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int));
/** /**
@ -316,13 +101,7 @@ int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_siz
* @param buf_size number of bytes to read * @param buf_size number of bytes to read
* @param func generic read function * @param func generic read function
* @param dest data destination * @param dest data destination
*
* @return a non-negative number on success, a negative error code on failure
*
* @deprecated use the new AVFifo-API with av_fifo_peek() when func == NULL,
* av_fifo_peek_to_cb() otherwise
*/ */
attribute_deprecated
int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
/** /**
@ -331,13 +110,7 @@ int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)
* @param buf_size number of bytes to read * @param buf_size number of bytes to read
* @param func generic read function * @param func generic read function
* @param dest data destination * @param dest data destination
*
* @return a non-negative number on success, a negative error code on failure
*
* @deprecated use the new AVFifo-API with av_fifo_read() when func == NULL,
* av_fifo_read_to_cb() otherwise
*/ */
attribute_deprecated
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
/** /**
@ -351,12 +124,8 @@ int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)
* func must return the number of bytes written to dest_buf, or <= 0 to * func must return the number of bytes written to dest_buf, or <= 0 to
* indicate no more data available to write. * indicate no more data available to write.
* If func is NULL, src is interpreted as a simple byte array for source data. * If func is NULL, src is interpreted as a simple byte array for source data.
* @return the number of bytes written to the FIFO or a negative error code on failure * @return the number of bytes written to the FIFO
*
* @deprecated use the new AVFifo-API with av_fifo_write() when func == NULL,
* av_fifo_write_from_cb() otherwise
*/ */
attribute_deprecated
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
/** /**
@ -366,11 +135,7 @@ int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void
* @param f AVFifoBuffer to resize * @param f AVFifoBuffer to resize
* @param size new AVFifoBuffer size in bytes * @param size new AVFifoBuffer size in bytes
* @return <0 for failure, >=0 otherwise * @return <0 for failure, >=0 otherwise
*
* @deprecated use the new AVFifo-API with av_fifo_grow2() to increase FIFO size,
* decreasing FIFO size is not supported
*/ */
attribute_deprecated
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
/** /**
@ -381,24 +146,16 @@ int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
* @param f AVFifoBuffer to resize * @param f AVFifoBuffer to resize
* @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()
* @return <0 for failure, >=0 otherwise * @return <0 for failure, >=0 otherwise
*
* @deprecated use the new AVFifo-API with av_fifo_grow2(); note that unlike
* this function it adds to the allocated size, rather than to the used size
*/ */
attribute_deprecated
int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);
/** /**
* Read and discard the specified amount of data from an AVFifoBuffer. * Read and discard the specified amount of data from an AVFifoBuffer.
* @param f AVFifoBuffer to read from * @param f AVFifoBuffer to read from
* @param size amount of data to read in bytes * @param size amount of data to read in bytes
*
* @deprecated use the new AVFifo-API with av_fifo_drain2()
*/ */
attribute_deprecated
void av_fifo_drain(AVFifoBuffer *f, int size); void av_fifo_drain(AVFifoBuffer *f, int size);
#if FF_API_FIFO_PEEK2
/** /**
* Return a pointer to the data stored in a FIFO buffer at a certain offset. * Return a pointer to the data stored in a FIFO buffer at a certain offset.
* The FIFO buffer is not modified. * The FIFO buffer is not modified.
@ -408,9 +165,7 @@ void av_fifo_drain(AVFifoBuffer *f, int size);
* than the used buffer size or the returned pointer will * than the used buffer size or the returned pointer will
* point outside to the buffer data. * point outside to the buffer data.
* The used buffer size can be checked with av_fifo_size(). * The used buffer size can be checked with av_fifo_size().
* @deprecated use the new AVFifo-API with av_fifo_peek() or av_fifo_peek_to_cb()
*/ */
attribute_deprecated
static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
{ {
uint8_t *ptr = f->rptr + offs; uint8_t *ptr = f->rptr + offs;
@ -420,7 +175,5 @@ static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
ptr = f->end - (f->buffer - ptr); ptr = f->end - (f->buffer - ptr);
return ptr; return ptr;
} }
#endif
#endif
#endif /* AVUTIL_FIFO_H */ #endif /* AVUTIL_FIFO_H */

3
ffmpeg/include/libavutil/file.h

@ -19,10 +19,9 @@
#ifndef AVUTIL_FILE_H #ifndef AVUTIL_FILE_H
#define AVUTIL_FILE_H #define AVUTIL_FILE_H
#include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "avutil.h"
/** /**
* @file * @file

92
ffmpeg/include/libavutil/film_grain_params.h

@ -28,11 +28,6 @@ enum AVFilmGrainParamsType {
* The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom) * The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
*/ */
AV_FILM_GRAIN_PARAMS_AV1, AV_FILM_GRAIN_PARAMS_AV1,
/**
* The union is valid when interpreted as AVFilmGrainH274Params (codec.h274)
*/
AV_FILM_GRAIN_PARAMS_H274,
}; };
/** /**
@ -122,89 +117,6 @@ typedef struct AVFilmGrainAOMParams {
int limit_output_range; int limit_output_range;
} AVFilmGrainAOMParams; } AVFilmGrainAOMParams;
/**
* This structure describes how to handle film grain synthesis for codecs using
* the ITU-T H.274 Versatile suplemental enhancement information message.
*
* @note The struct must be allocated as part of AVFilmGrainParams using
* av_film_grain_params_alloc(). Its size is not a part of the public ABI.
*/
typedef struct AVFilmGrainH274Params {
/**
* Specifies the film grain simulation mode.
* 0 = Frequency filtering, 1 = Auto-regression
*/
int model_id;
/**
* Specifies the bit depth used for the luma component.
*/
int bit_depth_luma;
/**
* Specifies the bit depth used for the chroma components.
*/
int bit_depth_chroma;
enum AVColorRange color_range;
enum AVColorPrimaries color_primaries;
enum AVColorTransferCharacteristic color_trc;
enum AVColorSpace color_space;
/**
* Specifies the blending mode used to blend the simulated film grain
* with the decoded images.
*
* 0 = Additive, 1 = Multiplicative
*/
int blending_mode_id;
/**
* Specifies a scale factor used in the film grain characterization equations.
*/
int log2_scale_factor;
/**
* Indicates if the modelling of film grain for a given component is present.
*/
int component_model_present[3 /* y, cb, cr */];
/**
* Specifies the number of intensity intervals for which a specific set of
* model values has been estimated, with a range of [1, 256].
*/
uint16_t num_intensity_intervals[3 /* y, cb, cr */];
/**
* Specifies the number of model values present for each intensity interval
* in which the film grain has been modelled, with a range of [1, 6].
*/
uint8_t num_model_values[3 /* y, cb, cr */];
/**
* Specifies the lower ounds of each intensity interval for whichthe set of
* model values applies for the component.
*/
uint8_t intensity_interval_lower_bound[3 /* y, cb, cr */][256 /* intensity interval */];
/**
* Specifies the upper bound of each intensity interval for which the set of
* model values applies for the component.
*/
uint8_t intensity_interval_upper_bound[3 /* y, cb, cr */][256 /* intensity interval */];
/**
* Specifies the model values for the component for each intensity interval.
* - When model_id == 0, the following applies:
* For comp_model_value[y], the range of values is [0, 2^bit_depth_luma - 1]
* For comp_model_value[cb..cr], the range of values is [0, 2^bit_depth_chroma - 1]
* - Otherwise, the following applies:
* For comp_model_value[y], the range of values is [-2^(bit_depth_luma - 1), 2^(bit_depth_luma - 1) - 1]
* For comp_model_value[cb..cr], the range of values is [-2^(bit_depth_chroma - 1), 2^(bit_depth_chroma - 1) - 1]
*/
int16_t comp_model_value[3 /* y, cb, cr */][256 /* intensity interval */][6 /* model value */];
} AVFilmGrainH274Params;
/** /**
* This structure describes how to handle film grain synthesis in video * This structure describes how to handle film grain synthesis in video
* for specific codecs. Must be present on every frame where film grain is * for specific codecs. Must be present on every frame where film grain is
@ -221,9 +133,6 @@ typedef struct AVFilmGrainParams {
/** /**
* Seed to use for the synthesis process, if the codec allows for it. * Seed to use for the synthesis process, if the codec allows for it.
*
* @note For H.264, this refers to `pic_offset` as defined in
* SMPTE RDD 5-2006.
*/ */
uint64_t seed; uint64_t seed;
@ -234,7 +143,6 @@ typedef struct AVFilmGrainParams {
*/ */
union { union {
AVFilmGrainAOMParams aom; AVFilmGrainAOMParams aom;
AVFilmGrainH274Params h274;
} codec; } codec;
} AVFilmGrainParams; } AVFilmGrainParams;

217
ffmpeg/include/libavutil/frame.h

@ -30,7 +30,6 @@
#include "avutil.h" #include "avutil.h"
#include "buffer.h" #include "buffer.h"
#include "channel_layout.h"
#include "dict.h" #include "dict.h"
#include "rational.h" #include "rational.h"
#include "samplefmt.h" #include "samplefmt.h"
@ -143,6 +142,23 @@ enum AVFrameSideDataType {
*/ */
AV_FRAME_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE,
#if FF_API_FRAME_QP
/**
* Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
* The contents of this side data are undocumented and internal; use
* av_frame_set_qp_table() and av_frame_get_qp_table() to access this in a
* meaningful way instead.
*/
AV_FRAME_DATA_QP_TABLE_PROPERTIES,
/**
* Raw QP table data. Its format is described by
* AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use av_frame_set_qp_table() and
* av_frame_get_qp_table() to access this instead.
*/
AV_FRAME_DATA_QP_TABLE_DATA,
#endif
/** /**
* Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
* where the first uint32_t describes how many (1-3) of the other timecodes are used. * where the first uint32_t describes how many (1-3) of the other timecodes are used.
@ -182,33 +198,6 @@ enum AVFrameSideDataType {
* Must be present for every frame which should have film grain applied. * Must be present for every frame which should have film grain applied.
*/ */
AV_FRAME_DATA_FILM_GRAIN_PARAMS, AV_FRAME_DATA_FILM_GRAIN_PARAMS,
/**
* Bounding boxes for object detection and classification,
* as described by AVDetectionBBoxHeader.
*/
AV_FRAME_DATA_DETECTION_BBOXES,
/**
* Dolby Vision RPU raw data, suitable for passing to x265
* or other libraries. Array of uint8_t, with NAL emulation
* bytes intact.
*/
AV_FRAME_DATA_DOVI_RPU_BUFFER,
/**
* Parsed Dolby Vision metadata, suitable for passing to a software
* implementation. The payload is the AVDOVIMetadata struct defined in
* libavutil/dovi_meta.h.
*/
AV_FRAME_DATA_DOVI_METADATA,
/**
* HDR Vivid dynamic metadata associated with a video frame. The payload is
* an AVDynamicHDRVivid type and contains information for color
* volume transform - CUVA 005.1-2021.
*/
AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
}; };
enum AVActiveFormatDescription { enum AVActiveFormatDescription {
@ -231,7 +220,11 @@ enum AVActiveFormatDescription {
typedef struct AVFrameSideData { typedef struct AVFrameSideData {
enum AVFrameSideDataType type; enum AVFrameSideDataType type;
uint8_t *data; uint8_t *data;
#if FF_API_BUFFER_SIZE_T
int size;
#else
size_t size; size_t size;
#endif
AVDictionary *metadata; AVDictionary *metadata;
AVBufferRef *buf; AVBufferRef *buf;
} AVFrameSideData; } AVFrameSideData;
@ -326,32 +319,21 @@ typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8 #define AV_NUM_DATA_POINTERS 8
/** /**
* pointer to the picture/channel planes. * pointer to the picture/channel planes.
* This might be different from the first allocated byte. For video, * This might be different from the first allocated byte
* it could even point to the end of the image data.
*
* All pointers in data and extended_data must point into one of the
* AVBufferRef in buf or extended_buf.
* *
* Some decoders access areas outside 0,0 - width,height, please * Some decoders access areas outside 0,0 - width,height, please
* see avcodec_align_dimensions2(). Some filters and swscale can read * see avcodec_align_dimensions2(). Some filters and swscale can read
* up to 16 bytes beyond the planes, if these filters are to be used, * up to 16 bytes beyond the planes, if these filters are to be used,
* then 16 extra bytes must be allocated. * then 16 extra bytes must be allocated.
* *
* NOTE: Pointers not needed by the format MUST be set to NULL. * NOTE: Except for hwaccel formats, pointers not needed by the format
* * MUST be set to NULL.
* @attention In case of video, the data[] pointers can point to the
* end of image data in order to reverse line order, when used in
* combination with negative values in the linesize[] array.
*/ */
uint8_t *data[AV_NUM_DATA_POINTERS]; uint8_t *data[AV_NUM_DATA_POINTERS];
/** /**
* For video, a positive or negative value, which is typically indicating * For video, size in bytes of each picture line.
* the size in bytes of each picture line, but it can also be: * For audio, size in bytes of each plane.
* - the negative byte size of lines for vertical flipping
* (with data[n] pointing to the end of the data
* - a positive or negative multiple of the byte size as for accessing
* even and odd fields of a frame (possibly flipped)
* *
* For audio, only linesize[0] may be set. For planar audio, each channel * For audio, only linesize[0] may be set. For planar audio, each channel
* plane must be the same size. * plane must be the same size.
@ -363,9 +345,6 @@ typedef struct AVFrame {
* *
* @note The linesize may be larger than the size of usable data -- there * @note The linesize may be larger than the size of usable data -- there
* may be extra padding present for performance reasons. * may be extra padding present for performance reasons.
*
* @attention In case of video, line size values can be negative to achieve
* a vertically inverted iteration over image lines.
*/ */
int linesize[AV_NUM_DATA_POINTERS]; int linesize[AV_NUM_DATA_POINTERS];
@ -431,6 +410,15 @@ typedef struct AVFrame {
*/ */
int64_t pts; int64_t pts;
#if FF_API_PKT_PTS
/**
* PTS copied from the AVPacket that was decoded to produce this frame.
* @deprecated use the pts field instead
*/
attribute_deprecated
int64_t pkt_pts;
#endif
/** /**
* DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used) * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
* This is also the Presentation time of this AVFrame calculated from * This is also the Presentation time of this AVFrame calculated from
@ -438,14 +426,6 @@ typedef struct AVFrame {
*/ */
int64_t pkt_dts; int64_t pkt_dts;
/**
* Time base for the timestamps in this frame.
* In the future, this field may be set on frames output by decoders or
* filters, but its value will be by default ignored on input to encoders
* or filters.
*/
AVRational time_base;
/** /**
* picture number in bitstream order * picture number in bitstream order
*/ */
@ -465,6 +445,14 @@ typedef struct AVFrame {
*/ */
void *opaque; void *opaque;
#if FF_API_ERROR_FRAME
/**
* @deprecated unused
*/
attribute_deprecated
uint64_t error[AV_NUM_DATA_POINTERS];
#endif
/** /**
* When decoding, this signals how much the picture must be delayed. * When decoding, this signals how much the picture must be delayed.
* extra_delay = repeat_pict / (2*fps) * extra_delay = repeat_pict / (2*fps)
@ -501,20 +489,16 @@ typedef struct AVFrame {
*/ */
int sample_rate; int sample_rate;
#if FF_API_OLD_CHANNEL_LAYOUT
/** /**
* Channel layout of the audio data. * Channel layout of the audio data.
* @deprecated use ch_layout instead
*/ */
attribute_deprecated
uint64_t channel_layout; uint64_t channel_layout;
#endif
/** /**
* AVBuffer references backing the data for this frame. All the pointers in * AVBuffer references backing the data for this frame. If all elements of
* data and extended_data must point inside one of the buffers in buf or * this array are NULL, then this frame is not reference counted. This array
* extended_buf. This array must be filled contiguously -- if buf[i] is * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must
* non-NULL then buf[j] must also be non-NULL for all j < i. * also be non-NULL for all j < i.
* *
* There may be at most one AVBuffer per data plane, so for video this array * There may be at most one AVBuffer per data plane, so for video this array
* always contains all the references. For planar audio with more than * always contains all the references. For planar audio with more than
@ -604,18 +588,13 @@ typedef struct AVFrame {
*/ */
int64_t pkt_pos; int64_t pkt_pos;
#if FF_API_PKT_DURATION
/** /**
* duration of the corresponding packet, expressed in * duration of the corresponding packet, expressed in
* AVStream->time_base units, 0 if unknown. * AVStream->time_base units, 0 if unknown.
* - encoding: unused * - encoding: unused
* - decoding: Read by user. * - decoding: Read by user.
*
* @deprecated use duration instead
*/ */
attribute_deprecated
int64_t pkt_duration; int64_t pkt_duration;
#endif
/** /**
* metadata. * metadata.
@ -637,16 +616,12 @@ typedef struct AVFrame {
#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4 #define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
#define FF_DECODE_ERROR_DECODE_SLICES 8 #define FF_DECODE_ERROR_DECODE_SLICES 8
#if FF_API_OLD_CHANNEL_LAYOUT
/** /**
* number of audio channels, only used for audio. * number of audio channels, only used for audio.
* - encoding: unused * - encoding: unused
* - decoding: Read by user. * - decoding: Read by user.
* @deprecated use ch_layout instead
*/ */
attribute_deprecated
int channels; int channels;
#endif
/** /**
* size of the corresponding packet containing the compressed * size of the corresponding packet containing the compressed
@ -657,6 +632,24 @@ typedef struct AVFrame {
*/ */
int pkt_size; int pkt_size;
#if FF_API_FRAME_QP
/**
* QP table
*/
attribute_deprecated
int8_t *qscale_table;
/**
* QP store stride
*/
attribute_deprecated
int qstride;
attribute_deprecated
int qscale_type;
attribute_deprecated
AVBufferRef *qp_table_buf;
#endif
/** /**
* For hwaccel-format frames, this should be a reference to the * For hwaccel-format frames, this should be a reference to the
* AVHWFramesContext describing the frame. * AVHWFramesContext describing the frame.
@ -702,28 +695,71 @@ typedef struct AVFrame {
* for the target frame's private_ref field. * for the target frame's private_ref field.
*/ */
AVBufferRef *private_ref; AVBufferRef *private_ref;
/**
* Channel layout of the audio data.
*/
AVChannelLayout ch_layout;
/**
* Duration of the frame, in the same units as pts. 0 if unknown.
*/
int64_t duration;
} AVFrame; } AVFrame;
#if FF_API_FRAME_GET_SET
/**
* Accessors for some AVFrame fields. These used to be provided for ABI
* compatibility, and do not need to be used anymore.
*/
attribute_deprecated
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
attribute_deprecated
void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);
attribute_deprecated
int64_t av_frame_get_pkt_duration (const AVFrame *frame);
attribute_deprecated
void av_frame_set_pkt_duration (AVFrame *frame, int64_t val);
attribute_deprecated
int64_t av_frame_get_pkt_pos (const AVFrame *frame);
attribute_deprecated
void av_frame_set_pkt_pos (AVFrame *frame, int64_t val);
attribute_deprecated
int64_t av_frame_get_channel_layout (const AVFrame *frame);
attribute_deprecated
void av_frame_set_channel_layout (AVFrame *frame, int64_t val);
attribute_deprecated
int av_frame_get_channels (const AVFrame *frame);
attribute_deprecated
void av_frame_set_channels (AVFrame *frame, int val);
attribute_deprecated
int av_frame_get_sample_rate (const AVFrame *frame);
attribute_deprecated
void av_frame_set_sample_rate (AVFrame *frame, int val);
attribute_deprecated
AVDictionary *av_frame_get_metadata (const AVFrame *frame);
attribute_deprecated
void av_frame_set_metadata (AVFrame *frame, AVDictionary *val);
attribute_deprecated
int av_frame_get_decode_error_flags (const AVFrame *frame);
attribute_deprecated
void av_frame_set_decode_error_flags (AVFrame *frame, int val);
attribute_deprecated
int av_frame_get_pkt_size(const AVFrame *frame);
attribute_deprecated
void av_frame_set_pkt_size(AVFrame *frame, int val);
#if FF_API_FRAME_QP
attribute_deprecated
int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);
attribute_deprecated
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);
#endif
attribute_deprecated
enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);
attribute_deprecated
void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);
attribute_deprecated
enum AVColorRange av_frame_get_color_range(const AVFrame *frame);
attribute_deprecated
void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);
#endif
#if FF_API_COLORSPACE_NAME
/** /**
* Get the name of a colorspace. * Get the name of a colorspace.
* @return a static string identifying the colorspace; can be NULL. * @return a static string identifying the colorspace; can be NULL.
* @deprecated use av_color_space_name()
*/ */
attribute_deprecated
const char *av_get_colorspace_name(enum AVColorSpace val); const char *av_get_colorspace_name(enum AVColorSpace val);
#endif
/** /**
* Allocate an AVFrame and set its fields to default values. The resulting * Allocate an AVFrame and set its fields to default values. The resulting
* struct must be freed using av_frame_free(). * struct must be freed using av_frame_free().
@ -791,7 +827,7 @@ void av_frame_move_ref(AVFrame *dst, AVFrame *src);
* The following fields must be set on frame before calling this function: * The following fields must be set on frame before calling this function:
* - format (pixel format for video, sample format for audio) * - format (pixel format for video, sample format for audio)
* - width and height for video * - width and height for video
* - nb_samples and ch_layout for audio * - nb_samples and channel_layout for audio
* *
* This function will fill AVFrame.data and AVFrame.buf arrays and, if * This function will fill AVFrame.data and AVFrame.buf arrays and, if
* necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
@ -828,8 +864,7 @@ int av_frame_is_writable(AVFrame *frame);
* Ensure that the frame data is writable, avoiding data copy if possible. * Ensure that the frame data is writable, avoiding data copy if possible.
* *
* Do nothing if the frame is writable, allocate new buffers and copy the data * Do nothing if the frame is writable, allocate new buffers and copy the data
* if it is not. Non-refcounted frames behave as non-writable, i.e. a copy * if it is not.
* is always made.
* *
* @return 0 on success, a negative AVERROR on error. * @return 0 on success, a negative AVERROR on error.
* *
@ -882,7 +917,11 @@ AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
*/ */
AVFrameSideData *av_frame_new_side_data(AVFrame *frame, AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
enum AVFrameSideDataType type, enum AVFrameSideDataType type,
#if FF_API_BUFFER_SIZE_T
int size);
#else
size_t size); size_t size);
#endif
/** /**
* Add a new side data to a frame from an existing AVBufferRef * Add a new side data to a frame from an existing AVBufferRef

6
ffmpeg/include/libavutil/hash.h

@ -30,6 +30,8 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "version.h"
/** /**
* @defgroup lavu_hash Hash Functions * @defgroup lavu_hash Hash Functions
* @ingroup lavu_crypto * @ingroup lavu_crypto
@ -180,7 +182,11 @@ void av_hash_init(struct AVHashContext *ctx);
* @param[in] src Data to be added to the hash context * @param[in] src Data to be added to the hash context
* @param[in] len Size of the additional data * @param[in] len Size of the additional data
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len);
#else
void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, size_t len); void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, size_t len);
#endif
/** /**
* Finalize a hash context and compute the actual hash value. * Finalize a hash context and compute the actual hash value.

1
ffmpeg/include/libavutil/hmac.h

@ -23,6 +23,7 @@
#include <stdint.h> #include <stdint.h>
#include "version.h"
/** /**
* @defgroup lavu_hmac HMAC * @defgroup lavu_hmac HMAC
* @ingroup lavu_crypto * @ingroup lavu_crypto

4
ffmpeg/include/libavutil/hwcontext.h

@ -571,10 +571,6 @@ enum {
* possible with the given arguments and hwframe setup, while other return * possible with the given arguments and hwframe setup, while other return
* values indicate that it failed somehow. * values indicate that it failed somehow.
* *
* On failure, the destination frame will be left blank, except for the
* hw_frames_ctx/format fields thay may have been set by the caller - those will
* be preserved as they were.
*
* @param dst Destination frame, to contain the mapping. * @param dst Destination frame, to contain the mapping.
* @param src Source frame, to be mapped. * @param src Source frame, to be mapped.
* @param flags Some combination of AV_HWFRAME_MAP_* flags. * @param flags Some combination of AV_HWFRAME_MAP_* flags.

9
ffmpeg/include/libavutil/hwcontext_d3d11va.h

@ -164,15 +164,6 @@ typedef struct AVD3D11VAFramesContext {
* This field is ignored/invalid if a user-allocated texture is provided. * This field is ignored/invalid if a user-allocated texture is provided.
*/ */
UINT MiscFlags; UINT MiscFlags;
/**
* In case if texture structure member above is not NULL contains the same texture
* pointer for all elements and different indexes into the array texture.
* In case if texture structure member above is NULL, all elements contains
* pointers to separate non-array textures and 0 indexes.
* This field is ignored/invalid if a user-allocated texture is provided.
*/
AVD3D11FrameDescriptor *texture_infos;
} AVD3D11VAFramesContext; } AVD3D11VAFramesContext;
#endif /* AVUTIL_HWCONTEXT_D3D11VA_H */ #endif /* AVUTIL_HWCONTEXT_D3D11VA_H */

13
ffmpeg/include/libavutil/hwcontext_qsv.h

@ -19,7 +19,7 @@
#ifndef AVUTIL_HWCONTEXT_QSV_H #ifndef AVUTIL_HWCONTEXT_QSV_H
#define AVUTIL_HWCONTEXT_QSV_H #define AVUTIL_HWCONTEXT_QSV_H
#include <mfxvideo.h> #include <mfx/mfxvideo.h>
/** /**
* @file * @file
@ -34,17 +34,6 @@
*/ */
typedef struct AVQSVDeviceContext { typedef struct AVQSVDeviceContext {
mfxSession session; mfxSession session;
/**
* The mfxLoader handle used for mfxSession creation
*
* This field is only available for oneVPL user. For non-oneVPL user, this
* field must be set to NULL.
*
* Filled by the user before calling av_hwdevice_ctx_init() and should be
* cast to mfxLoader handle. Deallocating the AVHWDeviceContext will always
* release this interface.
*/
void *loader;
} AVQSVDeviceContext; } AVQSVDeviceContext;
/** /**

40
ffmpeg/include/libavutil/hwcontext_videotoolbox.h

@ -23,21 +23,17 @@
#include <VideoToolbox/VideoToolbox.h> #include <VideoToolbox/VideoToolbox.h>
#include "frame.h"
#include "pixfmt.h" #include "pixfmt.h"
/** /**
* @file * @file
* An API-specific header for AV_HWDEVICE_TYPE_VIDEOTOOLBOX. * An API-specific header for AV_HWDEVICE_TYPE_VIDEOTOOLBOX.
* *
* This API supports frame allocation using a native CVPixelBufferPool * This API currently does not support frame allocation, as the raw VideoToolbox
* instead of an AVBufferPool. * API does allocation, and FFmpeg itself never has the need to allocate frames.
* *
* If the API user sets a custom pool, AVHWFramesContext.pool must return * If the API user sets a custom pool, AVHWFramesContext.pool must return
* AVBufferRefs whose data pointer is a CVImageBufferRef or CVPixelBufferRef. * AVBufferRefs whose data pointer is a CVImageBufferRef or CVPixelBufferRef.
* Note that the underlying CVPixelBuffer could be retained by OS frameworks
* depending on application usage, so it is preferable to let CoreVideo manage
* the pool using the default implementation.
* *
* Currently AVHWDeviceContext.hwctx and AVHWFramesContext.hwctx are always * Currently AVHWDeviceContext.hwctx and AVHWFramesContext.hwctx are always
* NULL. * NULL.
@ -61,36 +57,4 @@ uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt);
*/ */
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range); uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range);
/**
* Convert an AVChromaLocation to a VideoToolbox/CoreVideo chroma location string.
* Returns 0 if no known equivalent was found.
*/
CFStringRef av_map_videotoolbox_chroma_loc_from_av(enum AVChromaLocation loc);
/**
* Convert an AVColorSpace to a VideoToolbox/CoreVideo color matrix string.
* Returns 0 if no known equivalent was found.
*/
CFStringRef av_map_videotoolbox_color_matrix_from_av(enum AVColorSpace space);
/**
* Convert an AVColorPrimaries to a VideoToolbox/CoreVideo color primaries string.
* Returns 0 if no known equivalent was found.
*/
CFStringRef av_map_videotoolbox_color_primaries_from_av(enum AVColorPrimaries pri);
/**
* Convert an AVColorTransferCharacteristic to a VideoToolbox/CoreVideo color transfer
* function string.
* Returns 0 if no known equivalent was found.
*/
CFStringRef av_map_videotoolbox_color_trc_from_av(enum AVColorTransferCharacteristic trc);
/**
* Update a CVPixelBufferRef's metadata to based on an AVFrame.
* Returns 0 if no known equivalent was found.
*/
int av_vt_pixbuf_set_attachments(void *log_ctx,
CVPixelBufferRef pixbuf, const struct AVFrame *src);
#endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */ #endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */

151
ffmpeg/include/libavutil/hwcontext_vulkan.h

@ -19,9 +19,6 @@
#ifndef AVUTIL_HWCONTEXT_VULKAN_H #ifndef AVUTIL_HWCONTEXT_VULKAN_H
#define AVUTIL_HWCONTEXT_VULKAN_H #define AVUTIL_HWCONTEXT_VULKAN_H
#if defined(_WIN32) && !defined(VK_USE_PLATFORM_WIN32_KHR)
#define VK_USE_PLATFORM_WIN32_KHR
#endif
#include <vulkan/vulkan.h> #include <vulkan/vulkan.h>
#include "pixfmt.h" #include "pixfmt.h"
@ -44,37 +41,43 @@ typedef struct AVVulkanDeviceContext {
* Custom memory allocator, else NULL * Custom memory allocator, else NULL
*/ */
const VkAllocationCallbacks *alloc; const VkAllocationCallbacks *alloc;
/**
* Pointer to the instance-provided vkGetInstanceProcAddr loading function.
* If NULL, will pick either libvulkan or libvolk, depending on libavutil's
* compilation settings, and set this field.
*/
PFN_vkGetInstanceProcAddr get_proc_addr;
/** /**
* Vulkan instance. Must be at least version 1.2. * Vulkan instance. Must be at least version 1.1.
*/ */
VkInstance inst; VkInstance inst;
/** /**
* Physical device * Physical device
*/ */
VkPhysicalDevice phys_dev; VkPhysicalDevice phys_dev;
/** /**
* Active device * Active device
*/ */
VkDevice act_dev; VkDevice act_dev;
/** /**
* This structure should be set to the set of features that present and enabled * Queue family index for graphics
* during device creation. When a device is created by FFmpeg, it will default to * @note av_hwdevice_create() will set all 3 queue indices if unset
* enabling all that are present of the shaderImageGatherExtended, * If there is no dedicated queue for compute or transfer operations,
* fragmentStoresAndAtomics, shaderInt64 and vertexPipelineStoresAndAtomics features. * they will be set to the graphics queue index which can handle both.
* nb_graphics_queues indicates how many queues were enabled for the
* graphics queue (must be at least 1)
*/ */
VkPhysicalDeviceFeatures2 device_features; int queue_family_index;
int nb_graphics_queues;
/**
* Queue family index to use for transfer operations, and the amount of queues
* enabled. In case there is no dedicated transfer queue, nb_tx_queues
* must be 0 and queue_family_tx_index must be the same as either the graphics
* queue or the compute queue, if available.
*/
int queue_family_tx_index;
int nb_tx_queues;
/**
* Queue family index for compute ops, and the amount of queues enabled.
* In case there are no dedicated compute queues, nb_comp_queues must be
* 0 and its queue family index must be set to the graphics queue.
*/
int queue_family_comp_index;
int nb_comp_queues;
/** /**
* Enabled instance extensions. * Enabled instance extensions.
* If supplying your own device context, set this to an array of strings, with * If supplying your own device context, set this to an array of strings, with
@ -84,7 +87,6 @@ typedef struct AVVulkanDeviceContext {
*/ */
const char * const *enabled_inst_extensions; const char * const *enabled_inst_extensions;
int nb_enabled_inst_extensions; int nb_enabled_inst_extensions;
/** /**
* Enabled device extensions. By default, VK_KHR_external_memory_fd, * Enabled device extensions. By default, VK_KHR_external_memory_fd,
* VK_EXT_external_memory_dma_buf, VK_EXT_image_drm_format_modifier, * VK_EXT_external_memory_dma_buf, VK_EXT_image_drm_format_modifier,
@ -95,91 +97,32 @@ typedef struct AVVulkanDeviceContext {
*/ */
const char * const *enabled_dev_extensions; const char * const *enabled_dev_extensions;
int nb_enabled_dev_extensions; int nb_enabled_dev_extensions;
/**
* Queue family index for graphics operations, and the number of queues
* enabled for it. If unavaiable, will be set to -1. Not required.
* av_hwdevice_create() will attempt to find a dedicated queue for each
* queue family, or pick the one with the least unrelated flags set.
* Queue indices here may overlap if a queue has to share capabilities.
*/
int queue_family_index;
int nb_graphics_queues;
/**
* Queue family index for transfer operations and the number of queues
* enabled. Required.
*/
int queue_family_tx_index;
int nb_tx_queues;
/**
* Queue family index for compute operations and the number of queues
* enabled. Required.
*/
int queue_family_comp_index;
int nb_comp_queues;
/** /**
* Queue family index for video encode ops, and the amount of queues enabled. * This structure should be set to the set of features that present and enabled
* If the device doesn't support such, queue_family_encode_index will be -1. * during device creation. When a device is created by FFmpeg, it will default to
* Not required. * enabling all that are present of the shaderImageGatherExtended,
*/ * fragmentStoresAndAtomics, shaderInt64 and vertexPipelineStoresAndAtomics features.
int queue_family_encode_index;
int nb_encode_queues;
/**
* Queue family index for video decode ops, and the amount of queues enabled.
* If the device doesn't support such, queue_family_decode_index will be -1.
* Not required.
*/ */
int queue_family_decode_index; VkPhysicalDeviceFeatures2 device_features;
int nb_decode_queues;
} AVVulkanDeviceContext; } AVVulkanDeviceContext;
/**
* Defines the behaviour of frame allocation.
*/
typedef enum AVVkFrameFlags {
/* Unless this flag is set, autodetected flags will be OR'd based on the
* device and tiling during av_hwframe_ctx_init(). */
AV_VK_FRAME_FLAG_NONE = (1ULL << 0),
/* Image planes will be allocated in a single VkDeviceMemory, rather
* than as per-plane VkDeviceMemory allocations. Required for exporting
* to VAAPI on Intel devices. */
AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY = (1ULL << 1),
} AVVkFrameFlags;
/** /**
* Allocated as AVHWFramesContext.hwctx, used to set pool-specific options * Allocated as AVHWFramesContext.hwctx, used to set pool-specific options
*/ */
typedef struct AVVulkanFramesContext { typedef struct AVVulkanFramesContext {
/** /**
* Controls the tiling of allocated frames. If left as optimal tiling, * Controls the tiling of allocated frames.
* then during av_hwframe_ctx_init() will decide based on whether the device
* supports DRM modifiers, or if the linear_images flag is set, otherwise
* will allocate optimally-tiled images.
*/ */
VkImageTiling tiling; VkImageTiling tiling;
/** /**
* Defines extra usage of output frames. If left as 0, the following bits * Defines extra usage of output frames. If left as 0, the following bits
* are set: TRANSFER_SRC, TRANSFER_DST. SAMPLED and STORAGE. * are set: TRANSFER_SRC, TRANSFER_DST. SAMPLED and STORAGE.
*/ */
VkImageUsageFlagBits usage; VkImageUsageFlagBits usage;
/** /**
* Extension data for image creation. * Extension data for image creation.
* If VkImageDrmFormatModifierListCreateInfoEXT is present in the chain,
* and the device supports DRM modifiers, then images will be allocated
* with the specific requested DRM modifiers.
* Additional structures may be added at av_hwframe_ctx_init() time,
* which will be freed automatically on uninit(), so users need only free
* any structures they've allocated themselves.
*/ */
void *create_pnext; void *create_pnext;
/** /**
* Extension data for memory allocation. Must have as many entries as * Extension data for memory allocation. Must have as many entries as
* the number of planes of the sw_format. * the number of planes of the sw_format.
@ -188,13 +131,6 @@ typedef struct AVVulkanFramesContext {
* extensions are present in enabled_dev_extensions. * extensions are present in enabled_dev_extensions.
*/ */
void *alloc_pnext[AV_NUM_DATA_POINTERS]; void *alloc_pnext[AV_NUM_DATA_POINTERS];
/**
* A combination of AVVkFrameFlags. Unless AV_VK_FRAME_FLAG_NONE is set,
* autodetected flags will be OR'd based on the device and tiling during
* av_hwframe_ctx_init().
*/
AVVkFrameFlags flags;
} AVVulkanFramesContext; } AVVulkanFramesContext;
/* /*
@ -203,7 +139,7 @@ typedef struct AVVulkanFramesContext {
* All frames, imported or allocated, will be created with the * All frames, imported or allocated, will be created with the
* VK_IMAGE_CREATE_ALIAS_BIT flag set, so the memory may be aliased if needed. * VK_IMAGE_CREATE_ALIAS_BIT flag set, so the memory may be aliased if needed.
* *
* If all queue family indices in the device context are the same, * If all three queue family indices in the device context are the same,
* images will be created with the EXCLUSIVE sharing mode. Otherwise, all images * images will be created with the EXCLUSIVE sharing mode. Otherwise, all images
* will be created using the CONCURRENT sharing mode. * will be created using the CONCURRENT sharing mode.
* *
@ -222,9 +158,8 @@ typedef struct AVVkFrame {
VkImageTiling tiling; VkImageTiling tiling;
/** /**
* Memory backing the images. Could be less than the amount of planes, * Memory backing the images. Could be less than the amount of images
* in which case the offset value will indicate the binding offset of * if importing from a DRM or VAAPI frame.
* each plane in the memory.
*/ */
VkDeviceMemory mem[AV_NUM_DATA_POINTERS]; VkDeviceMemory mem[AV_NUM_DATA_POINTERS];
size_t size[AV_NUM_DATA_POINTERS]; size_t size[AV_NUM_DATA_POINTERS];
@ -241,29 +176,17 @@ typedef struct AVVkFrame {
VkImageLayout layout[AV_NUM_DATA_POINTERS]; VkImageLayout layout[AV_NUM_DATA_POINTERS];
/** /**
* Synchronization timeline semaphores, one for each sw_format plane. * Synchronization semaphores. Must not be freed manually. Must be waited on
* Must not be freed manually. Must be waited on at every submission using * and signalled at every queue submission.
* the value in sem_value, and must be signalled at every submission, * Could be less than the amount of images: either one per VkDeviceMemory
* using an incremented value. * or one for the entire frame. All others will be set to VK_NULL_HANDLE.
*/ */
VkSemaphore sem[AV_NUM_DATA_POINTERS]; VkSemaphore sem[AV_NUM_DATA_POINTERS];
/**
* Up to date semaphore value at which each image becomes accessible.
* Clients must wait on this value when submitting a command queue,
* and increment it when signalling.
*/
uint64_t sem_value[AV_NUM_DATA_POINTERS];
/** /**
* Internal data. * Internal data.
*/ */
struct AVVkFrameInternal *internal; struct AVVkFrameInternal *internal;
/**
* Describes the binding offset of each plane to the VkDeviceMemory.
*/
ptrdiff_t offset[AV_NUM_DATA_POINTERS];
} AVVkFrame; } AVVkFrame;
/** /**

22
ffmpeg/include/libavutil/imgutils.h

@ -27,10 +27,8 @@
* @{ * @{
*/ */
#include <stddef.h> #include "avutil.h"
#include <stdint.h>
#include "pixdesc.h" #include "pixdesc.h"
#include "pixfmt.h"
#include "rational.h" #include "rational.h"
/** /**
@ -126,24 +124,6 @@ void av_image_copy_plane(uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize, const uint8_t *src, int src_linesize,
int bytewidth, int height); int bytewidth, int height);
/**
* Copy image data located in uncacheable (e.g. GPU mapped) memory. Where
* available, this function will use special functionality for reading from such
* memory, which may result in greatly improved performance compared to plain
* av_image_copy_plane().
*
* bytewidth must be contained by both absolute values of dst_linesize
* and src_linesize, otherwise the function behavior is undefined.
*
* @note The linesize parameters have the type ptrdiff_t here, while they are
* int for av_image_copy_plane().
* @note On x86, the linesizes currently need to be aligned to the cacheline
* size (i.e. 64) to get improved performance.
*/
void av_image_copy_plane_uc_from(uint8_t *dst, ptrdiff_t dst_linesize,
const uint8_t *src, ptrdiff_t src_linesize,
ptrdiff_t bytewidth, int height);
/** /**
* Copy image in src_data to dst_data. * Copy image in src_data to dst_data.
* *

24
ffmpeg/include/libavutil/log.h

@ -22,6 +22,7 @@
#define AVUTIL_LOG_H #define AVUTIL_LOG_H
#include <stdarg.h> #include <stdarg.h>
#include "avutil.h"
#include "attributes.h" #include "attributes.h"
#include "version.h" #include "version.h"
@ -106,6 +107,24 @@ typedef struct AVClass {
*/ */
int parent_log_context_offset; int parent_log_context_offset;
/**
* Return next AVOptions-enabled child or NULL
*/
void* (*child_next)(void *obj, void *prev);
#if FF_API_CHILD_CLASS_NEXT
/**
* Return an AVClass corresponding to the next potential
* AVOptions-enabled child.
*
* The difference between child_next and this is that
* child_next iterates over _already existing_ objects, while
* child_class_next iterates over _all possible_ children.
*/
attribute_deprecated
const struct AVClass* (*child_class_next)(const struct AVClass *prev);
#endif
/** /**
* Category used for visualization (like color) * Category used for visualization (like color)
* This is only set if the category is equal for all objects using this class. * This is only set if the category is equal for all objects using this class.
@ -125,11 +144,6 @@ typedef struct AVClass {
*/ */
int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);
/**
* Return next AVOptions-enabled child or NULL
*/
void* (*child_next)(void *obj, void *prev);
/** /**
* Iterate over the AVClasses corresponding to potential AVOptions-enabled * Iterate over the AVClasses corresponding to potential AVOptions-enabled
* children. * children.

30
ffmpeg/include/libavutil/macros.h

@ -25,36 +25,6 @@
#ifndef AVUTIL_MACROS_H #ifndef AVUTIL_MACROS_H
#define AVUTIL_MACROS_H #define AVUTIL_MACROS_H
#include "libavutil/avconfig.h"
#if AV_HAVE_BIGENDIAN
# define AV_NE(be, le) (be)
#else
# define AV_NE(be, le) (le)
#endif
/**
* Comparator.
* For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
* if x == y. This is useful for instance in a qsort comparator callback.
* Furthermore, compilers are able to optimize this to branchless code, and
* there is no risk of overflow with signed types.
* As with many macros, this evaluates its argument multiple times, it thus
* must not have a side-effect.
*/
#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
/** /**
* @addtogroup preproc_misc Preprocessor String Macros * @addtogroup preproc_misc Preprocessor String Macros
* *

9
ffmpeg/include/libavutil/md5.h

@ -31,6 +31,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_md5 MD5 * @defgroup lavu_md5 MD5
@ -63,7 +64,11 @@ void av_md5_init(struct AVMD5 *ctx);
* @param src input data to update hash with * @param src input data to update hash with
* @param len input data length * @param len input data length
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len);
#else
void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, size_t len); void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, size_t len);
#endif
/** /**
* Finish hashing and output digest value. * Finish hashing and output digest value.
@ -80,7 +85,11 @@ void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
* @param src The data to hash * @param src The data to hash
* @param len The length of the data, in bytes * @param len The length of the data, in bytes
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
#else
void av_md5_sum(uint8_t *dst, const uint8_t *src, size_t len); void av_md5_sum(uint8_t *dst, const uint8_t *src, size_t len);
#endif
/** /**
* @} * @}

46
ffmpeg/include/libavutil/mem.h

@ -31,6 +31,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "error.h"
#include "avutil.h" #include "avutil.h"
#include "version.h" #include "version.h"
@ -237,20 +238,20 @@ av_alloc_size(1, 2) void *av_malloc_array(size_t nmemb, size_t size);
* @see av_mallocz() * @see av_mallocz()
* @see av_malloc_array() * @see av_malloc_array()
*/ */
void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2); av_alloc_size(1, 2) void *av_mallocz_array(size_t nmemb, size_t size);
#if FF_API_AV_MALLOCZ_ARRAY
/** /**
* @deprecated use av_calloc() * Non-inlined equivalent of av_mallocz_array().
*
* Created for symmetry with the calloc() C function.
*/ */
attribute_deprecated void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;
void *av_mallocz_array(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
#endif
/** /**
* Allocate, reallocate, or free a block of memory. * Allocate, reallocate, or free a block of memory.
* *
* If `ptr` is `NULL` and `size` > 0, allocate a new block. Otherwise, expand or * If `ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is
* zero, free the memory block pointed to by `ptr`. Otherwise, expand or
* shrink that block of memory according to `size`. * shrink that block of memory according to `size`.
* *
* @param ptr Pointer to a memory block already allocated with * @param ptr Pointer to a memory block already allocated with
@ -259,11 +260,10 @@ void *av_mallocz_array(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size
* reallocated * reallocated
* *
* @return Pointer to a newly-reallocated block or `NULL` if the block * @return Pointer to a newly-reallocated block or `NULL` if the block
* cannot be reallocated * cannot be reallocated or the function is used to free the memory block
* *
* @warning Unlike av_malloc(), the returned pointer is not guaranteed to be * @warning Unlike av_malloc(), the returned pointer is not guaranteed to be
* correctly aligned. The returned pointer must be freed after even * correctly aligned.
* if size is zero.
* @see av_fast_realloc() * @see av_fast_realloc()
* @see av_reallocp() * @see av_reallocp()
*/ */
@ -311,7 +311,8 @@ void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
/** /**
* Allocate, reallocate, or free an array. * Allocate, reallocate, or free an array.
* *
* If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. If
* `nmemb` is zero, free the memory block pointed to by `ptr`.
* *
* @param ptr Pointer to a memory block already allocated with * @param ptr Pointer to a memory block already allocated with
* av_realloc() or `NULL` * av_realloc() or `NULL`
@ -319,19 +320,19 @@ void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
* @param size Size of the single element of the array * @param size Size of the single element of the array
* *
* @return Pointer to a newly-reallocated block or NULL if the block * @return Pointer to a newly-reallocated block or NULL if the block
* cannot be reallocated * cannot be reallocated or the function is used to free the memory block
* *
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
* correctly aligned. The returned pointer must be freed after even if * correctly aligned.
* nmemb is zero.
* @see av_reallocp_array() * @see av_reallocp_array()
*/ */
av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
/** /**
* Allocate, reallocate an array through a pointer to a pointer. * Allocate, reallocate, or free an array through a pointer to a pointer.
* *
* If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. If `nmemb` is
* zero, free the memory block pointed to by `*ptr`.
* *
* @param[in,out] ptr Pointer to a pointer to a memory block already * @param[in,out] ptr Pointer to a pointer to a memory block already
* allocated with av_realloc(), or a pointer to `NULL`. * allocated with av_realloc(), or a pointer to `NULL`.
@ -342,7 +343,7 @@ av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
* @return Zero on success, an AVERROR error code on failure * @return Zero on success, an AVERROR error code on failure
* *
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
* correctly aligned. *ptr must be freed after even if nmemb is zero. * correctly aligned.
*/ */
int av_reallocp_array(void *ptr, size_t nmemb, size_t size); int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
@ -671,7 +672,16 @@ void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
* @param[out] r Pointer to the result of the operation * @param[out] r Pointer to the result of the operation
* @return 0 on success, AVERROR(EINVAL) on overflow * @return 0 on success, AVERROR(EINVAL) on overflow
*/ */
int av_size_mult(size_t a, size_t b, size_t *r); static inline int av_size_mult(size_t a, size_t b, size_t *r)
{
size_t t = a * b;
/* Hack inspired from glibc: don't try the division if nelem and elsize
* are both less than sqrt(SIZE_MAX). */
if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
return AVERROR(EINVAL);
*r = t;
return 0;
}
/** /**
* Set the maximum size that may be allocated in one block. * Set the maximum size that may be allocated in one block.

6
ffmpeg/include/libavutil/murmur3.h

@ -30,6 +30,8 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "version.h"
/** /**
* @defgroup lavu_murmur3 Murmur3 * @defgroup lavu_murmur3 Murmur3
* @ingroup lavu_hash * @ingroup lavu_hash
@ -98,7 +100,11 @@ void av_murmur3_init(struct AVMurMur3 *c);
* @param[in] src Input data to update hash with * @param[in] src Input data to update hash with
* @param[in] len Number of bytes to read from `src` * @param[in] len Number of bytes to read from `src`
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len);
#else
void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, size_t len); void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, size_t len);
#endif
/** /**
* Finish hashing and output digest value. * Finish hashing and output digest value.

33
ffmpeg/include/libavutil/opt.h

@ -29,11 +29,11 @@
#include "rational.h" #include "rational.h"
#include "avutil.h" #include "avutil.h"
#include "channel_layout.h"
#include "dict.h" #include "dict.h"
#include "log.h" #include "log.h"
#include "pixfmt.h" #include "pixfmt.h"
#include "samplefmt.h" #include "samplefmt.h"
#include "version.h"
/** /**
* @defgroup avoptions AVOptions * @defgroup avoptions AVOptions
@ -238,11 +238,8 @@ enum AVOptionType{
AV_OPT_TYPE_VIDEO_RATE, ///< offset must point to AVRational AV_OPT_TYPE_VIDEO_RATE, ///< offset must point to AVRational
AV_OPT_TYPE_DURATION, AV_OPT_TYPE_DURATION,
AV_OPT_TYPE_COLOR, AV_OPT_TYPE_COLOR,
#if FF_API_OLD_CHANNEL_LAYOUT
AV_OPT_TYPE_CHANNEL_LAYOUT, AV_OPT_TYPE_CHANNEL_LAYOUT,
#endif
AV_OPT_TYPE_BOOL, AV_OPT_TYPE_BOOL,
AV_OPT_TYPE_CHLAYOUT,
}; };
/** /**
@ -651,6 +648,19 @@ const AVOption *av_opt_next(const void *obj, const AVOption *prev);
*/ */
void *av_opt_child_next(void *obj, void *prev); void *av_opt_child_next(void *obj, void *prev);
#if FF_API_CHILD_CLASS_NEXT
/**
* Iterate over potential AVOptions-enabled children of parent.
*
* @param prev result of a previous call to this function or NULL
* @return AVClass corresponding to next potential child or NULL
*
* @deprecated use av_opt_child_class_iterate
*/
attribute_deprecated
const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
#endif
/** /**
* Iterate over potential AVOptions-enabled children of parent. * Iterate over potential AVOptions-enabled children of parent.
* *
@ -697,11 +707,7 @@ int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_
int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);
int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);
int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);
#if FF_API_OLD_CHANNEL_LAYOUT
attribute_deprecated
int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);
#endif
int av_opt_set_chlayout(void *obj, const char *name, const AVChannelLayout *layout, int search_flags);
/** /**
* @note Any old dictionary present is discarded and replaced with a copy of the new one. The * @note Any old dictionary present is discarded and replaced with a copy of the new one. The
* caller still owns val is and responsible for freeing it. * caller still owns val is and responsible for freeing it.
@ -756,11 +762,7 @@ int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_
int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);
int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);
int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);
#if FF_API_OLD_CHANNEL_LAYOUT
attribute_deprecated
int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);
#endif
int av_opt_get_chlayout(void *obj, const char *name, int search_flags, AVChannelLayout *layout);
/** /**
* @param[out] out_val The returned dictionary is a copy of the actual value and must * @param[out] out_val The returned dictionary is a copy of the actual value and must
* be freed with av_dict_free() by the caller * be freed with av_dict_free() by the caller
@ -802,16 +804,9 @@ int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags
/** /**
* Copy options from src object into dest object. * Copy options from src object into dest object.
* *
* The underlying AVClass of both src and dest must coincide. The guarantee
* below does not apply if this is not fulfilled.
*
* Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object. * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.
* Original memory allocated for such options is freed unless both src and dest options points to the same memory. * Original memory allocated for such options is freed unless both src and dest options points to the same memory.
* *
* Even on error it is guaranteed that allocated options from src and dest
* no longer alias each other afterwards; in particular calling av_opt_free()
* on both src and dest is safe afterwards if dest has been memdup'ed from src.
*
* @param dest Object to copy from * @param dest Object to copy from
* @param src Object to copy into * @param src Object to copy into
* @return 0 on success, negative on error * @return 0 on success, negative on error

32
ffmpeg/include/libavutil/pixdesc.h

@ -26,6 +26,7 @@
#include "attributes.h" #include "attributes.h"
#include "pixfmt.h" #include "pixfmt.h"
#include "version.h"
typedef struct AVComponentDescriptor { typedef struct AVComponentDescriptor {
/** /**
@ -55,6 +56,17 @@ typedef struct AVComponentDescriptor {
* Number of bits in the component. * Number of bits in the component.
*/ */
int depth; int depth;
#if FF_API_PLUS1_MINUS1
/** deprecated, use step instead */
attribute_deprecated int step_minus1;
/** deprecated, use depth instead */
attribute_deprecated int depth_minus1;
/** deprecated, use offset instead */
attribute_deprecated int offset_plus1;
#endif
} AVComponentDescriptor; } AVComponentDescriptor;
/** /**
@ -135,6 +147,26 @@ typedef struct AVPixFmtDescriptor {
*/ */
#define AV_PIX_FMT_FLAG_RGB (1 << 5) #define AV_PIX_FMT_FLAG_RGB (1 << 5)
#if FF_API_PSEUDOPAL
/**
* The pixel format is "pseudo-paletted". This means that it contains a
* fixed palette in the 2nd plane but the palette is fixed/constant for each
* PIX_FMT. This allows interpreting the data as if it was PAL8, which can
* in some cases be simpler. Or the data can be interpreted purely based on
* the pixel format without using the palette.
* An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8
*
* @deprecated This flag is deprecated, and will be removed. When it is removed,
* the extra palette allocation in AVFrame.data[1] is removed as well. Only
* actual paletted formats (as indicated by AV_PIX_FMT_FLAG_PAL) will have a
* palette. Starting with FFmpeg versions which have this flag deprecated, the
* extra "pseudo" palette is already ignored, and API users are not required to
* allocate a palette for AV_PIX_FMT_FLAG_PSEUDOPAL formats (it was required
* before the deprecation, though).
*/
#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6)
#endif
/** /**
* The pixel format has an alpha channel. This is set on all formats that * The pixel format has an alpha channel. This is set on all formats that
* support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always * support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always

1
ffmpeg/include/libavutil/pixelutils.h

@ -21,6 +21,7 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "common.h"
/** /**
* Sum of abs(src1[x] - src2[x]) * Sum of abs(src1[x] - src2[x])

76
ffmpeg/include/libavutil/pixfmt.h

@ -112,11 +112,21 @@ enum AVPixelFormat {
AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
#if FF_API_VAAPI
/** @name Deprecated pixel formats */
/**@{*/
AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID
/**@}*/
AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,
#else
/** /**
* Hardware acceleration through VA-API, data[3] contains a * Hardware acceleration through VA-API, data[3] contains a
* VASurfaceID. * VASurfaceID.
*/ */
AV_PIX_FMT_VAAPI, AV_PIX_FMT_VAAPI,
#endif
AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ -260,9 +270,7 @@ enum AVPixelFormat {
AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
#if FF_API_XVMC
AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
#endif
AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ -352,26 +360,6 @@ enum AVPixelFormat {
AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined
AV_PIX_FMT_X2BGR10LE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
AV_PIX_FMT_X2BGR10BE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), big-endian, X=unused/undefined
AV_PIX_FMT_P210BE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, big-endian
AV_PIX_FMT_P210LE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, little-endian
AV_PIX_FMT_P410BE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, big-endian
AV_PIX_FMT_P410LE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, little-endian
AV_PIX_FMT_P216BE, ///< interleaved chroma YUV 4:2:2, 32bpp, big-endian
AV_PIX_FMT_P216LE, ///< interleaved chroma YUV 4:2:2, 32bpp, little-endian
AV_PIX_FMT_P416BE, ///< interleaved chroma YUV 4:4:4, 48bpp, big-endian
AV_PIX_FMT_P416LE, ///< interleaved chroma YUV 4:4:4, 48bpp, little-endian
AV_PIX_FMT_VUYA, ///< packed VUYA 4:4:4, 32bpp, VUYAVUYA...
AV_PIX_FMT_RGBAF16BE, ///< IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., big-endian
AV_PIX_FMT_RGBAF16LE, ///< IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., little-endian
AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
}; };
@ -462,43 +450,35 @@ enum AVPixelFormat {
#define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE) #define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE)
#define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE) #define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE)
#define AV_PIX_FMT_X2BGR10 AV_PIX_FMT_NE(X2BGR10BE, X2BGR10LE)
#define AV_PIX_FMT_P210 AV_PIX_FMT_NE(P210BE, P210LE)
#define AV_PIX_FMT_P410 AV_PIX_FMT_NE(P410BE, P410LE)
#define AV_PIX_FMT_P216 AV_PIX_FMT_NE(P216BE, P216LE)
#define AV_PIX_FMT_P416 AV_PIX_FMT_NE(P416BE, P416LE)
#define AV_PIX_FMT_RGBAF16 AV_PIX_FMT_NE(RGBAF16BE, RGBAF16LE)
/** /**
* Chromaticity coordinates of the source primaries. * Chromaticity coordinates of the source primaries.
* These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.1 and ITU-T H.273. * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1.
*/ */
enum AVColorPrimaries { enum AVColorPrimaries {
AVCOL_PRI_RESERVED0 = 0, AVCOL_PRI_RESERVED0 = 0,
AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
AVCOL_PRI_UNSPECIFIED = 2, AVCOL_PRI_UNSPECIFIED = 2,
AVCOL_PRI_RESERVED = 3, AVCOL_PRI_RESERVED = 3,
AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
AVCOL_PRI_SMPTE240M = 7, ///< identical to above, also called "SMPTE C" even though it uses D65 AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E (nothing there) / one of JEDEC P22 group phosphors AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E / JEDEC P22 phosphors
AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213, AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
AVCOL_PRI_NB ///< Not part of ABI AVCOL_PRI_NB ///< Not part of ABI
}; };
/** /**
* Color Transfer Characteristic. * Color Transfer Characteristic.
* These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.2. * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2.
*/ */
enum AVColorTransferCharacteristic { enum AVColorTransferCharacteristic {
AVCOL_TRC_RESERVED0 = 0, AVCOL_TRC_RESERVED0 = 0,
@ -527,18 +507,18 @@ enum AVColorTransferCharacteristic {
/** /**
* YUV colorspace type. * YUV colorspace type.
* These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.3. * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3.
*/ */
enum AVColorSpace { enum AVColorSpace {
AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1 AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
AVCOL_SPC_UNSPECIFIED = 2, AVCOL_SPC_UNSPECIFIED = 2,
AVCOL_SPC_RESERVED = 3, ///< reserved for future use by ITU-T and ISO/IEC just like 15-255 are AVCOL_SPC_RESERVED = 3,
AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
AVCOL_SPC_SMPTE240M = 7, ///< derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above
AVCOL_SPC_YCGCO = 8, ///< used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
@ -560,9 +540,9 @@ enum AVColorSpace {
* recommended, as it also defines the full range representation. * recommended, as it also defines the full range representation.
* *
* Common definitions: * Common definitions:
* - For RGB and luma planes such as Y in YCbCr and I in ICtCp, * - For RGB and luminance planes such as Y in YCbCr and I in ICtCp,
* 'E' is the original value in range of 0.0 to 1.0. * 'E' is the original value in range of 0.0 to 1.0.
* - For chroma planes such as Cb,Cr and Ct,Cp, 'E' is the original * - For chrominance planes such as Cb,Cr and Ct,Cp, 'E' is the original
* value in range of -0.5 to 0.5. * value in range of -0.5 to 0.5.
* - 'n' is the output bit depth. * - 'n' is the output bit depth.
* - For additional definitions such as rounding and clipping to valid n * - For additional definitions such as rounding and clipping to valid n
@ -574,13 +554,13 @@ enum AVColorRange {
/** /**
* Narrow or limited range content. * Narrow or limited range content.
* *
* - For luma planes: * - For luminance planes:
* *
* (219 * E + 16) * 2^(n-8) * (219 * E + 16) * 2^(n-8)
* *
* F.ex. the range of 16-235 for 8 bits * F.ex. the range of 16-235 for 8 bits
* *
* - For chroma planes: * - For chrominance planes:
* *
* (224 * E + 128) * 2^(n-8) * (224 * E + 128) * 2^(n-8)
* *
@ -591,13 +571,13 @@ enum AVColorRange {
/** /**
* Full range content. * Full range content.
* *
* - For RGB and luma planes: * - For RGB and luminance planes:
* *
* (2^n - 1) * E * (2^n - 1) * E
* *
* F.ex. the range of 0-255 for 8 bits * F.ex. the range of 0-255 for 8 bits
* *
* - For chroma planes: * - For chrominance planes:
* *
* (2^n - 1) * E + 2^(n - 1) * (2^n - 1) * E + 2^(n - 1)
* *

5
ffmpeg/include/libavutil/ripemd.h

@ -32,6 +32,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_ripemd RIPEMD * @defgroup lavu_ripemd RIPEMD
@ -66,7 +67,11 @@ int av_ripemd_init(struct AVRIPEMD* context, int bits);
* @param data input data to update hash with * @param data input data to update hash with
* @param len input data length * @param len input data length
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len);
#else
void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, size_t len); void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, size_t len);
#endif
/** /**
* Finish hashing and output digest value. * Finish hashing and output digest value.

8
ffmpeg/include/libavutil/samplefmt.h

@ -21,6 +21,9 @@
#include <stdint.h> #include <stdint.h>
#include "avutil.h"
#include "attributes.h"
/** /**
* @addtogroup lavu_audio * @addtogroup lavu_audio
* @{ * @{
@ -192,8 +195,9 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
* @param nb_samples the number of samples in a single channel * @param nb_samples the number of samples in a single channel
* @param sample_fmt the sample format * @param sample_fmt the sample format
* @param align buffer size alignment (0 = default, 1 = no alignment) * @param align buffer size alignment (0 = default, 1 = no alignment)
* @return minimum size in bytes required for the buffer on success, * @return >=0 on success or a negative error code on failure
* or a negative error code on failure * @todo return minimum size in bytes required for the buffer in case
* of success at the next bump
*/ */
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
const uint8_t *buf, const uint8_t *buf,

5
ffmpeg/include/libavutil/sha.h

@ -31,6 +31,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_sha SHA * @defgroup lavu_sha SHA
@ -73,7 +74,11 @@ int av_sha_init(struct AVSHA* context, int bits);
* @param data input data to update hash with * @param data input data to update hash with
* @param len input data length * @param len input data length
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_sha_update(struct AVSHA *ctx, const uint8_t *data, unsigned int len);
#else
void av_sha_update(struct AVSHA *ctx, const uint8_t *data, size_t len); void av_sha_update(struct AVSHA *ctx, const uint8_t *data, size_t len);
#endif
/** /**
* Finish hashing and output digest value. * Finish hashing and output digest value.

5
ffmpeg/include/libavutil/sha512.h

@ -32,6 +32,7 @@
#include <stdint.h> #include <stdint.h>
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @defgroup lavu_sha512 SHA-512 * @defgroup lavu_sha512 SHA-512
@ -75,7 +76,11 @@ int av_sha512_init(struct AVSHA512* context, int bits);
* @param data input data to update hash with * @param data input data to update hash with
* @param len input data length * @param len input data length
*/ */
#if FF_API_CRYPTO_SIZE_T
void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len);
#else
void av_sha512_update(struct AVSHA512* context, const uint8_t* data, size_t len); void av_sha512_update(struct AVSHA512* context, const uint8_t* data, size_t len);
#endif
/** /**
* Finish hashing and output digest value. * Finish hashing and output digest value.

1
ffmpeg/include/libavutil/tree.h

@ -28,6 +28,7 @@
#define AVUTIL_TREE_H #define AVUTIL_TREE_H
#include "attributes.h" #include "attributes.h"
#include "version.h"
/** /**
* @addtogroup lavu_tree AVTree * @addtogroup lavu_tree AVTree

84
ffmpeg/include/libavutil/tx.h

@ -38,59 +38,47 @@ typedef struct AVComplexInt32 {
enum AVTXType { enum AVTXType {
/** /**
* Standard complex to complex FFT with sample data type of AVComplexFloat, * Standard complex to complex FFT with sample data type AVComplexFloat.
* AVComplexDouble or AVComplexInt32, for each respective variant.
*
* Output is not 1/len normalized. Scaling currently unsupported. * Output is not 1/len normalized. Scaling currently unsupported.
* The stride parameter must be set to the size of a single sample in bytes. * The stride parameter is ignored.
*/ */
AV_TX_FLOAT_FFT = 0, AV_TX_FLOAT_FFT = 0,
AV_TX_DOUBLE_FFT = 2,
AV_TX_INT32_FFT = 4,
/** /**
* Standard MDCT with a sample data type of float, double or int32_t, * Standard MDCT with sample data type of float and a scale type of
* respecively. For the float and int32 variants, the scale type is * float. Length is the frame size, not the window size (which is 2x frame)
* 'float', while for the double variant, it's 'double'.
* If scale is NULL, 1.0 will be used as a default.
*
* Length is the frame size, not the window size (which is 2x frame).
* For forward transforms, the stride specifies the spacing between each * For forward transforms, the stride specifies the spacing between each
* sample in the output array in bytes. The input must be a flat array. * sample in the output array in bytes. The input must be a flat array.
*
* For inverse transforms, the stride specifies the spacing between each * For inverse transforms, the stride specifies the spacing between each
* sample in the input array in bytes. The output must be a flat array. * sample in the input array in bytes. The output will be a flat array.
* * Stride must be a non-zero multiple of sizeof(float).
* NOTE: the inverse transform is half-length, meaning the output will not * NOTE: the inverse transform is half-length, meaning the output will not
* contain redundant data. This is what most codecs work with. To do a full * contain redundant data. This is what most codecs work with.
* inverse transform, set the AV_TX_FULL_IMDCT flag on init. */
AV_TX_FLOAT_MDCT = 1,
/**
* Same as AV_TX_FLOAT_FFT with a data type of AVComplexDouble.
*/
AV_TX_DOUBLE_FFT = 2,
/**
* Same as AV_TX_FLOAT_MDCT with data and scale type of double.
* Stride must be a non-zero multiple of sizeof(double).
*/ */
AV_TX_FLOAT_MDCT = 1,
AV_TX_DOUBLE_MDCT = 3, AV_TX_DOUBLE_MDCT = 3,
AV_TX_INT32_MDCT = 5,
/** /**
* Real to complex and complex to real DFTs. * Same as AV_TX_FLOAT_FFT with a data type of AVComplexInt32.
* For the float and int32 variants, the scale type is 'float', while for
* the double variant, it's a 'double'. If scale is NULL, 1.0 will be used
* as a default.
*
* The stride parameter must be set to the size of a single sample in bytes.
*
* The forward transform performs a real-to-complex DFT of N samples to
* N/2+1 complex values.
*
* The inverse transform performs a complex-to-real DFT of N/2+1 complex
* values to N real samples. The output is not normalized, but can be
* made so by setting the scale value to 1.0/len.
* NOTE: the inverse transform always overwrites the input.
*/ */
AV_TX_FLOAT_RDFT = 6, AV_TX_INT32_FFT = 4,
AV_TX_DOUBLE_RDFT = 7,
AV_TX_INT32_RDFT = 8,
/* Not part of the API, do not use */ /**
AV_TX_NB, * Same as AV_TX_FLOAT_MDCT with data type of int32_t and scale type of float.
* Only scale values less than or equal to 1.0 are supported.
* Stride must be a non-zero multiple of sizeof(int32_t).
*/
AV_TX_INT32_MDCT = 5,
}; };
/** /**
@ -105,7 +93,7 @@ enum AVTXType {
* @param stride the input or output stride in bytes * @param stride the input or output stride in bytes
* *
* The out and in arrays must be aligned to the maximum required by the CPU * The out and in arrays must be aligned to the maximum required by the CPU
* architecture unless the AV_TX_UNALIGNED flag was set in av_tx_init(). * architecture.
* The stride must follow the constraints the transform type has specified. * The stride must follow the constraints the transform type has specified.
*/ */
typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride); typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride);
@ -120,20 +108,6 @@ enum AVTXFlags {
* transform types. * transform types.
*/ */
AV_TX_INPLACE = 1ULL << 0, AV_TX_INPLACE = 1ULL << 0,
/**
* Relaxes alignment requirement for the in and out arrays of av_tx_fn().
* May be slower with certain transform types.
*/
AV_TX_UNALIGNED = 1ULL << 1,
/**
* Performs a full inverse MDCT rather than leaving out samples that can be
* derived through symmetry. Requires an output array of 'len' floats,
* rather than the usual 'len/2' floats.
* Ignored for all transforms but inverse MDCTs.
*/
AV_TX_FULL_IMDCT = 1ULL << 2,
}; };
/** /**
@ -154,7 +128,7 @@ int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type,
int inv, int len, const void *scale, uint64_t flags); int inv, int len, const void *scale, uint64_t flags);
/** /**
* Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL. * Frees a context and sets ctx to NULL, does nothing when ctx == NULL
*/ */
void av_tx_uninit(AVTXContext **ctx); void av_tx_uninit(AVTXContext **ctx);

48
ffmpeg/include/libavutil/version.h

@ -78,9 +78,9 @@
* @{ * @{
*/ */
#define LIBAVUTIL_VERSION_MAJOR 57 #define LIBAVUTIL_VERSION_MAJOR 56
#define LIBAVUTIL_VERSION_MINOR 33 #define LIBAVUTIL_VERSION_MINOR 70
#define LIBAVUTIL_VERSION_MICRO 101 #define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, \ LIBAVUTIL_VERSION_MINOR, \
@ -105,16 +105,42 @@
* @{ * @{
*/ */
#ifndef FF_API_VAAPI
#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_FRAME_QP
#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_PLUS1_MINUS1
#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_ERROR_FRAME
#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_PKT_PTS
#define FF_API_PKT_PTS (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_CRYPTO_SIZE_T
#define FF_API_CRYPTO_SIZE_T (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_FRAME_GET_SET
#define FF_API_FRAME_GET_SET (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_PSEUDOPAL
#define FF_API_PSEUDOPAL (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_CHILD_CLASS_NEXT
#define FF_API_CHILD_CLASS_NEXT (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_BUFFER_SIZE_T
#define FF_API_BUFFER_SIZE_T (LIBAVUTIL_VERSION_MAJOR < 57)
#endif
#ifndef FF_API_D2STR
#define FF_API_D2STR (LIBAVUTIL_VERSION_MAJOR < 58) #define FF_API_D2STR (LIBAVUTIL_VERSION_MAJOR < 58)
#endif
#ifndef FF_API_DECLARE_ALIGNED
#define FF_API_DECLARE_ALIGNED (LIBAVUTIL_VERSION_MAJOR < 58) #define FF_API_DECLARE_ALIGNED (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_COLORSPACE_NAME (LIBAVUTIL_VERSION_MAJOR < 58) #endif
#define FF_API_AV_MALLOCZ_ARRAY (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 58)
#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 58)
/** /**
* @} * @}

117
ffmpeg/include/libswscale/swscale.h

@ -30,16 +30,9 @@
#include <stdint.h> #include <stdint.h>
#include "libavutil/avutil.h" #include "libavutil/avutil.h"
#include "libavutil/frame.h"
#include "libavutil/log.h" #include "libavutil/log.h"
#include "libavutil/pixfmt.h" #include "libavutil/pixfmt.h"
#include "version_major.h"
#ifndef HAVE_AV_CONFIG_H
/* When included as part of the ffmpeg build, only include the major version
* to avoid unnecessary rebuilds. When included externally, keep including
* the full version information. */
#include "version.h" #include "version.h"
#endif
/** /**
* @defgroup libsws libswscale * @defgroup libsws libswscale
@ -225,97 +218,6 @@ int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH, const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]); uint8_t *const dst[], const int dstStride[]);
/**
* Scale source data from src and write the output to dst.
*
* This is merely a convenience wrapper around
* - sws_frame_start()
* - sws_send_slice(0, src->height)
* - sws_receive_slice(0, dst->height)
* - sws_frame_end()
*
* @param dst The destination frame. See documentation for sws_frame_start() for
* more details.
* @param src The source frame.
*
* @return 0 on success, a negative AVERROR code on failure
*/
int sws_scale_frame(struct SwsContext *c, AVFrame *dst, const AVFrame *src);
/**
* Initialize the scaling process for a given pair of source/destination frames.
* Must be called before any calls to sws_send_slice() and sws_receive_slice().
*
* This function will retain references to src and dst, so they must both use
* refcounted buffers (if allocated by the caller, in case of dst).
*
* @param dst The destination frame.
*
* The data buffers may either be already allocated by the caller or
* left clear, in which case they will be allocated by the scaler.
* The latter may have performance advantages - e.g. in certain cases
* some output planes may be references to input planes, rather than
* copies.
*
* Output data will be written into this frame in successful
* sws_receive_slice() calls.
* @param src The source frame. The data buffers must be allocated, but the
* frame data does not have to be ready at this point. Data
* availability is then signalled by sws_send_slice().
* @return 0 on success, a negative AVERROR code on failure
*
* @see sws_frame_end()
*/
int sws_frame_start(struct SwsContext *c, AVFrame *dst, const AVFrame *src);
/**
* Finish the scaling process for a pair of source/destination frames previously
* submitted with sws_frame_start(). Must be called after all sws_send_slice()
* and sws_receive_slice() calls are done, before any new sws_frame_start()
* calls.
*/
void sws_frame_end(struct SwsContext *c);
/**
* Indicate that a horizontal slice of input data is available in the source
* frame previously provided to sws_frame_start(). The slices may be provided in
* any order, but may not overlap. For vertically subsampled pixel formats, the
* slices must be aligned according to subsampling.
*
* @param slice_start first row of the slice
* @param slice_height number of rows in the slice
*
* @return a non-negative number on success, a negative AVERROR code on failure.
*/
int sws_send_slice(struct SwsContext *c, unsigned int slice_start,
unsigned int slice_height);
/**
* Request a horizontal slice of the output data to be written into the frame
* previously provided to sws_frame_start().
*
* @param slice_start first row of the slice; must be a multiple of
* sws_receive_slice_alignment()
* @param slice_height number of rows in the slice; must be a multiple of
* sws_receive_slice_alignment(), except for the last slice
* (i.e. when slice_start+slice_height is equal to output
* frame height)
*
* @return a non-negative number if the data was successfully written into the output
* AVERROR(EAGAIN) if more input data needs to be provided before the
* output can be produced
* another negative AVERROR code on other kinds of scaling failure
*/
int sws_receive_slice(struct SwsContext *c, unsigned int slice_start,
unsigned int slice_height);
/**
* @return alignment required for output slices requested with sws_receive_slice().
* Slice offsets and sizes passed to sws_receive_slice() must be
* multiples of the value returned from this function.
*/
unsigned int sws_receive_slice_alignment(const struct SwsContext *c);
/** /**
* @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg)
* @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg)
@ -324,22 +226,14 @@ unsigned int sws_receive_slice_alignment(const struct SwsContext *c);
* @param brightness 16.16 fixed point brightness correction * @param brightness 16.16 fixed point brightness correction
* @param contrast 16.16 fixed point contrast correction * @param contrast 16.16 fixed point contrast correction
* @param saturation 16.16 fixed point saturation correction * @param saturation 16.16 fixed point saturation correction
#if LIBSWSCALE_VERSION_MAJOR > 6
* @return negative error code on error, non negative otherwise
#else
* @return -1 if not supported * @return -1 if not supported
#endif
*/ */
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
int srcRange, const int table[4], int dstRange, int srcRange, const int table[4], int dstRange,
int brightness, int contrast, int saturation); int brightness, int contrast, int saturation);
/** /**
#if LIBSWSCALE_VERSION_MAJOR > 6
* @return negative error code on error, non negative otherwise
#else
* @return -1 if not supported * @return -1 if not supported
#endif
*/ */
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
int *srcRange, int **table, int *dstRange, int *srcRange, int **table, int *dstRange,
@ -366,6 +260,17 @@ void sws_scaleVec(SwsVector *a, double scalar);
*/ */
void sws_normalizeVec(SwsVector *a, double height); void sws_normalizeVec(SwsVector *a, double height);
#if FF_API_SWS_VECTOR
attribute_deprecated SwsVector *sws_getConstVec(double c, int length);
attribute_deprecated SwsVector *sws_getIdentityVec(void);
attribute_deprecated void sws_convVec(SwsVector *a, SwsVector *b);
attribute_deprecated void sws_addVec(SwsVector *a, SwsVector *b);
attribute_deprecated void sws_subVec(SwsVector *a, SwsVector *b);
attribute_deprecated void sws_shiftVec(SwsVector *a, int shift);
attribute_deprecated SwsVector *sws_cloneVec(SwsVector *a);
attribute_deprecated void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
#endif
void sws_freeVec(SwsVector *a); void sws_freeVec(SwsVector *a);
SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,

17
ffmpeg/include/libswscale/version.h

@ -26,10 +26,9 @@
#include "libavutil/version.h" #include "libavutil/version.h"
#include "version_major.h" #define LIBSWSCALE_VERSION_MAJOR 5
#define LIBSWSCALE_VERSION_MINOR 9
#define LIBSWSCALE_VERSION_MINOR 8 #define LIBSWSCALE_VERSION_MICRO 100
#define LIBSWSCALE_VERSION_MICRO 102
#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ #define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \
LIBSWSCALE_VERSION_MINOR, \ LIBSWSCALE_VERSION_MINOR, \
@ -41,4 +40,14 @@
#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) #define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#ifndef FF_API_SWS_VECTOR
#define FF_API_SWS_VECTOR (LIBSWSCALE_VERSION_MAJOR < 6)
#endif
#endif /* SWSCALE_VERSION_H */ #endif /* SWSCALE_VERSION_H */

BIN
ffmpeg/lib/libavcodec.a

Binary file not shown.

BIN
ffmpeg/lib/libavformat.a

Binary file not shown.

BIN
ffmpeg/lib/libavutil.a

Binary file not shown.

BIN
ffmpeg/lib/libswscale.a

Binary file not shown.

10
ffmpeg/lib/pkgconfig/libavcodec.pc

@ -1,12 +1,12 @@
prefix=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg prefix=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg
exec_prefix=${prefix} exec_prefix=${prefix}
libdir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/lib libdir=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg/lib
includedir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/include includedir=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg/include
Name: libavcodec Name: libavcodec
Description: FFmpeg codec library Description: FFmpeg codec library
Version: 59.42.101 Version: 58.134.100
Requires: libavutil >= 57.33.101 Requires: libavutil >= 56.70.100
Requires.private: Requires.private:
Conflicts: Conflicts:
Libs: -L${libdir} -lavcodec -pthread -lm Libs: -L${libdir} -lavcodec -pthread -lm

14
ffmpeg/lib/pkgconfig/libavformat.pc

@ -1,14 +0,0 @@
prefix=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg
exec_prefix=${prefix}
libdir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/lib
includedir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/include
Name: libavformat
Description: FFmpeg container format library
Version: 59.30.100
Requires: libavcodec >= 59.42.101, libavutil >= 57.33.101
Requires.private:
Conflicts:
Libs: -L${libdir} -lavformat -lm
Libs.private:
Cflags: -I${includedir}

10
ffmpeg/lib/pkgconfig/libavutil.pc

@ -1,14 +1,14 @@
prefix=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg prefix=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg
exec_prefix=${prefix} exec_prefix=${prefix}
libdir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/lib libdir=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg/lib
includedir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/include includedir=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg/include
Name: libavutil Name: libavutil
Description: FFmpeg utility library Description: FFmpeg utility library
Version: 57.33.101 Version: 56.70.100
Requires: Requires:
Requires.private: Requires.private:
Conflicts: Conflicts:
Libs: -L${libdir} -lavutil -pthread -lm -lX11 Libs: -L${libdir} -lavutil -pthread -lm
Libs.private: Libs.private:
Cflags: -I${includedir} Cflags: -I${includedir}

10
ffmpeg/lib/pkgconfig/libswscale.pc

@ -1,12 +1,12 @@
prefix=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg prefix=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg
exec_prefix=${prefix} exec_prefix=${prefix}
libdir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/lib libdir=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg/lib
includedir=/home/develop/nightingale/src/modules/agent/web_client/FFmpeg/../decoder_wasm/ffmpeg/include includedir=/home/develop/nightingale/src/modules/agent/web_client/ffmpeg-4.4.1/../decoder_wasm/ffmpeg/include
Name: libswscale Name: libswscale
Description: FFmpeg image rescaling library Description: FFmpeg image rescaling library
Version: 6.8.102 Version: 5.9.100
Requires: libavutil >= 57.33.101 Requires: libavutil >= 56.70.100
Requires.private: Requires.private:
Conflicts: Conflicts:
Libs: -L${libdir} -lswscale -lm Libs: -L${libdir} -lswscale -lm

4
ffmpeg/share/ffmpeg/examples/decode_audio.c

@ -97,7 +97,7 @@ static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
exit(1); exit(1);
} }
for (i = 0; i < frame->nb_samples; i++) for (i = 0; i < frame->nb_samples; i++)
for (ch = 0; ch < dec_ctx->ch_layout.nb_channels; ch++) for (ch = 0; ch < dec_ctx->channels; ch++)
fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile); fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile);
} }
} }
@ -215,7 +215,7 @@ int main(int argc, char **argv)
sfmt = av_get_packed_sample_fmt(sfmt); sfmt = av_get_packed_sample_fmt(sfmt);
} }
n_channels = c->ch_layout.nb_channels; n_channels = c->channels;
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0) if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
goto end; goto end;

12
ffmpeg/share/ffmpeg/examples/decode_video.c

@ -92,7 +92,6 @@ int main(int argc, char **argv)
uint8_t *data; uint8_t *data;
size_t data_size; size_t data_size;
int ret; int ret;
int eof;
AVPacket *pkt; AVPacket *pkt;
if (argc <= 2) { if (argc <= 2) {
@ -151,16 +150,15 @@ int main(int argc, char **argv)
exit(1); exit(1);
} }
do { while (!feof(f)) {
/* read raw data from the input file */ /* read raw data from the input file */
data_size = fread(inbuf, 1, INBUF_SIZE, f); data_size = fread(inbuf, 1, INBUF_SIZE, f);
if (ferror(f)) if (!data_size)
break; break;
eof = !data_size;
/* use the parser to split the data into frames */ /* use the parser to split the data into frames */
data = inbuf; data = inbuf;
while (data_size > 0 || eof) { while (data_size > 0) {
ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size, ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0); data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) { if (ret < 0) {
@ -172,10 +170,8 @@ int main(int argc, char **argv)
if (pkt->size) if (pkt->size)
decode(c, frame, pkt, outfilename); decode(c, frame, pkt, outfilename);
else if (eof)
break;
} }
} while (!eof); }
/* flush the decoder */ /* flush the decoder */
decode(c, frame, NULL, outfilename); decode(c, frame, NULL, outfilename);

8
ffmpeg/share/ffmpeg/examples/demuxing_decoding.c

@ -32,7 +32,6 @@
#include <libavutil/imgutils.h> #include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h> #include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h> #include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
static AVFormatContext *fmt_ctx = NULL; static AVFormatContext *fmt_ctx = NULL;
@ -150,7 +149,8 @@ static int open_codec_context(int *stream_idx,
{ {
int ret, stream_index; int ret, stream_index;
AVStream *st; AVStream *st;
const AVCodec *dec = NULL; AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0); ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) { if (ret < 0) {
@ -185,7 +185,7 @@ static int open_codec_context(int *stream_idx,
} }
/* Init the decoders */ /* Init the decoders */
if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) { if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
fprintf(stderr, "Failed to open %s codec\n", fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type)); av_get_media_type_string(type));
return ret; return ret;
@ -345,7 +345,7 @@ int main (int argc, char **argv)
if (audio_stream) { if (audio_stream) {
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt; enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
int n_channels = audio_dec_ctx->ch_layout.nb_channels; int n_channels = audio_dec_ctx->channels;
const char *fmt; const char *fmt;
if (av_sample_fmt_is_planar(sfmt)) { if (av_sample_fmt_is_planar(sfmt)) {

30
ffmpeg/share/ffmpeg/examples/encode_audio.c

@ -70,25 +70,26 @@ static int select_sample_rate(const AVCodec *codec)
} }
/* select layout with the highest channel count */ /* select layout with the highest channel count */
static int select_channel_layout(const AVCodec *codec, AVChannelLayout *dst) static int select_channel_layout(const AVCodec *codec)
{ {
const AVChannelLayout *p, *best_ch_layout; const uint64_t *p;
uint64_t best_ch_layout = 0;
int best_nb_channels = 0; int best_nb_channels = 0;
if (!codec->ch_layouts) if (!codec->channel_layouts)
return av_channel_layout_copy(dst, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO); return AV_CH_LAYOUT_STEREO;
p = codec->ch_layouts; p = codec->channel_layouts;
while (p->nb_channels) { while (*p) {
int nb_channels = p->nb_channels; int nb_channels = av_get_channel_layout_nb_channels(*p);
if (nb_channels > best_nb_channels) { if (nb_channels > best_nb_channels) {
best_ch_layout = p; best_ch_layout = *p;
best_nb_channels = nb_channels; best_nb_channels = nb_channels;
} }
p++; p++;
} }
return av_channel_layout_copy(dst, best_ch_layout); return best_ch_layout;
} }
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
@ -163,9 +164,8 @@ int main(int argc, char **argv)
/* select other audio parameters supported by the encoder */ /* select other audio parameters supported by the encoder */
c->sample_rate = select_sample_rate(codec); c->sample_rate = select_sample_rate(codec);
ret = select_channel_layout(codec, &c->ch_layout); c->channel_layout = select_channel_layout(codec);
if (ret < 0) c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
exit(1);
/* open it */ /* open it */
if (avcodec_open2(c, codec, NULL) < 0) { if (avcodec_open2(c, codec, NULL) < 0) {
@ -195,9 +195,7 @@ int main(int argc, char **argv)
frame->nb_samples = c->frame_size; frame->nb_samples = c->frame_size;
frame->format = c->sample_fmt; frame->format = c->sample_fmt;
ret = av_channel_layout_copy(&frame->ch_layout, &c->ch_layout); frame->channel_layout = c->channel_layout;
if (ret < 0)
exit(1);
/* allocate the data buffers */ /* allocate the data buffers */
ret = av_frame_get_buffer(frame, 0); ret = av_frame_get_buffer(frame, 0);
@ -220,7 +218,7 @@ int main(int argc, char **argv)
for (j = 0; j < c->frame_size; j++) { for (j = 0; j < c->frame_size; j++) {
samples[2*j] = (int)(sin(t) * 10000); samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->ch_layout.nb_channels; k++) for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j]; samples[2*j + k] = samples[2*j];
t += tincr; t += tincr;
} }

24
ffmpeg/share/ffmpeg/examples/encode_video.c

@ -155,25 +155,12 @@ int main(int argc, char **argv)
for (i = 0; i < 25; i++) { for (i = 0; i < 25; i++) {
fflush(stdout); fflush(stdout);
/* Make sure the frame data is writable. /* make sure the frame data is writable */
On the first round, the frame is fresh from av_frame_get_buffer()
and therefore we know it is writable.
But on the next rounds, encode() will have called
avcodec_send_frame(), and the codec may have kept a reference to
the frame in its internal structures, that makes the frame
unwritable.
av_frame_make_writable() checks that and allocates a new buffer
for the frame only if necessary.
*/
ret = av_frame_make_writable(frame); ret = av_frame_make_writable(frame);
if (ret < 0) if (ret < 0)
exit(1); exit(1);
/* Prepare a dummy image. /* prepare a dummy image */
In real code, this is where you would have your own logic for
filling the frame. FFmpeg does not care what you put in the
frame.
*/
/* Y */ /* Y */
for (y = 0; y < c->height; y++) { for (y = 0; y < c->height; y++) {
for (x = 0; x < c->width; x++) { for (x = 0; x < c->width; x++) {
@ -198,12 +185,7 @@ int main(int argc, char **argv)
/* flush the encoder */ /* flush the encoder */
encode(c, NULL, pkt, f); encode(c, NULL, pkt, f);
/* Add sequence end code to have a real MPEG file. /* add sequence end code to have a real MPEG file */
It makes only sense because this tiny examples writes packets
directly. This is called "elementary stream" and only works for some
codecs. To create a valid file, you usually need to write packets
into a proper file format or protocol; see muxing.c.
*/
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO) if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
fwrite(endcode, 1, sizeof(endcode), f); fwrite(endcode, 1, sizeof(endcode), f);
fclose(f); fclose(f);

25
ffmpeg/share/ffmpeg/examples/extract_mvs.c

@ -22,7 +22,6 @@
*/ */
#include <libavutil/motion_vector.h> #include <libavutil/motion_vector.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
static AVFormatContext *fmt_ctx = NULL; static AVFormatContext *fmt_ctx = NULL;
@ -79,7 +78,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
int ret; int ret;
AVStream *st; AVStream *st;
AVCodecContext *dec_ctx = NULL; AVCodecContext *dec_ctx = NULL;
const AVCodec *dec = NULL; AVCodec *dec = NULL;
AVDictionary *opts = NULL; AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0); ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
@ -105,9 +104,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
/* Init the video decoder */ /* Init the video decoder */
av_dict_set(&opts, "flags2", "+export_mvs", 0); av_dict_set(&opts, "flags2", "+export_mvs", 0);
ret = avcodec_open2(dec_ctx, dec, &opts); if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
av_dict_free(&opts);
if (ret < 0) {
fprintf(stderr, "Failed to open %s codec\n", fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type)); av_get_media_type_string(type));
return ret; return ret;
@ -124,7 +121,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
int ret = 0; int ret = 0;
AVPacket *pkt = NULL; AVPacket pkt = { 0 };
if (argc != 2) { if (argc != 2) {
fprintf(stderr, "Usage: %s <video>\n", argv[0]); fprintf(stderr, "Usage: %s <video>\n", argv[0]);
@ -159,20 +156,13 @@ int main(int argc, char **argv)
goto end; goto end;
} }
pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
ret = AVERROR(ENOMEM);
goto end;
}
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n"); printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
/* read frames from the file */ /* read frames from the file */
while (av_read_frame(fmt_ctx, pkt) >= 0) { while (av_read_frame(fmt_ctx, &pkt) >= 0) {
if (pkt->stream_index == video_stream_idx) if (pkt.stream_index == video_stream_idx)
ret = decode_packet(pkt); ret = decode_packet(&pkt);
av_packet_unref(pkt); av_packet_unref(&pkt);
if (ret < 0) if (ret < 0)
break; break;
} }
@ -184,6 +174,5 @@ end:
avcodec_free_context(&video_dec_ctx); avcodec_free_context(&video_dec_ctx);
avformat_close_input(&fmt_ctx); avformat_close_input(&fmt_ctx);
av_frame_free(&frame); av_frame_free(&frame);
av_packet_free(&pkt);
return ret < 0; return ret < 0;
} }

13
ffmpeg/share/ffmpeg/examples/filter_audio.c

@ -55,7 +55,7 @@
#define INPUT_SAMPLERATE 48000 #define INPUT_SAMPLERATE 48000
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP #define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0 #define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
#define VOLUME_VAL 0.90 #define VOLUME_VAL 0.90
@ -100,7 +100,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
} }
/* Set the filter options through the AVOptions API. */ /* Set the filter options through the AVOptions API. */
av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout)); av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN); av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN); av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN); av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
@ -154,8 +154,9 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
/* A third way of passing the options is in a string of the form /* A third way of passing the options is in a string of the form
* key1=value1:key2=value2.... */ * key1=value1:key2=value2.... */
snprintf(options_str, sizeof(options_str), snprintf(options_str, sizeof(options_str),
"sample_fmts=%s:sample_rates=%d:channel_layouts=stereo", "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100); av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
(uint64_t)AV_CH_LAYOUT_STEREO);
err = avfilter_init_str(aformat_ctx, options_str); err = avfilter_init_str(aformat_ctx, options_str);
if (err < 0) { if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n"); av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
@ -214,7 +215,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
static int process_output(struct AVMD5 *md5, AVFrame *frame) static int process_output(struct AVMD5 *md5, AVFrame *frame)
{ {
int planar = av_sample_fmt_is_planar(frame->format); int planar = av_sample_fmt_is_planar(frame->format);
int channels = frame->ch_layout.nb_channels; int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
int planes = planar ? channels : 1; int planes = planar ? channels : 1;
int bps = av_get_bytes_per_sample(frame->format); int bps = av_get_bytes_per_sample(frame->format);
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels); int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
@ -247,7 +248,7 @@ static int get_input(AVFrame *frame, int frame_num)
/* Set up the frame properties and allocate the buffer for the data. */ /* Set up the frame properties and allocate the buffer for the data. */
frame->sample_rate = INPUT_SAMPLERATE; frame->sample_rate = INPUT_SAMPLERATE;
frame->format = INPUT_FORMAT; frame->format = INPUT_FORMAT;
av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT); frame->channel_layout = INPUT_CHANNEL_LAYOUT;
frame->nb_samples = FRAME_SIZE; frame->nb_samples = FRAME_SIZE;
frame->pts = frame_num * FRAME_SIZE; frame->pts = frame_num * FRAME_SIZE;

36
ffmpeg/share/ffmpeg/examples/filtering_audio.c

@ -34,7 +34,6 @@
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
#include <libavfilter/buffersink.h> #include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h> #include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h> #include <libavutil/opt.h>
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono"; static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
@ -49,8 +48,8 @@ static int audio_stream_index = -1;
static int open_input_file(const char *filename) static int open_input_file(const char *filename)
{ {
const AVCodec *dec;
int ret; int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
@ -94,6 +93,7 @@ static int init_filters(const char *filters_descr)
AVFilterInOut *outputs = avfilter_inout_alloc(); AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc(); AVFilterInOut *inputs = avfilter_inout_alloc();
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 }; static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
static const int out_sample_rates[] = { 8000, -1 }; static const int out_sample_rates[] = { 8000, -1 };
const AVFilterLink *outlink; const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base; AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
@ -105,13 +105,12 @@ static int init_filters(const char *filters_descr)
} }
/* buffer audio source: the decoded frames from the decoder will be inserted here. */ /* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) if (!dec_ctx->channel_layout)
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels); dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
ret = snprintf(args, sizeof(args), snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=", "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
time_base.num, time_base.den, dec_ctx->sample_rate, time_base.num, time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt)); av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
av_channel_layout_describe(&dec_ctx->ch_layout, args + ret, sizeof(args) - ret);
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in", ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
args, NULL, filter_graph); args, NULL, filter_graph);
if (ret < 0) { if (ret < 0) {
@ -134,7 +133,7 @@ static int init_filters(const char *filters_descr)
goto end; goto end;
} }
ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono", ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
AV_OPT_SEARCH_CHILDREN); AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
@ -185,7 +184,7 @@ static int init_filters(const char *filters_descr)
/* Print summary of the sink buffer /* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */ * Note: args buffer is reused to store channel layout string */
outlink = buffersink_ctx->inputs[0]; outlink = buffersink_ctx->inputs[0];
av_channel_layout_describe(&outlink->ch_layout, args, sizeof(args)); av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n", av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
(int)outlink->sample_rate, (int)outlink->sample_rate,
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"), (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
@ -200,7 +199,7 @@ end:
static void print_frame(const AVFrame *frame) static void print_frame(const AVFrame *frame)
{ {
const int n = frame->nb_samples * frame->ch_layout.nb_channels; const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
const uint16_t *p = (uint16_t*)frame->data[0]; const uint16_t *p = (uint16_t*)frame->data[0];
const uint16_t *p_end = p + n; const uint16_t *p_end = p + n;
@ -215,12 +214,12 @@ static void print_frame(const AVFrame *frame)
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
int ret; int ret;
AVPacket *packet = av_packet_alloc(); AVPacket packet;
AVFrame *frame = av_frame_alloc(); AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc(); AVFrame *filt_frame = av_frame_alloc();
if (!packet || !frame || !filt_frame) { if (!frame || !filt_frame) {
fprintf(stderr, "Could not allocate frame or packet\n"); perror("Could not allocate frame");
exit(1); exit(1);
} }
if (argc != 2) { if (argc != 2) {
@ -235,11 +234,11 @@ int main(int argc, char **argv)
/* read all packets */ /* read all packets */
while (1) { while (1) {
if ((ret = av_read_frame(fmt_ctx, packet)) < 0) if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break; break;
if (packet->stream_index == audio_stream_index) { if (packet.stream_index == audio_stream_index) {
ret = avcodec_send_packet(dec_ctx, packet); ret = avcodec_send_packet(dec_ctx, &packet);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n"); av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
break; break;
@ -275,13 +274,12 @@ int main(int argc, char **argv)
} }
} }
} }
av_packet_unref(packet); av_packet_unref(&packet);
} }
end: end:
avfilter_graph_free(&filter_graph); avfilter_graph_free(&filter_graph);
avcodec_free_context(&dec_ctx); avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt_ctx); avformat_close_input(&fmt_ctx);
av_packet_free(&packet);
av_frame_free(&frame); av_frame_free(&frame);
av_frame_free(&filt_frame); av_frame_free(&filt_frame);

18
ffmpeg/share/ffmpeg/examples/filtering_video.c

@ -53,8 +53,8 @@ static int64_t last_pts = AV_NOPTS_VALUE;
static int open_input_file(const char *filename) static int open_input_file(const char *filename)
{ {
const AVCodec *dec;
int ret; int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
@ -210,7 +210,7 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
int ret; int ret;
AVPacket *packet; AVPacket packet;
AVFrame *frame; AVFrame *frame;
AVFrame *filt_frame; AVFrame *filt_frame;
@ -221,9 +221,8 @@ int main(int argc, char **argv)
frame = av_frame_alloc(); frame = av_frame_alloc();
filt_frame = av_frame_alloc(); filt_frame = av_frame_alloc();
packet = av_packet_alloc(); if (!frame || !filt_frame) {
if (!frame || !filt_frame || !packet) { perror("Could not allocate frame");
fprintf(stderr, "Could not allocate frame or packet\n");
exit(1); exit(1);
} }
@ -234,11 +233,11 @@ int main(int argc, char **argv)
/* read all packets */ /* read all packets */
while (1) { while (1) {
if ((ret = av_read_frame(fmt_ctx, packet)) < 0) if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break; break;
if (packet->stream_index == video_stream_index) { if (packet.stream_index == video_stream_index) {
ret = avcodec_send_packet(dec_ctx, packet); ret = avcodec_send_packet(dec_ctx, &packet);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n"); av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
break; break;
@ -274,7 +273,7 @@ int main(int argc, char **argv)
av_frame_unref(frame); av_frame_unref(frame);
} }
} }
av_packet_unref(packet); av_packet_unref(&packet);
} }
end: end:
avfilter_graph_free(&filter_graph); avfilter_graph_free(&filter_graph);
@ -282,7 +281,6 @@ end:
avformat_close_input(&fmt_ctx); avformat_close_input(&fmt_ctx);
av_frame_free(&frame); av_frame_free(&frame);
av_frame_free(&filt_frame); av_frame_free(&filt_frame);
av_packet_free(&packet);
if (ret < 0 && ret != AVERROR_EOF) { if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));

24
ffmpeg/share/ffmpeg/examples/hw_decode.c

@ -152,8 +152,8 @@ int main(int argc, char *argv[])
int video_stream, ret; int video_stream, ret;
AVStream *video = NULL; AVStream *video = NULL;
AVCodecContext *decoder_ctx = NULL; AVCodecContext *decoder_ctx = NULL;
const AVCodec *decoder = NULL; AVCodec *decoder = NULL;
AVPacket *packet = NULL; AVPacket packet;
enum AVHWDeviceType type; enum AVHWDeviceType type;
int i; int i;
@ -172,12 +172,6 @@ int main(int argc, char *argv[])
return -1; return -1;
} }
packet = av_packet_alloc();
if (!packet) {
fprintf(stderr, "Failed to allocate AVPacket\n");
return -1;
}
/* open the input file */ /* open the input file */
if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) { if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
fprintf(stderr, "Cannot open input file '%s'\n", argv[2]); fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
@ -233,21 +227,23 @@ int main(int argc, char *argv[])
/* actual decoding and dump the raw data */ /* actual decoding and dump the raw data */
while (ret >= 0) { while (ret >= 0) {
if ((ret = av_read_frame(input_ctx, packet)) < 0) if ((ret = av_read_frame(input_ctx, &packet)) < 0)
break; break;
if (video_stream == packet->stream_index) if (video_stream == packet.stream_index)
ret = decode_write(decoder_ctx, packet); ret = decode_write(decoder_ctx, &packet);
av_packet_unref(packet); av_packet_unref(&packet);
} }
/* flush the decoder */ /* flush the decoder */
ret = decode_write(decoder_ctx, NULL); packet.data = NULL;
packet.size = 0;
ret = decode_write(decoder_ctx, &packet);
av_packet_unref(&packet);
if (output_file) if (output_file)
fclose(output_file); fclose(output_file);
av_packet_free(&packet);
avcodec_free_context(&decoder_ctx); avcodec_free_context(&decoder_ctx);
avformat_close_input(&input_ctx); avformat_close_input(&input_ctx);
av_buffer_unref(&hw_device_ctx); av_buffer_unref(&hw_device_ctx);

2
ffmpeg/share/ffmpeg/examples/metadata.c

@ -34,7 +34,7 @@
int main (int argc, char **argv) int main (int argc, char **argv)
{ {
AVFormatContext *fmt_ctx = NULL; AVFormatContext *fmt_ctx = NULL;
const AVDictionaryEntry *tag = NULL; AVDictionaryEntry *tag = NULL;
int ret; int ret;
if (argc != 2) { if (argc != 2) {

73
ffmpeg/share/ffmpeg/examples/muxing.c

@ -39,7 +39,6 @@
#include <libavutil/opt.h> #include <libavutil/opt.h>
#include <libavutil/mathematics.h> #include <libavutil/mathematics.h>
#include <libavutil/timestamp.h> #include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
#include <libswscale/swscale.h> #include <libswscale/swscale.h>
#include <libswresample/swresample.h> #include <libswresample/swresample.h>
@ -62,8 +61,6 @@ typedef struct OutputStream {
AVFrame *frame; AVFrame *frame;
AVFrame *tmp_frame; AVFrame *tmp_frame;
AVPacket *tmp_pkt;
float t, tincr, tincr2; float t, tincr, tincr2;
struct SwsContext *sws_ctx; struct SwsContext *sws_ctx;
@ -82,7 +79,7 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
} }
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c, static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
AVStream *st, AVFrame *frame, AVPacket *pkt) AVStream *st, AVFrame *frame)
{ {
int ret; int ret;
@ -95,7 +92,9 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
} }
while (ret >= 0) { while (ret >= 0) {
ret = avcodec_receive_packet(c, pkt); AVPacket pkt = { 0 };
ret = avcodec_receive_packet(c, &pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break; break;
else if (ret < 0) { else if (ret < 0) {
@ -104,15 +103,13 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
} }
/* rescale output packet timestamp values from codec to stream timebase */ /* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, c->time_base, st->time_base); av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
pkt->stream_index = st->index; pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */ /* Write the compressed frame to the media file. */
log_packet(fmt_ctx, pkt); log_packet(fmt_ctx, &pkt);
ret = av_interleaved_write_frame(fmt_ctx, pkt); ret = av_interleaved_write_frame(fmt_ctx, &pkt);
/* pkt is now blank (av_interleaved_write_frame() takes ownership of av_packet_unref(&pkt);
* its contents and resets pkt), so that no unreferencing is necessary.
* This would be different if one used av_write_frame(). */
if (ret < 0) { if (ret < 0) {
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret)); fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
exit(1); exit(1);
@ -124,7 +121,7 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
/* Add an output stream. */ /* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc, static void add_stream(OutputStream *ost, AVFormatContext *oc,
const AVCodec **codec, AVCodec **codec,
enum AVCodecID codec_id) enum AVCodecID codec_id)
{ {
AVCodecContext *c; AVCodecContext *c;
@ -138,12 +135,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
exit(1); exit(1);
} }
ost->tmp_pkt = av_packet_alloc();
if (!ost->tmp_pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
exit(1);
}
ost->st = avformat_new_stream(oc, NULL); ost->st = avformat_new_stream(oc, NULL);
if (!ost->st) { if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n"); fprintf(stderr, "Could not allocate stream\n");
@ -170,7 +161,16 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
c->sample_rate = 44100; c->sample_rate = 44100;
} }
} }
av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO); c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
c->channel_layout = AV_CH_LAYOUT_STEREO;
if ((*codec)->channel_layouts) {
c->channel_layout = (*codec)->channel_layouts[0];
for (i = 0; (*codec)->channel_layouts[i]; i++) {
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
c->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
ost->st->time_base = (AVRational){ 1, c->sample_rate }; ost->st->time_base = (AVRational){ 1, c->sample_rate };
break; break;
@ -215,7 +215,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
/* audio output */ /* audio output */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt, static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
const AVChannelLayout *channel_layout, uint64_t channel_layout,
int sample_rate, int nb_samples) int sample_rate, int nb_samples)
{ {
AVFrame *frame = av_frame_alloc(); AVFrame *frame = av_frame_alloc();
@ -227,7 +227,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
} }
frame->format = sample_fmt; frame->format = sample_fmt;
av_channel_layout_copy(&frame->ch_layout, channel_layout); frame->channel_layout = channel_layout;
frame->sample_rate = sample_rate; frame->sample_rate = sample_rate;
frame->nb_samples = nb_samples; frame->nb_samples = nb_samples;
@ -242,8 +242,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
return frame; return frame;
} }
static void open_audio(AVFormatContext *oc, const AVCodec *codec, static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
OutputStream *ost, AVDictionary *opt_arg)
{ {
AVCodecContext *c; AVCodecContext *c;
int nb_samples; int nb_samples;
@ -272,9 +271,9 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
else else
nb_samples = c->frame_size; nb_samples = c->frame_size;
ost->frame = alloc_audio_frame(c->sample_fmt, &c->ch_layout, ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
c->sample_rate, nb_samples); c->sample_rate, nb_samples);
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout, ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
c->sample_rate, nb_samples); c->sample_rate, nb_samples);
/* copy the stream parameters to the muxer */ /* copy the stream parameters to the muxer */
@ -292,10 +291,10 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
} }
/* set options */ /* set options */
av_opt_set_chlayout (ost->swr_ctx, "in_chlayout", &c->ch_layout, 0); av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0); av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
av_opt_set_chlayout (ost->swr_ctx, "out_chlayout", &c->ch_layout, 0); av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0); av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0); av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
@ -321,7 +320,7 @@ static AVFrame *get_audio_frame(OutputStream *ost)
for (j = 0; j <frame->nb_samples; j++) { for (j = 0; j <frame->nb_samples; j++) {
v = (int)(sin(ost->t) * 10000); v = (int)(sin(ost->t) * 10000);
for (i = 0; i < ost->enc->ch_layout.nb_channels; i++) for (i = 0; i < ost->enc->channels; i++)
*q++ = v; *q++ = v;
ost->t += ost->tincr; ost->t += ost->tincr;
ost->tincr += ost->tincr2; ost->tincr += ost->tincr2;
@ -377,7 +376,7 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
ost->samples_count += dst_nb_samples; ost->samples_count += dst_nb_samples;
} }
return write_frame(oc, c, ost->st, frame, ost->tmp_pkt); return write_frame(oc, c, ost->st, frame);
} }
/**************************************************************/ /**************************************************************/
@ -406,8 +405,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
return picture; return picture;
} }
static void open_video(AVFormatContext *oc, const AVCodec *codec, static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
OutputStream *ost, AVDictionary *opt_arg)
{ {
int ret; int ret;
AVCodecContext *c = ost->enc; AVCodecContext *c = ost->enc;
@ -520,7 +518,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
*/ */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost) static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{ {
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt); return write_frame(oc, ost->enc, ost->st, get_video_frame(ost));
} }
static void close_stream(AVFormatContext *oc, OutputStream *ost) static void close_stream(AVFormatContext *oc, OutputStream *ost)
@ -528,7 +526,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
avcodec_free_context(&ost->enc); avcodec_free_context(&ost->enc);
av_frame_free(&ost->frame); av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame); av_frame_free(&ost->tmp_frame);
av_packet_free(&ost->tmp_pkt);
sws_freeContext(ost->sws_ctx); sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx); swr_free(&ost->swr_ctx);
} }
@ -539,10 +536,10 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
OutputStream video_st = { 0 }, audio_st = { 0 }; OutputStream video_st = { 0 }, audio_st = { 0 };
const AVOutputFormat *fmt;
const char *filename; const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc; AVFormatContext *oc;
const AVCodec *audio_codec, *video_codec; AVCodec *audio_codec, *video_codec;
int ret; int ret;
int have_video = 0, have_audio = 0; int have_video = 0, have_audio = 0;
int encode_video = 0, encode_audio = 0; int encode_video = 0, encode_audio = 0;
@ -629,6 +626,10 @@ int main(int argc, char **argv)
} }
} }
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(oc); av_write_trailer(oc);
/* Close each codec. */ /* Close each codec. */

59
ffmpeg/share/ffmpeg/examples/qsvdec.c

@ -44,10 +44,38 @@
#include "libavutil/hwcontext_qsv.h" #include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h" #include "libavutil/mem.h"
typedef struct DecodeContext {
AVBufferRef *hw_device_ref;
} DecodeContext;
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts) static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{ {
while (*pix_fmts != AV_PIX_FMT_NONE) { while (*pix_fmts != AV_PIX_FMT_NONE) {
if (*pix_fmts == AV_PIX_FMT_QSV) { if (*pix_fmts == AV_PIX_FMT_QSV) {
DecodeContext *decode = avctx->opaque;
AVHWFramesContext *frames_ctx;
AVQSVFramesContext *frames_hwctx;
int ret;
/* create a pool of surfaces to be used by the decoder */
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
if (!avctx->hw_frames_ctx)
return AV_PIX_FMT_NONE;
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
frames_hwctx = frames_ctx->hwctx;
frames_ctx->format = AV_PIX_FMT_QSV;
frames_ctx->sw_format = avctx->sw_pix_fmt;
frames_ctx->width = FFALIGN(avctx->coded_width, 32);
frames_ctx->height = FFALIGN(avctx->coded_height, 32);
frames_ctx->initial_pool_size = 32;
frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
if (ret < 0)
return AV_PIX_FMT_NONE;
return AV_PIX_FMT_QSV; return AV_PIX_FMT_QSV;
} }
@ -59,7 +87,7 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
return AV_PIX_FMT_NONE; return AV_PIX_FMT_NONE;
} }
static int decode_packet(AVCodecContext *decoder_ctx, static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
AVFrame *frame, AVFrame *sw_frame, AVFrame *frame, AVFrame *sw_frame,
AVPacket *pkt, AVIOContext *output_ctx) AVPacket *pkt, AVIOContext *output_ctx)
{ {
@ -113,15 +141,15 @@ int main(int argc, char **argv)
AVCodecContext *decoder_ctx = NULL; AVCodecContext *decoder_ctx = NULL;
const AVCodec *decoder; const AVCodec *decoder;
AVPacket *pkt = NULL; AVPacket pkt = { 0 };
AVFrame *frame = NULL, *sw_frame = NULL; AVFrame *frame = NULL, *sw_frame = NULL;
DecodeContext decode = { NULL };
AVIOContext *output_ctx = NULL; AVIOContext *output_ctx = NULL;
int ret, i; int ret, i;
AVBufferRef *device_ref = NULL;
if (argc < 3) { if (argc < 3) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]); fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
return 1; return 1;
@ -149,7 +177,7 @@ int main(int argc, char **argv)
} }
/* open the hardware device */ /* open the hardware device */
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV, ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
"auto", NULL, 0); "auto", NULL, 0);
if (ret < 0) { if (ret < 0) {
fprintf(stderr, "Cannot open the hardware device\n"); fprintf(stderr, "Cannot open the hardware device\n");
@ -181,8 +209,7 @@ int main(int argc, char **argv)
decoder_ctx->extradata_size = video_st->codecpar->extradata_size; decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
} }
decoder_ctx->opaque = &decode;
decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
decoder_ctx->get_format = get_format; decoder_ctx->get_format = get_format;
ret = avcodec_open2(decoder_ctx, NULL, NULL); ret = avcodec_open2(decoder_ctx, NULL, NULL);
@ -200,26 +227,27 @@ int main(int argc, char **argv)
frame = av_frame_alloc(); frame = av_frame_alloc();
sw_frame = av_frame_alloc(); sw_frame = av_frame_alloc();
pkt = av_packet_alloc(); if (!frame || !sw_frame) {
if (!frame || !sw_frame || !pkt) {
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
goto finish; goto finish;
} }
/* actual decoding */ /* actual decoding */
while (ret >= 0) { while (ret >= 0) {
ret = av_read_frame(input_ctx, pkt); ret = av_read_frame(input_ctx, &pkt);
if (ret < 0) if (ret < 0)
break; break;
if (pkt->stream_index == video_st->index) if (pkt.stream_index == video_st->index)
ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx); ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
av_packet_unref(pkt); av_packet_unref(&pkt);
} }
/* flush the decoder */ /* flush the decoder */
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx); pkt.data = NULL;
pkt.size = 0;
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
finish: finish:
if (ret < 0) { if (ret < 0) {
@ -232,11 +260,10 @@ finish:
av_frame_free(&frame); av_frame_free(&frame);
av_frame_free(&sw_frame); av_frame_free(&sw_frame);
av_packet_free(&pkt);
avcodec_free_context(&decoder_ctx); avcodec_free_context(&decoder_ctx);
av_buffer_unref(&device_ref); av_buffer_unref(&decode.hw_device_ref);
avio_close(output_ctx); avio_close(output_ctx);

45
ffmpeg/share/ffmpeg/examples/remuxing.c

@ -45,9 +45,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, cons
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
const AVOutputFormat *ofmt = NULL; AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL; AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket *pkt = NULL; AVPacket pkt;
const char *in_filename, *out_filename; const char *in_filename, *out_filename;
int ret, i; int ret, i;
int stream_index = 0; int stream_index = 0;
@ -65,12 +65,6 @@ int main(int argc, char **argv)
in_filename = argv[1]; in_filename = argv[1];
out_filename = argv[2]; out_filename = argv[2];
pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
return 1;
}
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename); fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end; goto end;
@ -91,7 +85,7 @@ int main(int argc, char **argv)
} }
stream_mapping_size = ifmt_ctx->nb_streams; stream_mapping_size = ifmt_ctx->nb_streams;
stream_mapping = av_calloc(stream_mapping_size, sizeof(*stream_mapping)); stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
if (!stream_mapping) { if (!stream_mapping) {
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
goto end; goto end;
@ -146,39 +140,38 @@ int main(int argc, char **argv)
while (1) { while (1) {
AVStream *in_stream, *out_stream; AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, pkt); ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0) if (ret < 0)
break; break;
in_stream = ifmt_ctx->streams[pkt->stream_index]; in_stream = ifmt_ctx->streams[pkt.stream_index];
if (pkt->stream_index >= stream_mapping_size || if (pkt.stream_index >= stream_mapping_size ||
stream_mapping[pkt->stream_index] < 0) { stream_mapping[pkt.stream_index] < 0) {
av_packet_unref(pkt); av_packet_unref(&pkt);
continue; continue;
} }
pkt->stream_index = stream_mapping[pkt->stream_index]; pkt.stream_index = stream_mapping[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt->stream_index]; out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, pkt, "in"); log_packet(ifmt_ctx, &pkt, "in");
/* copy packet */ /* copy packet */
av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base); pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt->pos = -1; pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
log_packet(ofmt_ctx, pkt, "out"); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
ret = av_interleaved_write_frame(ofmt_ctx, pkt); log_packet(ofmt_ctx, &pkt, "out");
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
* its contents and resets pkt), so that no unreferencing is necessary. ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
* This would be different if one used av_write_frame(). */
if (ret < 0) { if (ret < 0) {
fprintf(stderr, "Error muxing packet\n"); fprintf(stderr, "Error muxing packet\n");
break; break;
} }
av_packet_unref(&pkt);
} }
av_write_trailer(ofmt_ctx); av_write_trailer(ofmt_ctx);
end: end:
av_packet_free(&pkt);
avformat_close_input(&ifmt_ctx); avformat_close_input(&ifmt_ctx);

16
ffmpeg/share/ffmpeg/examples/resampling_audio.c

@ -80,7 +80,7 @@ static void fill_samples(double *dst, int nb_samples, int nb_channels, int sampl
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
AVChannelLayout src_ch_layout = AV_CHANNEL_LAYOUT_STEREO, dst_ch_layout = AV_CHANNEL_LAYOUT_SURROUND; int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
int src_rate = 48000, dst_rate = 44100; int src_rate = 48000, dst_rate = 44100;
uint8_t **src_data = NULL, **dst_data = NULL; uint8_t **src_data = NULL, **dst_data = NULL;
int src_nb_channels = 0, dst_nb_channels = 0; int src_nb_channels = 0, dst_nb_channels = 0;
@ -92,7 +92,6 @@ int main(int argc, char **argv)
int dst_bufsize; int dst_bufsize;
const char *fmt; const char *fmt;
struct SwrContext *swr_ctx; struct SwrContext *swr_ctx;
char buf[64];
double t; double t;
int ret; int ret;
@ -121,11 +120,11 @@ int main(int argc, char **argv)
} }
/* set options */ /* set options */
av_opt_set_chlayout(swr_ctx, "in_chlayout", &src_ch_layout, 0); av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0); av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0); av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0); av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0); av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0); av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
@ -137,7 +136,7 @@ int main(int argc, char **argv)
/* allocate source and destination samples buffers */ /* allocate source and destination samples buffers */
src_nb_channels = src_ch_layout.nb_channels; src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels, ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
src_nb_samples, src_sample_fmt, 0); src_nb_samples, src_sample_fmt, 0);
if (ret < 0) { if (ret < 0) {
@ -152,7 +151,7 @@ int main(int argc, char **argv)
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
/* buffer is going to be directly written to a rawaudio file, no alignment */ /* buffer is going to be directly written to a rawaudio file, no alignment */
dst_nb_channels = dst_ch_layout.nb_channels; dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels, ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
dst_nb_samples, dst_sample_fmt, 0); dst_nb_samples, dst_sample_fmt, 0);
if (ret < 0) { if (ret < 0) {
@ -195,10 +194,9 @@ int main(int argc, char **argv)
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0) if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
goto end; goto end;
av_channel_layout_describe(&dst_ch_layout, buf, sizeof(buf));
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n" fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
"ffplay -f %s -channel_layout %s -channels %d -ar %d %s\n", "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
fmt, buf, dst_nb_channels, dst_rate, dst_filename); fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
end: end:
fclose(dst_file); fclose(dst_file);

64
ffmpeg/share/ffmpeg/examples/transcode_aac.c

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2022 Andreas Unterweger * Copyright (c) 2013-2018 Andreas Unterweger
* *
* This file is part of FFmpeg. * This file is part of FFmpeg.
* *
@ -38,7 +38,6 @@
#include "libavutil/audio_fifo.h" #include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h" #include "libavutil/frame.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
@ -61,8 +60,7 @@ static int open_input_file(const char *filename,
AVCodecContext **input_codec_context) AVCodecContext **input_codec_context)
{ {
AVCodecContext *avctx; AVCodecContext *avctx;
const AVCodec *input_codec; AVCodec *input_codec;
const AVStream *stream;
int error; int error;
/* Open the input file to read from it. */ /* Open the input file to read from it. */
@ -90,10 +88,8 @@ static int open_input_file(const char *filename,
return AVERROR_EXIT; return AVERROR_EXIT;
} }
stream = (*input_format_context)->streams[0];
/* Find a decoder for the audio stream. */ /* Find a decoder for the audio stream. */
if (!(input_codec = avcodec_find_decoder(stream->codecpar->codec_id))) { if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
fprintf(stderr, "Could not find input codec\n"); fprintf(stderr, "Could not find input codec\n");
avformat_close_input(input_format_context); avformat_close_input(input_format_context);
return AVERROR_EXIT; return AVERROR_EXIT;
@ -108,7 +104,7 @@ static int open_input_file(const char *filename,
} }
/* Initialize the stream parameters with demuxer information. */ /* Initialize the stream parameters with demuxer information. */
error = avcodec_parameters_to_context(avctx, stream->codecpar); error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
if (error < 0) { if (error < 0) {
avformat_close_input(input_format_context); avformat_close_input(input_format_context);
avcodec_free_context(&avctx); avcodec_free_context(&avctx);
@ -124,9 +120,6 @@ static int open_input_file(const char *filename,
return error; return error;
} }
/* Set the packet timebase for the decoder. */
avctx->pkt_timebase = stream->time_base;
/* Save the decoder context for easier access later. */ /* Save the decoder context for easier access later. */
*input_codec_context = avctx; *input_codec_context = avctx;
@ -151,7 +144,7 @@ static int open_output_file(const char *filename,
AVCodecContext *avctx = NULL; AVCodecContext *avctx = NULL;
AVIOContext *output_io_context = NULL; AVIOContext *output_io_context = NULL;
AVStream *stream = NULL; AVStream *stream = NULL;
const AVCodec *output_codec = NULL; AVCodec *output_codec = NULL;
int error; int error;
/* Open the output file to write to it. */ /* Open the output file to write to it. */
@ -206,11 +199,15 @@ static int open_output_file(const char *filename,
/* Set the basic encoder parameters. /* Set the basic encoder parameters.
* The input file's sample rate is used to avoid a sample rate conversion. */ * The input file's sample rate is used to avoid a sample rate conversion. */
av_channel_layout_default(&avctx->ch_layout, OUTPUT_CHANNELS); avctx->channels = OUTPUT_CHANNELS;
avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
avctx->sample_rate = input_codec_context->sample_rate; avctx->sample_rate = input_codec_context->sample_rate;
avctx->sample_fmt = output_codec->sample_fmts[0]; avctx->sample_fmt = output_codec->sample_fmts[0];
avctx->bit_rate = OUTPUT_BIT_RATE; avctx->bit_rate = OUTPUT_BIT_RATE;
/* Allow the use of the experimental AAC encoder. */
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
/* Set the sample rate for the container. */ /* Set the sample rate for the container. */
stream->time_base.den = input_codec_context->sample_rate; stream->time_base.den = input_codec_context->sample_rate;
stream->time_base.num = 1; stream->time_base.num = 1;
@ -292,18 +289,21 @@ static int init_resampler(AVCodecContext *input_codec_context,
/* /*
* Create a resampler context for the conversion. * Create a resampler context for the conversion.
* Set the conversion parameters. * Set the conversion parameters.
* Default channel layouts based on the number of channels
* are assumed for simplicity (they are sometimes not detected
* properly by the demuxer and/or decoder).
*/ */
error = swr_alloc_set_opts2(resample_context, *resample_context = swr_alloc_set_opts(NULL,
&output_codec_context->ch_layout, av_get_default_channel_layout(output_codec_context->channels),
output_codec_context->sample_fmt, output_codec_context->sample_fmt,
output_codec_context->sample_rate, output_codec_context->sample_rate,
&input_codec_context->ch_layout, av_get_default_channel_layout(input_codec_context->channels),
input_codec_context->sample_fmt, input_codec_context->sample_fmt,
input_codec_context->sample_rate, input_codec_context->sample_rate,
0, NULL); 0, NULL);
if (error < 0) { if (!*resample_context) {
fprintf(stderr, "Could not allocate resample context\n"); fprintf(stderr, "Could not allocate resample context\n");
return error; return AVERROR(ENOMEM);
} }
/* /*
* Perform a sanity check so that the number of converted samples is * Perform a sanity check so that the number of converted samples is
@ -331,7 +331,7 @@ static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
{ {
/* Create the FIFO buffer based on the specified output sample format. */ /* Create the FIFO buffer based on the specified output sample format. */
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt, if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
output_codec_context->ch_layout.nb_channels, 1))) { output_codec_context->channels, 1))) {
fprintf(stderr, "Could not allocate FIFO\n"); fprintf(stderr, "Could not allocate FIFO\n");
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
@ -380,8 +380,6 @@ static int decode_audio_frame(AVFrame *frame,
if (error < 0) if (error < 0)
return error; return error;
*data_present = 0;
*finished = 0;
/* Read one audio frame from the input file into a temporary packet. */ /* Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, input_packet)) < 0) { if ((error = av_read_frame(input_format_context, input_packet)) < 0) {
/* If we are at the end of the file, flush the decoder below. */ /* If we are at the end of the file, flush the decoder below. */
@ -451,7 +449,7 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
* Each pointer will later point to the audio samples of the corresponding * Each pointer will later point to the audio samples of the corresponding
* channels (although it may be NULL for interleaved formats). * channels (although it may be NULL for interleaved formats).
*/ */
if (!(*converted_input_samples = calloc(output_codec_context->ch_layout.nb_channels, if (!(*converted_input_samples = calloc(output_codec_context->channels,
sizeof(**converted_input_samples)))) { sizeof(**converted_input_samples)))) {
fprintf(stderr, "Could not allocate converted input sample pointers\n"); fprintf(stderr, "Could not allocate converted input sample pointers\n");
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@ -460,7 +458,7 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
/* Allocate memory for the samples of all channels in one consecutive /* Allocate memory for the samples of all channels in one consecutive
* block for convenience. */ * block for convenience. */
if ((error = av_samples_alloc(*converted_input_samples, NULL, if ((error = av_samples_alloc(*converted_input_samples, NULL,
output_codec_context->ch_layout.nb_channels, output_codec_context->channels,
frame_size, frame_size,
output_codec_context->sample_fmt, 0)) < 0) { output_codec_context->sample_fmt, 0)) < 0) {
fprintf(stderr, fprintf(stderr,
@ -560,7 +558,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFrame *input_frame = NULL; AVFrame *input_frame = NULL;
/* Temporary storage for the converted input samples. */ /* Temporary storage for the converted input samples. */
uint8_t **converted_input_samples = NULL; uint8_t **converted_input_samples = NULL;
int data_present; int data_present = 0;
int ret = AVERROR_EXIT; int ret = AVERROR_EXIT;
/* Initialize temporary storage for one input frame. */ /* Initialize temporary storage for one input frame. */
@ -634,7 +632,7 @@ static int init_output_frame(AVFrame **frame,
* Default channel layouts based on the number of channels * Default channel layouts based on the number of channels
* are assumed for simplicity. */ * are assumed for simplicity. */
(*frame)->nb_samples = frame_size; (*frame)->nb_samples = frame_size;
av_channel_layout_copy(&(*frame)->ch_layout, &output_codec_context->ch_layout); (*frame)->channel_layout = output_codec_context->channel_layout;
(*frame)->format = output_codec_context->sample_fmt; (*frame)->format = output_codec_context->sample_fmt;
(*frame)->sample_rate = output_codec_context->sample_rate; (*frame)->sample_rate = output_codec_context->sample_rate;
@ -681,16 +679,17 @@ static int encode_audio_frame(AVFrame *frame,
pts += frame->nb_samples; pts += frame->nb_samples;
} }
*data_present = 0;
/* Send the audio frame stored in the temporary packet to the encoder. /* Send the audio frame stored in the temporary packet to the encoder.
* The output audio stream encoder is used to do this. */ * The output audio stream encoder is used to do this. */
error = avcodec_send_frame(output_codec_context, frame); error = avcodec_send_frame(output_codec_context, frame);
/* Check for errors, but proceed with fetching encoded samples if the /* The encoder signals that it has nothing more to encode. */
* encoder signals that it has nothing more to encode. */ if (error == AVERROR_EOF) {
if (error < 0 && error != AVERROR_EOF) { error = 0;
fprintf(stderr, "Could not send packet for encoding (error '%s')\n", goto cleanup;
av_err2str(error)); } else if (error < 0) {
goto cleanup; fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
av_err2str(error));
goto cleanup;
} }
/* Receive one encoded frame from the encoder. */ /* Receive one encoded frame from the encoder. */
@ -861,6 +860,7 @@ int main(int argc, char **argv)
int data_written; int data_written;
/* Flush the encoder as it may have delayed frames. */ /* Flush the encoder as it may have delayed frames. */
do { do {
data_written = 0;
if (encode_audio_frame(NULL, output_format_context, if (encode_audio_frame(NULL, output_format_context,
output_codec_context, &data_written)) output_codec_context, &data_written))
goto cleanup; goto cleanup;

29
ffmpeg/share/ffmpeg/examples/transcoding.c

@ -32,7 +32,6 @@
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
#include <libavfilter/buffersink.h> #include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h> #include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h> #include <libavutil/opt.h>
#include <libavutil/pixdesc.h> #include <libavutil/pixdesc.h>
@ -72,13 +71,13 @@ static int open_input_file(const char *filename)
return ret; return ret;
} }
stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx)); stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
if (!stream_ctx) if (!stream_ctx)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) { for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream = ifmt_ctx->streams[i]; AVStream *stream = ifmt_ctx->streams[i];
const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id); AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *codec_ctx; AVCodecContext *codec_ctx;
if (!dec) { if (!dec) {
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i); av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
@ -123,7 +122,7 @@ static int open_output_file(const char *filename)
AVStream *out_stream; AVStream *out_stream;
AVStream *in_stream; AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx; AVCodecContext *dec_ctx, *enc_ctx;
const AVCodec *encoder; AVCodec *encoder;
int ret; int ret;
unsigned int i; unsigned int i;
@ -175,9 +174,8 @@ static int open_output_file(const char *filename)
enc_ctx->time_base = av_inv_q(dec_ctx->framerate); enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
} else { } else {
enc_ctx->sample_rate = dec_ctx->sample_rate; enc_ctx->sample_rate = dec_ctx->sample_rate;
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout); enc_ctx->channel_layout = dec_ctx->channel_layout;
if (ret < 0) enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
return ret;
/* take first format from list of supported formats */ /* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0]; enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate}; enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
@ -290,7 +288,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end; goto end;
} }
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) { } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
char buf[64];
buffersrc = avfilter_get_by_name("abuffer"); buffersrc = avfilter_get_by_name("abuffer");
buffersink = avfilter_get_by_name("abuffersink"); buffersink = avfilter_get_by_name("abuffersink");
if (!buffersrc || !buffersink) { if (!buffersrc || !buffersink) {
@ -299,14 +296,14 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end; goto end;
} }
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) if (!dec_ctx->channel_layout)
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels); dec_ctx->channel_layout =
av_channel_layout_describe(&dec_ctx->ch_layout, buf, sizeof(buf)); av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args), snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s", "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate, dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt), av_get_sample_fmt_name(dec_ctx->sample_fmt),
buf); dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph); args, NULL, filter_graph);
if (ret < 0) { if (ret < 0) {
@ -329,9 +326,9 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end; goto end;
} }
av_channel_layout_describe(&enc_ctx->ch_layout, buf, sizeof(buf)); ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
ret = av_opt_set(buffersink_ctx, "ch_layouts", (uint8_t*)&enc_ctx->channel_layout,
buf, AV_OPT_SEARCH_CHILDREN); sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end; goto end;

2
ffmpeg/share/ffmpeg/examples/vaapi_encode.c

@ -105,7 +105,7 @@ int main(int argc, char *argv[])
FILE *fin = NULL, *fout = NULL; FILE *fin = NULL, *fout = NULL;
AVFrame *sw_frame = NULL, *hw_frame = NULL; AVFrame *sw_frame = NULL, *hw_frame = NULL;
AVCodecContext *avctx = NULL; AVCodecContext *avctx = NULL;
const AVCodec *codec = NULL; AVCodec *codec = NULL;
const char *enc_name = "h264_vaapi"; const char *enc_name = "h264_vaapi";
if (argc < 5) { if (argc < 5) {

6
ffmpeg/share/ffmpeg/examples/vaapi_transcode.c

@ -62,7 +62,7 @@ static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
static int open_input_file(const char *filename) static int open_input_file(const char *filename)
{ {
int ret; int ret;
const AVCodec *decoder = NULL; AVCodec *decoder = NULL;
AVStream *video = NULL; AVStream *video = NULL;
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) { if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
@ -142,7 +142,7 @@ end:
return ret; return ret;
} }
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec) static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
{ {
AVFrame *frame; AVFrame *frame;
int ret = 0; int ret = 0;
@ -226,9 +226,9 @@ fail:
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
const AVCodec *enc_codec;
int ret = 0; int ret = 0;
AVPacket *dec_pkt; AVPacket *dec_pkt;
AVCodec *enc_codec;
if (argc != 4) { if (argc != 4) {
fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n" fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"

1
libffmpeg_265_js

File diff suppressed because one or more lines are too long

BIN
libffmpeg_265_wasm

Binary file not shown.

1
node_modules/.bin/mime

@ -0,0 +1 @@
../mime/cli.js

1
node_modules/.bin/uglifyjs

@ -0,0 +1 @@
../uglify-js/bin/uglifyjs

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save