Blender  V2.93
writeffmpeg.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  * Partial Copyright (c) 2006 Peter Schlaile
16  */
17 
22 #ifdef WITH_FFMPEG
23 # include <stdio.h>
24 # include <string.h>
25 
26 # include <stdlib.h>
27 
28 # include "MEM_guardedalloc.h"
29 
30 # include "DNA_scene_types.h"
31 
32 # include "BLI_blenlib.h"
33 
34 # ifdef WITH_AUDASPACE
35 # include <AUD_Device.h>
36 # include <AUD_Special.h>
37 # endif
38 
39 # include "BLI_math_base.h"
40 # include "BLI_threads.h"
41 # include "BLI_utildefines.h"
42 
43 # include "BKE_global.h"
44 # include "BKE_idprop.h"
45 # include "BKE_image.h"
46 # include "BKE_lib_id.h"
47 # include "BKE_main.h"
48 # include "BKE_report.h"
49 # include "BKE_sound.h"
50 # include "BKE_writeffmpeg.h"
51 
52 # include "IMB_imbuf.h"
53 
54 /* This needs to be included after BLI_math_base.h otherwise it will redefine some math defines
55  * like M_SQRT1_2 leading to warnings with MSVC */
56 # include <libavcodec/avcodec.h>
57 # include <libavformat/avformat.h>
58 # include <libavutil/imgutils.h>
59 # include <libavutil/opt.h>
60 # include <libavutil/rational.h>
61 # include <libavutil/samplefmt.h>
62 # include <libswscale/swscale.h>
63 
64 # include "ffmpeg_compat.h"
65 
66 struct StampData;
67 
/* Per-render-job state for FFmpeg movie output: one context per output file
 * (video + optional audio stream, conversion scratch frames, mixdown buffers). */
typedef struct FFMpegContext {
  /* Settings copied from RenderData/FFMpegCodecData when output starts. */
  int ffmpeg_type;          /* Container/output format (FFMPEG_MPEG4, FFMPEG_WEBM, ...). */
  int ffmpeg_codec;         /* Video codec id chosen by the user. */
  int ffmpeg_audio_codec;   /* Audio codec id (AV_CODEC_ID_NONE disables audio). */
  int ffmpeg_video_bitrate; /* Video bitrate, in kbit/s (multiplied by 1000 on use). */
  int ffmpeg_audio_bitrate; /* Audio bitrate, in kbit/s. */
  int ffmpeg_gop_size;      /* Keyframe interval (GOP size). */
  int ffmpeg_max_b_frames;  /* Max consecutive B-frames; only applied when the flag is set. */
  int ffmpeg_autosplit;     /* Non-zero: split output into a new file when it grows too large. */
  int ffmpeg_autosplit_count; /* How many autosplit files have been produced so far. */
  bool ffmpeg_preview;      /* True when writing a preview render rather than the final output. */

  int ffmpeg_crf; /* set to 0 to not use CRF mode; we have another flag for lossless anyway. */
  int ffmpeg_preset; /* see eFFMpegPreset */

  AVFormatContext *outfile;    /* Muxer handle for the output container. */
  AVCodecContext *video_codec; /* Encoder context for the video stream (NULL if unused). */
  AVCodecContext *audio_codec; /* Encoder context for the audio stream (NULL if unused). */
  AVStream *video_stream;
  AVStream *audio_stream;
  AVFrame *current_frame; /* Image frame in output pixel format. */
  int video_time;         /* Video timestamp, counted in frames (ticks of the time base). */

  /* Image frame in Blender's own pixel format, may need conversion to the output pixel format. */
  AVFrame *img_convert_frame;
  struct SwsContext *img_convert_ctx; /* swscale context for the conversion above. */

  uint8_t *audio_input_buffer;        /* Interleaved samples read from the mixdown device. */
  uint8_t *audio_deinterleave_buffer; /* Scratch buffer used when the codec wants planar data. */
  int audio_input_samples;            /* Samples per channel fed to the encoder per frame. */
  double audio_time;                  /* Current audio timestamp, in seconds. */
  double audio_time_total;            /* Total audio time, in seconds — usage not visible here. */
  bool audio_deinterleave;            /* True when the codec's sample format is planar. */
  int audio_sample_size;              /* Bytes per sample of the codec's sample format. */

  struct StampData *stamp_data; /* Render-stamp metadata written into the container. */

#  ifdef WITH_AUDASPACE
  AUD_Device *audio_mixdown_device; /* Audaspace device the mixed-down audio is read from. */
#  endif
} FFMpegContext;
109 
110 # define FFMPEG_AUTOSPLIT_SIZE 2000000000
111 
112 # define PRINT \
113  if (G.debug & G_DEBUG_FFMPEG) \
114  printf
115 
116 static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value);
117 static void ffmpeg_dict_set_float(AVDictionary **dict, const char *key, float value);
118 static void ffmpeg_set_expert_options(RenderData *rd);
119 static void ffmpeg_filepath_get(FFMpegContext *context,
120  char *string,
121  const struct RenderData *rd,
122  bool preview,
123  const char *suffix);
124 
125 /* Delete a picture buffer */
126 
127 static void delete_picture(AVFrame *f)
128 {
129  if (f) {
130  if (f->data[0]) {
131  MEM_freeN(f->data[0]);
132  }
133  av_free(f);
134  }
135 }
136 
137 static int request_float_audio_buffer(int codec_id)
138 {
139  /* If any of these codecs, we prefer the float sample format (if supported) */
140  return codec_id == AV_CODEC_ID_AAC || codec_id == AV_CODEC_ID_AC3 ||
141  codec_id == AV_CODEC_ID_VORBIS;
142 }
143 
144 # ifdef WITH_AUDASPACE
145 
146 static int write_audio_frame(FFMpegContext *context)
147 {
148  AVFrame *frame = NULL;
149  AVCodecContext *c = context->audio_codec;
150 
151  AUD_Device_read(
152  context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
153 
154  frame = av_frame_alloc();
155  frame->pts = context->audio_time / av_q2d(c->time_base);
156  frame->nb_samples = context->audio_input_samples;
157  frame->format = c->sample_fmt;
158  frame->channels = c->channels;
159  frame->channel_layout = c->channel_layout;
160 
161  if (context->audio_deinterleave) {
162  int channel, i;
163  uint8_t *temp;
164 
165  for (channel = 0; channel < c->channels; channel++) {
166  for (i = 0; i < frame->nb_samples; i++) {
167  memcpy(context->audio_deinterleave_buffer +
168  (i + channel * frame->nb_samples) * context->audio_sample_size,
169  context->audio_input_buffer +
170  (c->channels * i + channel) * context->audio_sample_size,
171  context->audio_sample_size);
172  }
173  }
174 
175  temp = context->audio_deinterleave_buffer;
176  context->audio_deinterleave_buffer = context->audio_input_buffer;
177  context->audio_input_buffer = temp;
178  }
179 
180  avcodec_fill_audio_frame(frame,
181  c->channels,
182  c->sample_fmt,
183  context->audio_input_buffer,
184  context->audio_input_samples * c->channels * context->audio_sample_size,
185  1);
186 
187  int success = 1;
188 
189  int ret = avcodec_send_frame(c, frame);
190  if (ret < 0) {
191  /* Can't send frame to encoder. This shouldn't happen. */
192  fprintf(stderr, "Can't send audio frame: %s\n", av_err2str(ret));
193  success = -1;
194  }
195 
196  AVPacket *pkt = av_packet_alloc();
197 
198  while (ret >= 0) {
199 
200  ret = avcodec_receive_packet(c, pkt);
201  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
202  break;
203  }
204  if (ret < 0) {
205  fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
206  success = -1;
207  }
208 
209  pkt->stream_index = context->audio_stream->index;
210  av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);
211 # ifdef FFMPEG_USE_DURATION_WORKAROUND
212  my_guess_pkt_duration(context->outfile, context->audio_stream, pkt);
213 # endif
214 
215  pkt->flags |= AV_PKT_FLAG_KEY;
216 
217  int write_ret = av_interleaved_write_frame(context->outfile, pkt);
218  if (write_ret != 0) {
219  fprintf(stderr, "Error writing audio packet: %s\n", av_err2str(write_ret));
220  success = -1;
221  break;
222  }
223  }
224 
225  av_packet_free(&pkt);
226  av_frame_free(&frame);
227 
228  return success;
229 }
230 # endif /* #ifdef WITH_AUDASPACE */
231 
232 /* Allocate a temporary frame */
233 static AVFrame *alloc_picture(int pix_fmt, int width, int height)
234 {
235  AVFrame *f;
236  uint8_t *buf;
237  int size;
238 
239  /* allocate space for the struct */
240  f = av_frame_alloc();
241  if (!f) {
242  return NULL;
243  }
244  size = av_image_get_buffer_size(pix_fmt, width, height, 1);
245  /* allocate the actual picture buffer */
246  buf = MEM_mallocN(size, "AVFrame buffer");
247  if (!buf) {
248  free(f);
249  return NULL;
250  }
251 
252  av_image_fill_arrays(f->data, f->linesize, buf, pix_fmt, width, height, 1);
253  f->format = pix_fmt;
254  f->width = width;
255  f->height = height;
256 
257  return f;
258 }
259 
260 /* Get the correct file extensions for the requested format,
261  * first is always desired guess_format parameter */
262 static const char **get_file_extensions(int format)
263 {
264  switch (format) {
265  case FFMPEG_DV: {
266  static const char *rv[] = {".dv", NULL};
267  return rv;
268  }
269  case FFMPEG_MPEG1: {
270  static const char *rv[] = {".mpg", ".mpeg", NULL};
271  return rv;
272  }
273  case FFMPEG_MPEG2: {
274  static const char *rv[] = {".dvd", ".vob", ".mpg", ".mpeg", NULL};
275  return rv;
276  }
277  case FFMPEG_MPEG4: {
278  static const char *rv[] = {".mp4", ".mpg", ".mpeg", NULL};
279  return rv;
280  }
281  case FFMPEG_AVI: {
282  static const char *rv[] = {".avi", NULL};
283  return rv;
284  }
285  case FFMPEG_MOV: {
286  static const char *rv[] = {".mov", NULL};
287  return rv;
288  }
289  case FFMPEG_H264: {
290  /* FIXME: avi for now... */
291  static const char *rv[] = {".avi", NULL};
292  return rv;
293  }
294 
295  case FFMPEG_XVID: {
296  /* FIXME: avi for now... */
297  static const char *rv[] = {".avi", NULL};
298  return rv;
299  }
300  case FFMPEG_FLV: {
301  static const char *rv[] = {".flv", NULL};
302  return rv;
303  }
304  case FFMPEG_MKV: {
305  static const char *rv[] = {".mkv", NULL};
306  return rv;
307  }
308  case FFMPEG_OGG: {
309  static const char *rv[] = {".ogv", ".ogg", NULL};
310  return rv;
311  }
312  case FFMPEG_WEBM: {
313  static const char *rv[] = {".webm", NULL};
314  return rv;
315  }
316  default:
317  return NULL;
318  }
319 }
320 
321 /* Write a frame to the output file */
322 static int write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList *reports)
323 {
324  int ret, success = 1;
325  AVPacket *packet = av_packet_alloc();
326 
327  AVCodecContext *c = context->video_codec;
328 
329  frame->pts = context->video_time;
330  context->video_time++;
331 
332  ret = avcodec_send_frame(c, frame);
333  if (ret < 0) {
334  /* Can't send frame to encoder. This shouldn't happen. */
335  fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
336  success = -1;
337  }
338 
339  while (ret >= 0) {
340  ret = avcodec_receive_packet(c, packet);
341 
342  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
343  /* No more packets available. */
344  break;
345  }
346  if (ret < 0) {
347  fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
348  break;
349  }
350 
351  packet->stream_index = context->video_stream->index;
352  av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
353 # ifdef FFMPEG_USE_DURATION_WORKAROUND
354  my_guess_pkt_duration(context->outfile, context->video_stream, packet);
355 # endif
356 
357  if (av_interleaved_write_frame(context->outfile, packet) != 0) {
358  success = -1;
359  break;
360  }
361  }
362 
363  if (!success) {
364  BKE_report(reports, RPT_ERROR, "Error writing frame");
365  PRINT("Error writing frame: %s\n", av_err2str(ret));
366  }
367 
368  av_packet_free(&packet);
369 
370  return success;
371 }
372 
/* Copy Blender render pixels into an FFmpeg frame in the output pixel format.
 * Returns context->current_frame, ready to be passed to write_video_frame().
 * Flips the image vertically (Blender stores bottom-up, FFmpeg expects
 * top-down) and swaps bytes on big-endian platforms. */
static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels)
{
  AVCodecParameters *codec = context->video_stream->codecpar;
  int height = codec->height;
  AVFrame *rgb_frame;

  if (context->img_convert_frame != NULL) {
    /* Pixel format conversion is needed. */
    rgb_frame = context->img_convert_frame;
  }
  else {
    /* The output pixel format is Blender's internal pixel format. */
    rgb_frame = context->current_frame;
  }

  /* Copy the Blender pixels into the FFmpeg datastructure, taking care of endianness and flipping
   * the image vertically. */
  /* NOTE(review): `pixels` is indexed with the frame's linesize, which assumes the
   * Blender buffer is tightly packed RGBA (4 bytes/pixel, stride == linesize) —
   * both frames here are allocated as AV_PIX_FMT_RGBA so this holds, but confirm
   * if the allocation ever changes. */
  int linesize = rgb_frame->linesize[0];
  for (int y = 0; y < height; y++) {
    /* Write rows in reverse order to flip the image vertically. */
    uint8_t *target = rgb_frame->data[0] + linesize * (height - y - 1);
    const uint8_t *src = pixels + linesize * y;

#  if ENDIAN_ORDER == L_ENDIAN
    memcpy(target, src, linesize);

#  elif ENDIAN_ORDER == B_ENDIAN
    /* Swap the byte order of each 4-byte pixel while copying. */
    const uint8_t *end = src + linesize;
    while (src != end) {
      target[3] = src[0];
      target[2] = src[1];
      target[1] = src[2];
      target[0] = src[3];

      target += 4;
      src += 4;
    }
#  else
#    error ENDIAN_ORDER should either be L_ENDIAN or B_ENDIAN.
#  endif
  }

  /* Convert to the output pixel format, if it's different that Blender's internal one. */
  if (context->img_convert_frame != NULL) {
    BLI_assert(context->img_convert_ctx != NULL);
    sws_scale(context->img_convert_ctx,
              (const uint8_t *const *)rgb_frame->data,
              rgb_frame->linesize,
              0,
              codec->height,
              context->current_frame->data,
              context->current_frame->linesize);
  }

  return context->current_frame;
}
429 
/* Translate one expert-option ID property into an entry of the FFmpeg options
 * dictionary. Property names may have the form "key:value" for boolean-style
 * options: a non-zero int property then sets "key" to "value". */
static void set_ffmpeg_property_option(IDProperty *prop, AVDictionary **dictionary)
{
  char name[128];
  char *param;

  PRINT("FFMPEG expert option: %s: ", prop->name);

  BLI_strncpy(name, prop->name, sizeof(name));

  /* Split "key:value" at the colon; `param` points at the value part. */
  param = strchr(name, ':');

  if (param) {
    *param++ = '\0';
  }

  switch (prop->type) {
    case IDP_STRING:
      PRINT("%s.\n", IDP_String(prop));
      av_dict_set(dictionary, name, IDP_String(prop), 0);
      break;
    case IDP_FLOAT:
      PRINT("%g.\n", IDP_Float(prop));
      /* NOTE(review): this passes `prop->name` (which may still contain the
       * ":param" suffix) while the string case uses the stripped `name` —
       * looks inconsistent, confirm whether float options ever use ":". */
      ffmpeg_dict_set_float(dictionary, prop->name, IDP_Float(prop));
      break;
    case IDP_INT:
      PRINT("%d.\n", IDP_Int(prop));

      if (param) {
        if (IDP_Int(prop)) {
          /* Boolean-style "key:value" option: non-zero enables key=value. */
          av_dict_set(dictionary, name, param, 0);
        }
        else {
          /* Zero means "option disabled": write nothing. */
          return;
        }
      }
      else {
        ffmpeg_dict_set_int(dictionary, prop->name, IDP_Int(prop));
      }
      break;
  }
}
471 
472 static int ffmpeg_proprty_valid(AVCodecContext *c, const char *prop_name, IDProperty *curr)
473 {
474  int valid = 1;
475 
476  if (STREQ(prop_name, "video")) {
477  if (STREQ(curr->name, "bf")) {
478  /* flash codec doesn't support b frames */
479  valid &= c->codec_id != AV_CODEC_ID_FLV1;
480  }
481  }
482 
483  return valid;
484 }
485 
/* Apply the expert-option ID-property group named `prop_name` ("video" or
 * "audio") from the render data to the FFmpeg options dictionary, skipping
 * options that are invalid for the codec in `c`. */
static void set_ffmpeg_properties(RenderData *rd,
                                  AVCodecContext *c,
                                  const char *prop_name,
                                  AVDictionary **dictionary)
{
  IDProperty *prop;
  IDProperty *curr;

  /* TODO(sergey): This is actually rather stupid, because changing
   * codec settings in render panel would also set expert options.
   *
   * But we need it here in order to get rid of deprecated settings
   * when opening old files in new blender.
   *
   * For as long as we don't allow editing properties in the interface
   * it's all good. But if we allow editing them, we'll need to
   * replace it with some smarter code which would port settings
   * from deprecated to new ones.
   */
  ffmpeg_set_expert_options(rd);

  if (!rd->ffcodecdata.properties) {
    return;
  }

  prop = IDP_GetPropertyFromGroup(rd->ffcodecdata.properties, prop_name);
  if (!prop) {
    return;
  }

  /* Walk the group and set each valid option on the dictionary. */
  for (curr = prop->data.group.first; curr; curr = curr->next) {
    if (ffmpeg_proprty_valid(c, prop_name, curr)) {
      set_ffmpeg_property_option(curr, dictionary);
    }
  }
}
522 
/* Build an integer AVRational time base from a possibly fractional frame rate
 * given as den (frs_sec) and num (frs_sec_base). */
static AVRational calc_time_base(uint den, double num, int codec_id)
{
  /* Convert the input 'num' to an integer. Simply shift the decimal places until we get an integer
   * (within a floating point error range).
   * For example if we have den = 3 and num = 0.1 then the fps is: den/num = 30 fps.
   * When converting this to an ffmpeg time base, we want num to be an integer.
   * So we simply move the decimal places of both numbers. IE den = 30, num = 1. */
  float eps = FLT_EPSILON;
  /* MPEG-4 stores the time-base denominator in 16 bits; everything else gets
   * the full positive int range. */
  const uint DENUM_MAX = (codec_id == AV_CODEC_ID_MPEG4) ? (1UL << 16) - 1 : (1UL << 31) - 1;

  /* Calculate the precision of the initial floating point number. */
  if (num > 1.0) {
    const uint num_integer_bits = log2_floor_u((unsigned int)num);

    /* Formula for calculating the epsilon value: (power of two range) / (pow mantissa bits)
     * For example, a float has 23 mantissa bits and the float value 3.5f has a pow2 range of
     * (4-2=2):
     * (2) / pow2(23) = floating point precision for 3.5f
     */
    eps = (float)(1 << num_integer_bits) * FLT_EPSILON;
  }

  /* Calculate how many decimal shifts we can do until we run out of precision. */
  const int max_num_shift = fabsf(log10f(eps));
  /* Calculate how many times we can shift the denominator. */
  const int max_den_shift = log10f(DENUM_MAX) - log10f(den);
  const int max_iter = min_ii(max_num_shift, max_den_shift);

  /* Shift both numbers by powers of ten until `num` is integral (within eps)
   * or we run out of precision / denominator head-room. */
  for (int i = 0; i < max_iter && fabs(num - round(num)) > eps; i++) {
    /* Increase the number and denominator until both are integers. */
    num *= 10;
    den *= 10;
    eps *= 10;
  }

  AVRational time_base;
  /* NOTE(review): den is uint assigned to the int `den` field — max_den_shift
   * should keep it <= DENUM_MAX so this doesn't overflow, but worth confirming. */
  time_base.den = den;
  time_base.num = (int)num;

  return time_base;
}
564 
/* prepare a video stream for the output file */

/* Create the output video stream and its encoder context, configure it from
 * the render settings, open the encoder and allocate the conversion frames.
 * Returns the new stream, or NULL on failure (an error message is copied into
 * `error`, which holds at least `error_size` bytes). */
static AVStream *alloc_video_stream(FFMpegContext *context,
                                    RenderData *rd,
                                    int codec_id,
                                    AVFormatContext *of,
                                    int rectx,
                                    int recty,
                                    char *error,
                                    int error_size)
{
  AVStream *st;
  AVCodec *codec;
  AVDictionary *opts = NULL;

  error[0] = '\0';

  st = avformat_new_stream(of, NULL);
  if (!st) {
    return NULL;
  }
  st->id = 0;

  /* Set up the codec context */

  context->video_codec = avcodec_alloc_context3(NULL);
  AVCodecContext *c = context->video_codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_VIDEO;

  codec = avcodec_find_encoder(c->codec_id);
  if (!codec) {
    fprintf(stderr, "Couldn't find valid video codec\n");
    avcodec_free_context(&c);
    context->video_codec = NULL;
    return NULL;
  }

  /* Load codec defaults into 'c'. */
  avcodec_get_context_defaults3(c, codec);

  /* Get some values from the current render settings */

  c->width = rectx;
  c->height = recty;

  /* Derive the time base from the frame rate (frs_sec / frs_sec_base). */
  if (context->ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
    /* FIXME: Really bad hack (tm) for NTSC support */
    c->time_base.den = 2997;
    c->time_base.num = 100;
  }
  else if ((float)((int)rd->frs_sec_base) == rd->frs_sec_base) {
    /* Integral base: use it directly. */
    c->time_base.den = rd->frs_sec;
    c->time_base.num = (int)rd->frs_sec_base;
  }
  else {
    /* Fractional base: scale both to integers. */
    c->time_base = calc_time_base(rd->frs_sec, rd->frs_sec_base, codec_id);
  }

  /* As per the timebase documentation here:
   * https://www.ffmpeg.org/ffmpeg-codecs.html#Codec-Options
   * We want to set the time base to (1 / fps) for fixed frame rate video.
   * If it is not possible, we want to set the timebase numbers to something as
   * small as possible.
   */
  if (c->time_base.num != 1) {
    AVRational new_time_base;
    if (av_reduce(
            &new_time_base.num, &new_time_base.den, c->time_base.num, c->time_base.den, INT_MAX)) {
      /* Exact reduction was possible. Use the new value. */
      c->time_base = new_time_base;
    }
  }

  st->time_base = c->time_base;

  c->gop_size = context->ffmpeg_gop_size;
  c->max_b_frames = context->ffmpeg_max_b_frames;

  /* Rate control: lossless, CRF, or classic bitrate mode. */
  if (context->ffmpeg_type == FFMPEG_WEBM && context->ffmpeg_crf == 0) {
    ffmpeg_dict_set_int(&opts, "lossless", 1);
  }
  else if (context->ffmpeg_crf >= 0) {
    /* As per https://trac.ffmpeg.org/wiki/Encode/VP9 we must set the bit rate to zero when
     * encoding with vp9 in crf mode.
     * Set this to always be zero for other codecs as well.
     * We don't care about bit rate in crf mode. */
    c->bit_rate = 0;
    ffmpeg_dict_set_int(&opts, "crf", context->ffmpeg_crf);
  }
  else {
    c->bit_rate = context->ffmpeg_video_bitrate * 1000;
    c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
    c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
    c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;
  }

  if (context->ffmpeg_preset) {
    /* 'preset' is used by h.264, 'deadline' is used by webm/vp9. I'm not
     * setting those properties conditionally based on the video codec,
     * as the FFmpeg encoder simply ignores unknown settings anyway. */
    char const *preset_name = NULL;   /* used by h.264 */
    char const *deadline_name = NULL; /* used by webm/vp9 */
    switch (context->ffmpeg_preset) {
      case FFM_PRESET_GOOD:
        preset_name = "medium";
        deadline_name = "good";
        break;
      case FFM_PRESET_BEST:
        preset_name = "slower";
        deadline_name = "best";
        break;
      case FFM_PRESET_REALTIME:
        preset_name = "superfast";
        deadline_name = "realtime";
        break;
      default:
        printf("Unknown preset number %i, ignoring.\n", context->ffmpeg_preset);
    }
    if (preset_name != NULL) {
      av_dict_set(&opts, "preset", preset_name, 0);
    }
    if (deadline_name != NULL) {
      av_dict_set(&opts, "deadline", deadline_name, 0);
    }
  }

  /* Be sure to use the correct pixel format(e.g. RGB, YUV) */

  if (codec->pix_fmts) {
    /* Default to the encoder's preferred (first listed) pixel format. */
    c->pix_fmt = codec->pix_fmts[0];
  }
  else {
    /* makes HuffYUV happy ... */
    c->pix_fmt = AV_PIX_FMT_YUV422P;
  }

  if (context->ffmpeg_type == FFMPEG_XVID) {
    /* arghhhh ... force 4:2:0 and the DIVX fourcc for Xvid compatibility. */
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
  }

  /* Keep lossless encodes in the RGB domain. */
  if (codec_id == AV_CODEC_ID_HUFFYUV) {
    if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
      c->pix_fmt = AV_PIX_FMT_BGRA;
    }
    else {
      c->pix_fmt = AV_PIX_FMT_RGB32;
    }
  }

  if (codec_id == AV_CODEC_ID_FFV1) {
    c->pix_fmt = AV_PIX_FMT_RGB32;
  }

  /* Codecs that can carry an alpha channel when RGBA output is requested. */
  if (codec_id == AV_CODEC_ID_QTRLE) {
    if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
      c->pix_fmt = AV_PIX_FMT_ARGB;
    }
  }

  if (codec_id == AV_CODEC_ID_VP9) {
    if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
      c->pix_fmt = AV_PIX_FMT_YUVA420P;
    }
  }

  if (codec_id == AV_CODEC_ID_PNG) {
    if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
      c->pix_fmt = AV_PIX_FMT_RGBA;
    }
  }

  if ((of->oformat->flags & AVFMT_GLOBALHEADER)) {
    PRINT("Using global header\n");
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  }

  /* xasp & yasp got float lately... */

  st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(((double)rd->xasp / (double)rd->yasp),
                                                            255);
  st->avg_frame_rate = av_inv_q(c->time_base);

  /* Apply the user's expert options on top of everything set above. */
  set_ffmpeg_properties(rd, c, "video", &opts);

  /* Threading: let FFmpeg auto-detect when supported, otherwise use
   * Blender's thread count. */
  if (codec->capabilities & AV_CODEC_CAP_AUTO_THREADS) {
    c->thread_count = 0;
  }
  else {
    c->thread_count = BLI_system_thread_count();
  }

  if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) {
    c->thread_type = FF_THREAD_FRAME;
  }
  else if (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
    c->thread_type = FF_THREAD_SLICE;
  }

  int ret = avcodec_open2(c, codec, &opts);

  if (ret < 0) {
    fprintf(stderr, "Couldn't initialize video codec: %s\n", av_err2str(ret));
    BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
    av_dict_free(&opts);
    avcodec_free_context(&c);
    context->video_codec = NULL;
    return NULL;
  }
  av_dict_free(&opts);

  /* FFmpeg expects its data in the output pixel format. */
  /* NOTE(review): alloc_picture() can return NULL on allocation failure;
   * that result is not checked here — confirm whether callers can tolerate
   * a NULL current_frame. */
  context->current_frame = alloc_picture(c->pix_fmt, c->width, c->height);

  if (c->pix_fmt == AV_PIX_FMT_RGBA) {
    /* Output pixel format is the same we use internally, no conversion necessary. */
    context->img_convert_frame = NULL;
    context->img_convert_ctx = NULL;
  }
  else {
    /* Output pixel format is different, allocate frame for conversion. */
    context->img_convert_frame = alloc_picture(AV_PIX_FMT_RGBA, c->width, c->height);
    context->img_convert_ctx = sws_getContext(c->width,
                                              c->height,
                                              AV_PIX_FMT_RGBA,
                                              c->width,
                                              c->height,
                                              c->pix_fmt,
                                              SWS_BICUBIC,
                                              NULL,
                                              NULL,
                                              NULL);
  }

  /* Copy the final encoder parameters onto the stream for the muxer. */
  avcodec_parameters_from_context(st->codecpar, c);

  /* NOTE(review): video_time is an int field; the 0.0f literal is harmless
   * (converts to 0) but misleading. */
  context->video_time = 0.0f;

  return st;
}
808 
809 static AVStream *alloc_audio_stream(FFMpegContext *context,
810  RenderData *rd,
811  int codec_id,
812  AVFormatContext *of,
813  char *error,
814  int error_size)
815 {
816  AVStream *st;
817  AVCodec *codec;
818  AVDictionary *opts = NULL;
819 
820  error[0] = '\0';
821 
822  st = avformat_new_stream(of, NULL);
823  if (!st) {
824  return NULL;
825  }
826  st->id = 1;
827 
828  context->audio_codec = avcodec_alloc_context3(NULL);
829  AVCodecContext *c = context->audio_codec;
830  c->thread_count = BLI_system_thread_count();
831  c->thread_type = FF_THREAD_SLICE;
832 
833  c->codec_id = codec_id;
834  c->codec_type = AVMEDIA_TYPE_AUDIO;
835 
836  codec = avcodec_find_encoder(c->codec_id);
837  if (!codec) {
838  fprintf(stderr, "Couldn't find valid audio codec\n");
839  avcodec_free_context(&c);
840  context->audio_codec = NULL;
841  return NULL;
842  }
843 
844  /* Load codec defaults into 'c'. */
845  avcodec_get_context_defaults3(c, codec);
846 
847  c->sample_rate = rd->ffcodecdata.audio_mixrate;
848  c->bit_rate = context->ffmpeg_audio_bitrate * 1000;
849  c->sample_fmt = AV_SAMPLE_FMT_S16;
850  c->channels = rd->ffcodecdata.audio_channels;
851 
852  switch (rd->ffcodecdata.audio_channels) {
853  case FFM_CHANNELS_MONO:
854  c->channel_layout = AV_CH_LAYOUT_MONO;
855  break;
856  case FFM_CHANNELS_STEREO:
857  c->channel_layout = AV_CH_LAYOUT_STEREO;
858  break;
860  c->channel_layout = AV_CH_LAYOUT_QUAD;
861  break;
863  c->channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
864  break;
866  c->channel_layout = AV_CH_LAYOUT_7POINT1;
867  break;
868  }
869 
870  if (request_float_audio_buffer(codec_id)) {
871  /* mainly for AAC codec which is experimental */
872  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
873  c->sample_fmt = AV_SAMPLE_FMT_FLT;
874  }
875 
876  if (codec->sample_fmts) {
877  /* Check if the preferred sample format for this codec is supported.
878  * this is because, depending on the version of libav,
879  * and with the whole ffmpeg/libav fork situation,
880  * you have various implementations around.
881  * Float samples in particular are not always supported. */
882  const enum AVSampleFormat *p = codec->sample_fmts;
883  for (; *p != -1; p++) {
884  if (*p == c->sample_fmt) {
885  break;
886  }
887  }
888  if (*p == -1) {
889  /* sample format incompatible with codec. Defaulting to a format known to work */
890  c->sample_fmt = codec->sample_fmts[0];
891  }
892  }
893 
894  if (codec->supported_samplerates) {
895  const int *p = codec->supported_samplerates;
896  int best = 0;
897  int best_dist = INT_MAX;
898  for (; *p; p++) {
899  int dist = abs(c->sample_rate - *p);
900  if (dist < best_dist) {
901  best_dist = dist;
902  best = *p;
903  }
904  }
905  /* best is the closest supported sample rate (same as selected if best_dist == 0) */
906  c->sample_rate = best;
907  }
908 
909  if (of->oformat->flags & AVFMT_GLOBALHEADER) {
910  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
911  }
912 
913  set_ffmpeg_properties(rd, c, "audio", &opts);
914 
915  int ret = avcodec_open2(c, codec, &opts);
916 
917  if (ret < 0) {
918  fprintf(stderr, "Couldn't initialize audio codec: %s\n", av_err2str(ret));
919  BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
920  av_dict_free(&opts);
921  avcodec_free_context(&c);
922  context->audio_codec = NULL;
923  return NULL;
924  }
925  av_dict_free(&opts);
926 
927  /* need to prevent floating point exception when using vorbis audio codec,
928  * initialize this value in the same way as it's done in FFmpeg itself (sergey) */
929  c->time_base.num = 1;
930  c->time_base.den = c->sample_rate;
931 
932  if (c->frame_size == 0) {
933  /* Used to be if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
934  * not sure if that is needed anymore, so let's try out if there are any
935  * complaints regarding some FFmpeg versions users might have. */
936  context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample /
937  c->channels;
938  }
939  else {
940  context->audio_input_samples = c->frame_size;
941  }
942 
943  context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
944 
945  context->audio_sample_size = av_get_bytes_per_sample(c->sample_fmt);
946 
947  context->audio_input_buffer = (uint8_t *)av_malloc(context->audio_input_samples * c->channels *
948  context->audio_sample_size);
949  if (context->audio_deinterleave) {
950  context->audio_deinterleave_buffer = (uint8_t *)av_malloc(
951  context->audio_input_samples * c->channels * context->audio_sample_size);
952  }
953 
954  context->audio_time = 0.0f;
955 
956  avcodec_parameters_from_context(st->codecpar, c);
957 
958  return st;
959 }
960 /* essential functions -- start, append, end */
961 
962 static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value)
963 {
964  char buffer[32];
965 
966  BLI_snprintf(buffer, sizeof(buffer), "%d", value);
967 
968  av_dict_set(dict, key, buffer, 0);
969 }
970 
971 static void ffmpeg_dict_set_float(AVDictionary **dict, const char *key, float value)
972 {
973  char buffer[32];
974 
975  BLI_snprintf(buffer, sizeof(buffer), "%.8f", value);
976 
977  av_dict_set(dict, key, buffer, 0);
978 }
979 
980 static void ffmpeg_add_metadata_callback(void *data,
981  const char *propname,
982  char *propvalue,
983  int UNUSED(len))
984 {
985  AVDictionary **metadata = (AVDictionary **)data;
986  av_dict_set(metadata, propname, propvalue, 0);
987 }
988 
989 static int start_ffmpeg_impl(FFMpegContext *context,
990  struct RenderData *rd,
991  int rectx,
992  int recty,
993  const char *suffix,
994  ReportList *reports)
995 {
996  /* Handle to the output file */
997  AVFormatContext *of;
998  AVOutputFormat *fmt;
999  AVDictionary *opts = NULL;
1000  char name[FILE_MAX], error[1024];
1001  const char **exts;
1002 
1003  context->ffmpeg_type = rd->ffcodecdata.type;
1004  context->ffmpeg_codec = rd->ffcodecdata.codec;
1005  context->ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
1006  context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
1007  context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
1008  context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
1009  context->ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;
1010  context->ffmpeg_crf = rd->ffcodecdata.constant_rate_factor;
1011  context->ffmpeg_preset = rd->ffcodecdata.ffmpeg_preset;
1012 
1013  if ((rd->ffcodecdata.flags & FFMPEG_USE_MAX_B_FRAMES) != 0) {
1014  context->ffmpeg_max_b_frames = rd->ffcodecdata.max_b_frames;
1015  }
1016 
1017  /* Determine the correct filename */
1018  ffmpeg_filepath_get(context, name, rd, context->ffmpeg_preview, suffix);
1019  PRINT(
1020  "Starting output to %s(ffmpeg)...\n"
1021  " Using type=%d, codec=%d, audio_codec=%d,\n"
1022  " video_bitrate=%d, audio_bitrate=%d,\n"
1023  " gop_size=%d, autosplit=%d\n"
1024  " render width=%d, render height=%d\n",
1025  name,
1026  context->ffmpeg_type,
1027  context->ffmpeg_codec,
1028  context->ffmpeg_audio_codec,
1029  context->ffmpeg_video_bitrate,
1030  context->ffmpeg_audio_bitrate,
1031  context->ffmpeg_gop_size,
1032  context->ffmpeg_autosplit,
1033  rectx,
1034  recty);
1035 
1036  exts = get_file_extensions(context->ffmpeg_type);
1037  if (!exts) {
1038  BKE_report(reports, RPT_ERROR, "No valid formats found");
1039  return 0;
1040  }
1041  fmt = av_guess_format(NULL, exts[0], NULL);
1042  if (!fmt) {
1043  BKE_report(reports, RPT_ERROR, "No valid formats found");
1044  return 0;
1045  }
1046 
1047  of = avformat_alloc_context();
1048  if (!of) {
1049  BKE_report(reports, RPT_ERROR, "Error opening output file");
1050  return 0;
1051  }
1052 
1053  /* Returns after this must 'goto fail;' */
1054 
1055  of->oformat = fmt;
1056 
1057  /* Only bother with setting packet size & mux rate when CRF is not used. */
1058  if (context->ffmpeg_crf == 0) {
1059  of->packet_size = rd->ffcodecdata.mux_packet_size;
1060  if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
1061  ffmpeg_dict_set_int(&opts, "muxrate", rd->ffcodecdata.mux_rate);
1062  }
1063  else {
1064  av_dict_set(&opts, "muxrate", "0", 0);
1065  }
1066  }
1067 
1068  ffmpeg_dict_set_int(&opts, "preload", (int)(0.5 * AV_TIME_BASE));
1069 
1070  of->max_delay = (int)(0.7 * AV_TIME_BASE);
1071 
1072  fmt->audio_codec = context->ffmpeg_audio_codec;
1073 
1074  of->url = av_strdup(name);
1075  /* set the codec to the user's selection */
1076  switch (context->ffmpeg_type) {
1077  case FFMPEG_AVI:
1078  case FFMPEG_MOV:
1079  case FFMPEG_MKV:
1080  fmt->video_codec = context->ffmpeg_codec;
1081  break;
1082  case FFMPEG_OGG:
1083  fmt->video_codec = AV_CODEC_ID_THEORA;
1084  break;
1085  case FFMPEG_DV:
1086  fmt->video_codec = AV_CODEC_ID_DVVIDEO;
1087  break;
1088  case FFMPEG_MPEG1:
1089  fmt->video_codec = AV_CODEC_ID_MPEG1VIDEO;
1090  break;
1091  case FFMPEG_MPEG2:
1092  fmt->video_codec = AV_CODEC_ID_MPEG2VIDEO;
1093  break;
1094  case FFMPEG_H264:
1095  fmt->video_codec = AV_CODEC_ID_H264;
1096  break;
1097  case FFMPEG_XVID:
1098  fmt->video_codec = AV_CODEC_ID_MPEG4;
1099  break;
1100  case FFMPEG_FLV:
1101  fmt->video_codec = AV_CODEC_ID_FLV1;
1102  break;
1103  case FFMPEG_MPEG4:
1104  default:
1105  fmt->video_codec = context->ffmpeg_codec;
1106  break;
1107  }
1108  if (fmt->video_codec == AV_CODEC_ID_DVVIDEO) {
1109  if (rectx != 720) {
1110  BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
1111  goto fail;
1112  }
1113  if (rd->frs_sec != 25 && recty != 480) {
1114  BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
1115  goto fail;
1116  }
1117  if (rd->frs_sec == 25 && recty != 576) {
1118  BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
1119  goto fail;
1120  }
1121  }
1122 
1123  if (context->ffmpeg_type == FFMPEG_DV) {
1124  fmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
1125  if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE &&
1126  rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2) {
1127  BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
1128  goto fail;
1129  }
1130  }
1131 
1132  if (fmt->video_codec != AV_CODEC_ID_NONE) {
1133  context->video_stream = alloc_video_stream(
1134  context, rd, fmt->video_codec, of, rectx, recty, error, sizeof(error));
1135  PRINT("alloc video stream %p\n", context->video_stream);
1136  if (!context->video_stream) {
1137  if (error[0]) {
1138  BKE_report(reports, RPT_ERROR, error);
1139  PRINT("Video stream error: %s\n", error);
1140  }
1141  else {
1142  BKE_report(reports, RPT_ERROR, "Error initializing video stream");
1143  PRINT("Error initializing video stream");
1144  }
1145  goto fail;
1146  }
1147  }
1148 
1149  if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
1150  context->audio_stream = alloc_audio_stream(
1151  context, rd, fmt->audio_codec, of, error, sizeof(error));
1152  if (!context->audio_stream) {
1153  if (error[0]) {
1154  BKE_report(reports, RPT_ERROR, error);
1155  PRINT("Audio stream error: %s\n", error);
1156  }
1157  else {
1158  BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
1159  PRINT("Error initializing audio stream");
1160  }
1161  goto fail;
1162  }
1163  }
1164  if (!(fmt->flags & AVFMT_NOFILE)) {
1165  if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
1166  BKE_report(reports, RPT_ERROR, "Could not open file for writing");
1167  PRINT("Could not open file for writing\n");
1168  goto fail;
1169  }
1170  }
1171 
1172  if (context->stamp_data != NULL) {
1174  &of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
1175  }
1176 
1177  int ret = avformat_write_header(of, NULL);
1178  if (ret < 0) {
1179  BKE_report(reports,
1180  RPT_ERROR,
1181  "Could not initialize streams, probably unsupported codec combination");
1182  PRINT("Could not write media header: %s\n", av_err2str(ret));
1183  goto fail;
1184  }
1185 
1186  context->outfile = of;
1187  av_dump_format(of, 0, name, 1);
1188  av_dict_free(&opts);
1189 
1190  return 1;
1191 
1192 fail:
1193  if (of->pb) {
1194  avio_close(of->pb);
1195  }
1196 
1197  if (context->video_stream) {
1198  context->video_stream = NULL;
1199  }
1200 
1201  if (context->audio_stream) {
1202  context->audio_stream = NULL;
1203  }
1204 
1205  av_dict_free(&opts);
1206  avformat_free_context(of);
1207  return 0;
1208 }
1209 
1227 static void flush_ffmpeg(AVCodecContext *c, AVStream *stream, AVFormatContext *outfile)
1228 {
1229  AVPacket *packet = av_packet_alloc();
1230 
1231  avcodec_send_frame(c, NULL);
1232 
1233  /* Get the packets frames. */
1234  int ret = 1;
1235  while (ret >= 0) {
1236  ret = avcodec_receive_packet(c, packet);
1237 
1238  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
1239  /* No more packets to flush. */
1240  break;
1241  }
1242  if (ret < 0) {
1243  fprintf(stderr, "Error encoding delayed frame: %s\n", av_err2str(ret));
1244  break;
1245  }
1246 
1247  packet->stream_index = stream->index;
1248  av_packet_rescale_ts(packet, c->time_base, stream->time_base);
1249 # ifdef FFMPEG_USE_DURATION_WORKAROUND
1250  my_guess_pkt_duration(outfile, stream, packet);
1251 # endif
1252 
1253  int write_ret = av_interleaved_write_frame(outfile, packet);
1254  if (write_ret != 0) {
1255  fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret));
1256  break;
1257  }
1258  }
1259 
1260  av_packet_free(&packet);
1261 }
1262 
1263 /* **********************************************************************
1264  * * public interface
1265  * ********************************************************************** */
1266 
1267 /* Get the output filename-- similar to the other output formats */
/* Build the final movie file path into `string` from the render settings:
 * picks the frame range (preview or full), appends the auto-split counter,
 * a frame-range suffix, the container extension, and the view `suffix`.
 * `context` may be NULL (then no auto-split counter is applied). */
1268 static void ffmpeg_filepath_get(
1269  FFMpegContext *context, char *string, const RenderData *rd, bool preview, const char *suffix)
1270 {
1271  char autosplit[20];
1272 
1273  const char **exts = get_file_extensions(rd->ffcodecdata.type);
1274  const char **fe = exts;
1275  int sfra, efra;
1276 
     /* Nothing to do without an output buffer or a known container type. */
1277  if (!string || !exts) {
1278  return;
1279  }
1280 
     /* Preview renders use the preview frame range. */
1281  if (preview) {
1282  sfra = rd->psfra;
1283  efra = rd->pefra;
1284  }
1285  else {
1286  sfra = rd->sfra;
1287  efra = rd->efra;
1288  }
1289 
1290  strcpy(string, rd->pic);
     /* NOTE(review): original line 1291 appears elided in this extraction
      * (presumably the path-absolutize step) -- confirm against upstream. */
1292 
1293  BLI_make_existing_file(string);
1294 
1295  autosplit[0] = '\0';
1296 
     /* "_NNN" counter appended when auto-split output is active. */
1297  if ((rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT) != 0) {
1298  if (context) {
1299  sprintf(autosplit, "_%03d", context->ffmpeg_autosplit_count);
1300  }
1301  }
1302 
1303  if (rd->scemode & R_EXTENSION) {
     /* Look for an already-present known extension at the end of the path. */
1304  while (*fe) {
1305  if (BLI_strcasecmp(string + strlen(string) - strlen(*fe), *fe) == 0) {
1306  break;
1307  }
1308  fe++;
1309  }
1310 
     /* No known extension: add counter, frame range and default extension. */
1311  if (*fe == NULL) {
1312  strcat(string, autosplit);
1313 
1314  BLI_path_frame_range(string, sfra, efra, 4);
1315  strcat(string, *exts);
1316  }
     /* Known extension found: insert the counter before it. */
1317  else {
1318  *(string + strlen(string) - strlen(*fe)) = '\0';
1319  strcat(string, autosplit);
1320  strcat(string, *fe);
1321  }
1322  }
1323  else {
     /* No automatic extension: only expand '#' frame placeholders if present. */
1324  if (BLI_path_frame_check_chars(string)) {
1325  BLI_path_frame_range(string, sfra, efra, 4);
1326  }
1327 
1328  strcat(string, autosplit);
1329  }
1330 
1331  BLI_path_suffix(string, FILE_MAX, suffix, "");
1332 }
1333 
/* Public wrapper around ffmpeg_filepath_get() without an FFMpegContext,
 * so no auto-split counter is appended to the path. */
1334 void BKE_ffmpeg_filepath_get(char *string, const RenderData *rd, bool preview, const char *suffix)
1335 {
1336  ffmpeg_filepath_get(NULL, string, rd, preview, suffix);
1337 }
1338 
/* Begin an FFmpeg render: opens the output file/streams via
 * start_ffmpeg_impl() and, with Audaspace enabled, creates the audio
 * mixdown device matching the encoder's sample format.
 * Returns the start_ffmpeg_impl() result, or -31415 on an unsupported
 * audio sample format. */
1339 int BKE_ffmpeg_start(void *context_v,
1340  const struct Scene *scene,
1341  RenderData *rd,
1342  int rectx,
1343  int recty,
1344  ReportList *reports,
1345  bool preview,
1346  const char *suffix)
1347 {
1348  int success;
1349  FFMpegContext *context = context_v;
1350 
1351  context->ffmpeg_autosplit_count = 0;
1352  context->ffmpeg_preview = preview;
     /* NOTE(review): original line 1353 appears elided in this extraction
      * (presumably stamp-data setup from the scene) -- confirm upstream. */
1354 
1355  success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
1356 # ifdef WITH_AUDASPACE
1357  if (context->audio_stream) {
1358  AVCodecContext *c = context->audio_codec;
1359 
1360  AUD_DeviceSpecs specs;
1361  specs.channels = c->channels;
1362 
     /* Map the encoder's (packed) sample format onto an Audaspace format. */
1363  switch (av_get_packed_sample_fmt(c->sample_fmt)) {
1364  case AV_SAMPLE_FMT_U8:
1365  specs.format = AUD_FORMAT_U8;
1366  break;
1367  case AV_SAMPLE_FMT_S16:
1368  specs.format = AUD_FORMAT_S16;
1369  break;
1370  case AV_SAMPLE_FMT_S32:
1371  specs.format = AUD_FORMAT_S32;
1372  break;
1373  case AV_SAMPLE_FMT_FLT:
1374  specs.format = AUD_FORMAT_FLOAT32;
1375  break;
1376  case AV_SAMPLE_FMT_DBL:
1377  specs.format = AUD_FORMAT_FLOAT64;
1378  break;
1379  default:
     /* Unsupported sample format: bail out with a sentinel error code. */
1380  return -31415;
1381  }
1382 
1383  specs.rate = rd->ffcodecdata.audio_mixrate;
1384  context->audio_mixdown_device = BKE_sound_mixdown(
1385  scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
1386  }
1387 # endif
1388  return success;
1389 }
1390 
1391 static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit);
1392 
1393 # ifdef WITH_AUDASPACE
1394 static void write_audio_frames(FFMpegContext *context, double to_pts)
1395 {
1396  AVCodecContext *c = context->audio_codec;
1397 
1398  while (context->audio_stream) {
1399  if ((context->audio_time_total >= to_pts) || !write_audio_frame(context)) {
1400  break;
1401  }
1402  context->audio_time_total += (double)context->audio_input_samples / (double)c->sample_rate;
1403  context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;
1404  }
1405 }
1406 # endif
1407 
1408 int BKE_ffmpeg_append(void *context_v,
1409  RenderData *rd,
1410  int start_frame,
1411  int frame,
1412  int *pixels,
1413  int rectx,
1414  int recty,
1415  const char *suffix,
1416  ReportList *reports)
1417 {
1418  FFMpegContext *context = context_v;
1419  AVFrame *avframe;
1420  int success = 1;
1421 
1422  PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty);
1423 
1424  if (context->video_stream) {
1425  avframe = generate_video_frame(context, (unsigned char *)pixels);
1426  success = (avframe && write_video_frame(context, avframe, reports));
1427 # ifdef WITH_AUDASPACE
1428  /* Add +1 frame because we want to encode audio up until the next video frame. */
1429  write_audio_frames(
1430  context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
1431 # endif
1432 
1433  if (context->ffmpeg_autosplit) {
1434  if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
1435  end_ffmpeg_impl(context, true);
1436  context->ffmpeg_autosplit_count++;
1437 
1438  success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
1439  }
1440  }
1441  }
1442 
1443  return success;
1444 }
1445 
1446 static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
1447 {
1448  PRINT("Closing ffmpeg...\n");
1449 
1450 # ifdef WITH_AUDASPACE
1451  if (is_autosplit == false) {
1452  if (context->audio_mixdown_device) {
1453  AUD_Device_free(context->audio_mixdown_device);
1454  context->audio_mixdown_device = NULL;
1455  }
1456  }
1457 # endif
1458 
1459  if (context->video_stream) {
1460  PRINT("Flushing delayed video frames...\n");
1461  flush_ffmpeg(context->video_codec, context->video_stream, context->outfile);
1462  }
1463 
1464  if (context->audio_stream) {
1465  PRINT("Flushing delayed audio frames...\n");
1466  flush_ffmpeg(context->audio_codec, context->audio_stream, context->outfile);
1467  }
1468 
1469  if (context->outfile) {
1470  av_write_trailer(context->outfile);
1471  }
1472 
1473  /* Close the video codec */
1474 
1475  if (context->video_stream != NULL) {
1476  PRINT("zero video stream %p\n", context->video_stream);
1477  context->video_stream = NULL;
1478  }
1479 
1480  if (context->audio_stream != NULL) {
1481  context->audio_stream = NULL;
1482  }
1483 
1484  /* free the temp buffer */
1485  if (context->current_frame != NULL) {
1486  delete_picture(context->current_frame);
1487  context->current_frame = NULL;
1488  }
1489  if (context->img_convert_frame != NULL) {
1490  delete_picture(context->img_convert_frame);
1491  context->img_convert_frame = NULL;
1492  }
1493 
1494  if (context->outfile != NULL && context->outfile->oformat) {
1495  if (!(context->outfile->oformat->flags & AVFMT_NOFILE)) {
1496  avio_close(context->outfile->pb);
1497  }
1498  }
1499 
1500  if (context->video_codec != NULL) {
1501  avcodec_free_context(&context->video_codec);
1502  context->video_codec = NULL;
1503  }
1504  if (context->audio_codec != NULL) {
1505  avcodec_free_context(&context->audio_codec);
1506  context->audio_codec = NULL;
1507  }
1508 
1509  if (context->outfile != NULL) {
1510  avformat_free_context(context->outfile);
1511  context->outfile = NULL;
1512  }
1513  if (context->audio_input_buffer != NULL) {
1514  av_free(context->audio_input_buffer);
1515  context->audio_input_buffer = NULL;
1516  }
1517 
1518  if (context->audio_deinterleave_buffer != NULL) {
1519  av_free(context->audio_deinterleave_buffer);
1520  context->audio_deinterleave_buffer = NULL;
1521  }
1522 
1523  if (context->img_convert_ctx != NULL) {
1524  sws_freeContext(context->img_convert_ctx);
1525  context->img_convert_ctx = NULL;
1526  }
1527 }
1528 
/* Public entry point: finish writing and free all FFmpeg resources.
 * Not an auto-split close, so the audio mixdown device is freed too. */
1529 void BKE_ffmpeg_end(void *context_v)
1530 {
1531  FFMpegContext *context = context_v;
1532  end_ffmpeg_impl(context, false);
1533 }
1534 
1535 /* properties */
1536 
/* Remove the ID property `prop_` from the expert-option group identified by
 * `type` inside rd->ffcodecdata.properties. No-op if the container is absent. */
1537 void BKE_ffmpeg_property_del(RenderData *rd, void *type, void *prop_)
1538 {
1539  struct IDProperty *prop = (struct IDProperty *)prop_;
1540  IDProperty *group;
1541 
1542  if (!rd->ffcodecdata.properties) {
1543  return;
1544  }
1545 
     /* NOTE(review): original line 1546 appears elided in this extraction --
      * presumably the lookup that assigns `group` from the `type` subgroup;
      * confirm against upstream. */
1547  if (group && prop) {
1548  IDP_FreeFromGroup(group, prop);
1549  }
1550 }
1551 
/* Create (or fetch, if already present) an ID property representing the
 * AVOption `o` inside the "ffmpeg"/`type` property group, with its default
 * value taken from the option. `parent` (may be NULL) prefixes the property
 * name as "parent:option" for unit constants.
 * Returns the property, or NULL for unsupported option types. */
1552 static IDProperty *BKE_ffmpeg_property_add(RenderData *rd,
1553  const char *type,
1554  const AVOption *o,
1555  const AVOption *parent)
1556 {
1557  AVCodecContext c;
1558  IDProperty *group;
1559  IDProperty *prop;
1560  IDPropertyTemplate val;
1561  int idp_type;
1562  char name[256];
1563 
1564  val.i = 0;
1565 
1566  avcodec_get_context_defaults3(&c, NULL);
1567 
     /* Lazily create the root "ffmpeg" property container. */
1568  if (!rd->ffcodecdata.properties) {
1569  rd->ffcodecdata.properties = IDP_New(IDP_GROUP, &val, "ffmpeg");
1570  }
1571 
     /* NOTE(review): original line 1572 appears elided in this extraction --
      * presumably the lookup assigning `group` for `type`; confirm upstream. */
1573 
1574  if (!group) {
1575  group = IDP_New(IDP_GROUP, &val, type);
     /* NOTE(review): original line 1576 appears elided here -- presumably
      * adding the new group to the root container; confirm upstream. */
1577  }
1578 
     /* Unit constants are stored as "parent:option", plain options by name. */
1579  if (parent) {
1580  BLI_snprintf(name, sizeof(name), "%s:%s", parent->name, o->name);
1581  }
1582  else {
1583  BLI_strncpy(name, o->name, sizeof(name));
1584  }
1585 
1586  PRINT("ffmpeg_property_add: %s %s\n", type, name);
1587 
     /* Reuse an existing property instead of resetting it to the default. */
1588  prop = IDP_GetPropertyFromGroup(group, name);
1589  if (prop) {
1590  return prop;
1591  }
1592 
     /* Map the AVOption type onto an ID property type with a default value. */
1593  switch (o->type) {
1594  case AV_OPT_TYPE_INT:
1595  case AV_OPT_TYPE_INT64:
1596  val.i = o->default_val.i64;
1597  idp_type = IDP_INT;
1598  break;
1599  case AV_OPT_TYPE_DOUBLE:
1600  case AV_OPT_TYPE_FLOAT:
1601  val.f = o->default_val.dbl;
1602  idp_type = IDP_FLOAT;
1603  break;
1604  case AV_OPT_TYPE_STRING:
1605  val.string.str =
1606  (char
1607  *)" ";
1608  val.string.len = 80;
1609  idp_type = IDP_STRING;
1610  break;
1611  case AV_OPT_TYPE_CONST:
1612  val.i = 1;
1613  idp_type = IDP_INT;
1614  break;
1615  default:
     /* Unsupported option type: nothing stored. */
1616  return NULL;
1617  }
1618  prop = IDP_New(idp_type, &val, name);
1619  IDP_AddToGroup(group, prop);
1620  return prop;
1621 }
1622 
1623 /* not all versions of ffmpeg include that, so here we go ... */
1624 
1625 int BKE_ffmpeg_property_add_string(RenderData *rd, const char *type, const char *str)
1626 {
1627  AVCodecContext c;
1628  const AVOption *o = NULL;
1629  const AVOption *p = NULL;
1630  char name_[128];
1631  char *name;
1632  char *param;
1633  IDProperty *prop = NULL;
1634 
1635  avcodec_get_context_defaults3(&c, NULL);
1636 
1637  BLI_strncpy(name_, str, sizeof(name_));
1638 
1639  name = name_;
1640  while (*name == ' ') {
1641  name++;
1642  }
1643 
1644  param = strchr(name, ':');
1645 
1646  if (!param) {
1647  param = strchr(name, ' ');
1648  }
1649  if (param) {
1650  *param++ = '\0';
1651  while (*param == ' ') {
1652  param++;
1653  }
1654  }
1655 
1656  o = av_opt_find(&c, name, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
1657  if (!o) {
1658  PRINT("Ignoring unknown expert option %s\n", str);
1659  return 0;
1660  }
1661  if (param && o->type == AV_OPT_TYPE_CONST) {
1662  return 0;
1663  }
1664  if (param && o->type != AV_OPT_TYPE_CONST && o->unit) {
1665  p = av_opt_find(&c, param, o->unit, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
1666  if (p) {
1667  prop = BKE_ffmpeg_property_add(rd, (char *)type, p, o);
1668  }
1669  else {
1670  PRINT("Ignoring unknown expert option %s\n", str);
1671  }
1672  }
1673  else {
1674  prop = BKE_ffmpeg_property_add(rd, (char *)type, o, NULL);
1675  }
1676 
1677  if (!prop) {
1678  return 0;
1679  }
1680 
1681  if (param && !p) {
1682  switch (prop->type) {
1683  case IDP_INT:
1684  IDP_Int(prop) = atoi(param);
1685  break;
1686  case IDP_FLOAT:
1687  IDP_Float(prop) = atof(param);
1688  break;
1689  case IDP_STRING:
1690  strncpy(IDP_String(prop), param, prop->len);
1691  break;
1692  }
1693  }
1694  return 1;
1695 }
1696 
/* Reset the expert-option properties and install codec-specific defaults
 * (currently only for DNxHD). */
1697 static void ffmpeg_set_expert_options(RenderData *rd)
1698 {
1699  int codec_id = rd->ffcodecdata.codec;
1700 
1701  if (rd->ffcodecdata.properties) {
     /* NOTE(review): original line 1702 appears elided in this extraction --
      * presumably freeing the existing property content; confirm upstream. */
1703  }
1704 
1705  if (codec_id == AV_CODEC_ID_DNXHD) {
     /* NOTE(review): original line 1706 appears elided here -- presumably a
      * condition guarding the inner block below; confirm upstream. */
1707  BKE_ffmpeg_property_add_string(rd, "video", "mbd:rd");
1708  }
1709  }
1710 }
1711 
/* Apply one of the FFMPEG_PRESET_* bundles to the render settings:
 * container type, codec, bitrates, GOP size, mux parameters and (for some
 * presets) output resolution. PAL vs NTSC variants are chosen from the
 * scene frame rate (25 fps => PAL). */
1712 void BKE_ffmpeg_preset_set(RenderData *rd, int preset)
1713 {
     /* Anything other than exactly 25 fps is treated as NTSC. */
1714  int isntsc = (rd->frs_sec != 25);
1715 
1716  if (rd->ffcodecdata.properties) {
     /* NOTE(review): original line 1717 appears elided in this extraction --
      * presumably freeing the existing expert-option properties; confirm
      * against upstream. */
1718  }
1719 
1720  switch (preset) {
     /* Video CD: MPEG-1 at fixed VCD rates and resolution. */
1721  case FFMPEG_PRESET_VCD:
1722  rd->ffcodecdata.type = FFMPEG_MPEG1;
1723  rd->ffcodecdata.video_bitrate = 1150;
1724  rd->xsch = 352;
1725  rd->ysch = isntsc ? 240 : 288;
1726  rd->ffcodecdata.gop_size = isntsc ? 18 : 15;
1727  rd->ffcodecdata.rc_max_rate = 1150;
1728  rd->ffcodecdata.rc_min_rate = 1150;
1729  rd->ffcodecdata.rc_buffer_size = 40 * 8;
1730  rd->ffcodecdata.mux_packet_size = 2324;
1731  rd->ffcodecdata.mux_rate = 2352 * 75 * 8;
1732  break;
1733 
     /* Super Video CD: MPEG-2 at SVCD rates and resolution. */
1734  case FFMPEG_PRESET_SVCD:
1735  rd->ffcodecdata.type = FFMPEG_MPEG2;
1736  rd->ffcodecdata.video_bitrate = 2040;
1737  rd->xsch = 480;
1738  rd->ysch = isntsc ? 480 : 576;
1739  rd->ffcodecdata.gop_size = isntsc ? 18 : 15;
1740  rd->ffcodecdata.rc_max_rate = 2516;
1741  rd->ffcodecdata.rc_min_rate = 0;
1742  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1743  rd->ffcodecdata.mux_packet_size = 2324;
1744  rd->ffcodecdata.mux_rate = 0;
1745  break;
1746 
     /* DVD: MPEG-2 at DVD rates; resolution deliberately untouched. */
1747  case FFMPEG_PRESET_DVD:
1748  rd->ffcodecdata.type = FFMPEG_MPEG2;
1749  rd->ffcodecdata.video_bitrate = 6000;
1750 
1751  /* Don't set resolution, see T21351.
1752  * rd->xsch = 720;
1753  * rd->ysch = isntsc ? 480 : 576; */
1754 
1755  rd->ffcodecdata.gop_size = isntsc ? 18 : 15;
1756  rd->ffcodecdata.rc_max_rate = 9000;
1757  rd->ffcodecdata.rc_min_rate = 0;
1758  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1759  rd->ffcodecdata.mux_packet_size = 2048;
1760  rd->ffcodecdata.mux_rate = 10080000;
1761  break;
1762 
     /* DV: fixed DV resolution; codec/rates come from the DV container. */
1763  case FFMPEG_PRESET_DV:
1764  rd->ffcodecdata.type = FFMPEG_DV;
1765  rd->xsch = 720;
1766  rd->ysch = isntsc ? 480 : 576;
1767  break;
1768 
     /* H.264 in an AVI container with DVD-like rate control. */
1769  case FFMPEG_PRESET_H264:
1770  rd->ffcodecdata.type = FFMPEG_AVI;
1771  rd->ffcodecdata.codec = AV_CODEC_ID_H264;
1772  rd->ffcodecdata.video_bitrate = 6000;
1773  rd->ffcodecdata.gop_size = isntsc ? 18 : 15;
1774  rd->ffcodecdata.rc_max_rate = 9000;
1775  rd->ffcodecdata.rc_min_rate = 0;
1776  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1777  rd->ffcodecdata.mux_packet_size = 2048;
1778  rd->ffcodecdata.mux_rate = 10080000;
1779 
1780  break;
1781 
     /* Theora (Ogg) and Xvid (MPEG-4/AVI) share the same rate settings. */
1782  case FFMPEG_PRESET_THEORA:
1783  case FFMPEG_PRESET_XVID:
1784  if (preset == FFMPEG_PRESET_XVID) {
1785  rd->ffcodecdata.type = FFMPEG_AVI;
1786  rd->ffcodecdata.codec = AV_CODEC_ID_MPEG4;
1787  }
1788  else if (preset == FFMPEG_PRESET_THEORA) {
1789  rd->ffcodecdata.type = FFMPEG_OGG; /* XXX broken */
1790  rd->ffcodecdata.codec = AV_CODEC_ID_THEORA;
1791  }
1792 
1793  rd->ffcodecdata.video_bitrate = 6000;
1794  rd->ffcodecdata.gop_size = isntsc ? 18 : 15;
1795  rd->ffcodecdata.rc_max_rate = 9000;
1796  rd->ffcodecdata.rc_min_rate = 0;
1797  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1798  rd->ffcodecdata.mux_packet_size = 2048;
1799  rd->ffcodecdata.mux_rate = 10080000;
1800  break;
1801  }
1802 
     /* Re-install codec-specific expert options for the newly chosen codec. */
1803  ffmpeg_set_expert_options(rd);
1804 }
1805 
/* Make the FFmpeg codec settings consistent with the selected image type:
 * installs a sane preset when settings are missing/invalid and fixes up
 * the audio codec/bitrate when audio handling was triggered. */
1806 void BKE_ffmpeg_image_type_verify(RenderData *rd, ImageFormatData *imf)
1807 {
1808  int audio = 0;
1809 
1810  if (imf->imtype == R_IMF_IMTYPE_FFMPEG) {
     /* Uninitialized or clearly invalid settings: fall back to H.264. */
1811  if (rd->ffcodecdata.type <= 0 || rd->ffcodecdata.codec <= 0 ||
1812  rd->ffcodecdata.audio_codec <= 0 || rd->ffcodecdata.video_bitrate <= 1) {
1813  BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
     /* NOTE(review): original lines 1814-1815 appear elided in this
      * extraction (presumably audio handling before the MKV override);
      * confirm against upstream. */
1816  rd->ffcodecdata.type = FFMPEG_MKV;
1817  }
     /* OGG output is known-broken here; force MPEG-2 instead. */
1818  if (rd->ffcodecdata.type == FFMPEG_OGG) {
1819  rd->ffcodecdata.type = FFMPEG_MPEG2;
1820  }
1821 
1822  audio = 1;
1823  }
1824  else if (imf->imtype == R_IMF_IMTYPE_H264) {
1825  if (rd->ffcodecdata.codec != AV_CODEC_ID_H264) {
1826  BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
1827  audio = 1;
1828  }
1829  }
1830  else if (imf->imtype == R_IMF_IMTYPE_XVID) {
1831  if (rd->ffcodecdata.codec != AV_CODEC_ID_MPEG4) {
1832  BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_XVID);
1833  audio = 1;
1834  }
1835  }
1836  else if (imf->imtype == R_IMF_IMTYPE_THEORA) {
1837  if (rd->ffcodecdata.codec != AV_CODEC_ID_THEORA) {
1838  BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_THEORA);
1839  audio = 1;
1840  }
1841  }
1842 
     /* An unset (negative) audio codec becomes explicit NONE with a default
      * bitrate. */
1843  if (audio && rd->ffcodecdata.audio_codec < 0) {
1844  rd->ffcodecdata.audio_codec = AV_CODEC_ID_NONE;
1845  rd->ffcodecdata.audio_bitrate = 128;
1846  }
1847 }
1848 
/* Re-apply codec-specific expert options for the current codec selection. */
1849 void BKE_ffmpeg_codec_settings_verify(RenderData *rd)
1850 {
1851  ffmpeg_set_expert_options(rd);
1852 }
1853 
1854 bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
1855 {
1856  int codec = rd->ffcodecdata.codec;
1857 
1858  return ELEM(codec,
1859  AV_CODEC_ID_FFV1,
1860  AV_CODEC_ID_QTRLE,
1861  AV_CODEC_ID_PNG,
1862  AV_CODEC_ID_VP9,
1863  AV_CODEC_ID_HUFFYUV);
1864 }
1865 
1866 void *BKE_ffmpeg_context_create(void)
1867 {
1868  FFMpegContext *context;
1869 
1870  /* new ffmpeg data struct */
1871  context = MEM_callocN(sizeof(FFMpegContext), "new ffmpeg context");
1872 
1873  context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
1874  context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
1875  context->ffmpeg_video_bitrate = 1150;
1876  context->ffmpeg_audio_bitrate = 128;
1877  context->ffmpeg_gop_size = 12;
1878  context->ffmpeg_autosplit = 0;
1879  context->ffmpeg_autosplit_count = 0;
1880  context->ffmpeg_preview = false;
1881  context->stamp_data = NULL;
1882  context->audio_time_total = 0.0;
1883 
1884  return context;
1885 }
1886 
1887 void BKE_ffmpeg_context_free(void *context_v)
1888 {
1889  FFMpegContext *context = context_v;
1890  if (context == NULL) {
1891  return;
1892  }
1893  if (context->stamp_data) {
1894  MEM_freeN(context->stamp_data);
1895  }
1896  MEM_freeN(context);
1897 }
1898 
1899 #endif /* WITH_FFMPEG */
typedef float(TangentPoint)[2]
#define IDP_Float(prop)
Definition: BKE_idprop.h:179
#define IDP_Int(prop)
Definition: BKE_idprop.h:154
void IDP_FreePropertyContent(struct IDProperty *prop)
Definition: idprop.c:1029
#define IDP_String(prop)
Definition: BKE_idprop.h:181
bool IDP_AddToGroup(struct IDProperty *group, struct IDProperty *prop) ATTR_NONNULL()
Definition: idprop.c:643
struct IDProperty * IDP_GetPropertyFromGroup(const struct IDProperty *prop, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
struct IDProperty * IDP_New(const char type, const IDPropertyTemplate *val, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: idprop.c:907
void IDP_FreeFromGroup(struct IDProperty *group, struct IDProperty *prop) ATTR_NONNULL()
Definition: idprop.c:690
struct StampData * BKE_stamp_info_from_scene_static(const struct Scene *scene)
void BKE_stamp_info_callback(void *data, struct StampData *stamp_data, StampCallback callback, bool noskip)
Definition: image.c:2686
const char * BKE_main_blendfile_path_from_global(void)
Definition: main.c:439
void BKE_report(ReportList *reports, ReportType type, const char *message)
Definition: report.c:104
#define BLI_assert(a)
Definition: BLI_assert.h:58
void BLI_kdtree_nd_() free(KDTree *tree)
Definition: kdtree_impl.h:116
MINLINE int min_ii(int a, int b)
MINLINE unsigned int log2_floor_u(unsigned int x)
bool BLI_make_existing_file(const char *name)
Definition: path_util.c:1347
#define FILE_MAX
bool BLI_path_frame_check_chars(const char *path) ATTR_NONNULL()
Definition: path_util.c:958
bool BLI_path_frame_range(char *path, int sta, int end, int digits) ATTR_NONNULL()
Definition: path_util.c:825
bool BLI_path_abs(char *path, const char *basepath) ATTR_NONNULL()
Definition: path_util.c:1016
bool BLI_path_suffix(char *string, size_t maxlen, const char *suffix, const char *sep) ATTR_NONNULL()
Definition: path_util.c:669
int BLI_strcasecmp(const char *s1, const char *s2) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: string.c:666
size_t BLI_snprintf(char *__restrict dst, size_t maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
char * BLI_strncpy(char *__restrict dst, const char *__restrict src, const size_t maxncpy) ATTR_NONNULL()
Definition: string.c:108
unsigned int uint
Definition: BLI_sys_types.h:83
int BLI_system_thread_count(void)
Definition: threads.cc:309
#define UNUSED(x)
#define ELEM(...)
#define STREQ(a, b)
typedef double(DMatrix)[4][4]
@ IDP_FLOAT
Definition: DNA_ID.h:99
@ IDP_STRING
Definition: DNA_ID.h:97
@ IDP_INT
Definition: DNA_ID.h:98
@ IDP_GROUP
Definition: DNA_ID.h:101
#define R_IMF_IMTYPE_FFMPEG
#define R_IMF_IMTYPE_H264
#define R_EXTENSION
#define R_IMF_IMTYPE_THEORA
@ FFM_PRESET_GOOD
@ FFM_PRESET_REALTIME
@ FFM_PRESET_BEST
@ FFMPEG_LOSSLESS_OUTPUT
@ FFMPEG_AUTOSPLIT_OUTPUT
@ FFMPEG_USE_MAX_B_FRAMES
@ FFM_CRF_MEDIUM
#define R_IMF_IMTYPE_XVID
#define R_IMF_PLANES_RGBA
@ FFM_CHANNELS_SURROUND4
@ FFM_CHANNELS_STEREO
@ FFM_CHANNELS_SURROUND51
@ FFM_CHANNELS_SURROUND71
@ FFM_CHANNELS_MONO
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei width
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei height
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
const char * IMB_ffmpeg_last_error(void)
Read Guarded memory(de)allocation.
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
Scene scene
#define str(s)
FFMPEG_INLINE void my_guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
Definition: ffmpeg_compat.h:57
#define fabsf(x)
__kernel void ccl_constant KernelData ccl_global void ccl_global char ccl_global int ccl_global char ccl_global unsigned int ccl_global float * buffer
format
Definition: logImageCore.h:47
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:47
static void error(const char *str)
Definition: meshlaplacian.c:65
#define PRINT(format,...)
Definition: moviecache.c:50
static unsigned c
Definition: RandGen.cpp:97
const btScalar eps
Definition: poly34.cpp:11
return ret
struct SELECTID_Context context
Definition: select_engine.c:47
unsigned char uint8_t
Definition: stdint.h:81
IDProperty * properties
ListBase group
Definition: DNA_ID.h:64
int len
Definition: DNA_ID.h:84
struct IDProperty * next
Definition: DNA_ID.h:70
char name[64]
Definition: DNA_ID.h:74
IDPropertyData data
Definition: DNA_ID.h:80
char type
Definition: DNA_ID.h:71
void * first
Definition: DNA_listBase.h:47
float frs_sec_base
struct ImageFormatData im_format
char pic[1024]
struct FFMpegCodecData ffcodecdata
char frame[512]
Definition: image.c:1948
const char * str
Definition: BKE_idprop.h:41
struct IDPropertyTemplate::@27 string
__forceinline const avxi abs(const avxi &a)
Definition: util_avxi.h:186
ccl_device_inline float2 fabs(const float2 &a)
uint len