source: branches/blt4/packages/vizservers/nanovis/RpAVTranslate.cpp @ 2742

Last change on this file since 2742 was 2742, checked in by gah, 12 years ago

sync with trunk

File size: 12.3 KB
RevLine 
[1325]1
[1282]2/*
3 * ======================================================================
4 *  Rappture::AVTranslate
5 *
6 *  AUTHOR:  Derrick Kearney, Purdue University
7 *
8 *  Copyright (c) 2005-2009  Purdue Research Foundation
9 * ----------------------------------------------------------------------
10 *  See the file "license.terms" for information on usage and
11 *  redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
12 * ======================================================================
13 */
14
15
[1351]16#include "nvconf.h"
17
[2120]18#if defined(HAVE_LIBAVCODEC) || defined(HAVE_LIBAVFORMAT)
[1282]19#include <cstdlib>
20#include <cstdio>
21#include <cstring>
22#include <cmath>
23#include <fstream>
24#include <assert.h>
25
[1351]26extern "C" {
27#ifdef HAVE_FFMPEG_MEM_H
28#include <ffmpeg/mem.h>
29#endif
30#ifdef HAVE_LIBAVUTIL_MEM_H
31#include <libavutil/mem.h>
32#endif
33}
34
35#include "RpAVTranslate.h"
36
[2742]37#ifndef HAVE_AVMEDIA_TYPE_VIDEO
38#define AVMEDIA_TYPE_VIDEO      CODEC_TYPE_VIDEO
39#endif  /* HAVE_AVMEDIA_TYPE_VIDEO */
40
41#ifndef AV_PKT_FLAG_KEY
42#define AV_PKT_FLAG_KEY         PKT_FLAG_KEY           
43#endif
44
45#ifndef HAVE_AV_GUESS_FORMAT
46#define av_guess_format         guess_format
47#endif  /*HAVE_AV_GUESS_FORMAT*/
48
49#ifndef HAVE_AV_DUMP_FORMAT
50#define av_dump_format          dump_format
51#endif  /*HAVE_AV_DUMP_FORMAT*/
52
53#ifndef HAVE_AVIO_OPEN
54#define avio_open               url_fopen       
55#endif  /*HAVE_AVIO_OPEN*/
56
57#ifndef HAVE_AVIO_CLOSE
58#define avio_close              url_fclose     
59#endif  /*HAVE_AVIO_CLOSE*/
60
[1282]61#ifndef M_PI
62#define M_PI 3.14159265358979323846
63#endif
64
[1325]65using namespace Rappture;
[1282]66
[1351]67AVTranslate::AVTranslate(size_t width, size_t height) :
68    _width (width),
[1282]69    _height (height),
[1351]70    _bitRate(400000),
71    _frameRate(25.0f),
72    _videoOutbufSize(200000),
73    _videoOutbuf(NULL),
74    _fmtPtr(NULL),
75    _ocPtr(NULL),
76    _avStreamPtr(NULL),
77    _pictPtr(NULL),
78    _rgbPictPtr(NULL)
79{
80}
[1282]81
[1351]82AVTranslate::AVTranslate(size_t width, size_t height, size_t bitRate,
83                         float frameRate)
[1282]84  : _width (width),
85    _height (height),
[1351]86    _bitRate(bitRate),
87    _frameRate(frameRate),
88    _videoOutbufSize(200000),
89    _videoOutbuf(NULL),
90    _fmtPtr(NULL),
91    _ocPtr(NULL),
92    _avStreamPtr(NULL),
93    _pictPtr(NULL),
94    _rgbPictPtr(NULL)
95{
96}
[1282]97
98/**
99 * Copy constructor
100 * @param AVTranslate object to copy
101 */
102 /*
103AVTranslate::AVTranslate(const AVTranslate& o)
104{}
105*/
106
/**
 * Destructor.  Intentionally does NOT finalize the movie: callers must
 * invoke done() explicitly to write the trailer and release the FFmpeg
 * state.  (The disabled code below would do it automatically, but see
 * the FIXME — and note done() now requires an Outcome argument.)
 */
AVTranslate::~AVTranslate()
{
#ifdef notdef
    /* FIXME:: This can't be right.  Don't want to automatically write out
     * trailer etc. on destruction of this object. */
    done();
#endif
}
[1282]115
116
/**
 * Set up the FFmpeg encoding pipeline and open the output file.
 *
 * Guesses the container format from the file extension (falling back to
 * MPEG), allocates the format context, adds a video stream, opens the
 * codec and encode buffers, opens the output file, and writes the
 * stream header.  Must be called before append()/done().
 *
 * The #ifdef blocks select between old and new FFmpeg API names; the
 * HAVE_* symbols come from nvconf.h.
 *
 * @param status    collects error/context messages on failure
 * @param filename  path of the movie file to create
 * @return true on success; false (with status set) on any failure
 */
bool
AVTranslate::init(Outcome &status, const char *filename)
{
    status.addContext("Rappture::AVTranslate::init()");
    /* Initialize libavcodec, and register all codecs and formats */
    avcodec_init();
    avcodec_register_all();
    av_register_all();

    /* Auto detect the output format from the name. default is mpeg. */
    _fmtPtr = av_guess_format(NULL, filename, NULL);
    if (_fmtPtr == NULL) {
        /*
          TRACE(  "Could not deduce output format from"
                 "file extension: using MPEG.\n");
        */
        _fmtPtr = av_guess_format("mpeg", NULL, NULL);
    }
    if (_fmtPtr == NULL) {
        status.addError("Could not find suitable output format");
        return false;
    }

#ifdef HAVE_AVFORMAT_ALLOC_CONTEXT
    /* Allocate the output media context. */
    _ocPtr = avformat_alloc_context();
#else
    _ocPtr = av_alloc_format_context();
#endif
    if (!_ocPtr) {
        status.addError("Memory error while allocating format context");
        return false;
    }
    _ocPtr->oformat = _fmtPtr;
    snprintf(_ocPtr->filename, sizeof(_ocPtr->filename), "%s", filename);

    /* Add the video stream using the default format codecs and initialize the
       codecs. */
    _avStreamPtr = NULL;
    if (_fmtPtr->video_codec != CODEC_ID_NONE) {
        if ( (!addVideoStream(status, _fmtPtr->video_codec, &_avStreamPtr)) ) {
            return false;
        }
    }

#if defined(HAVE_AV_SET_PARAMETERS) && !defined(HAVE_AVFORMAT_WRITE_HEADER)
    /* Set the output parameters (must be done even if no parameters).
     * Only needed on old FFmpeg; removed once avformat_write_header exists. */
    if (av_set_parameters(_ocPtr, NULL) < 0) {
        status.addError("Invalid output format parameters");
        return false;
    }
#endif
    av_dump_format(_ocPtr, 0, filename, 1);

    /* Now that all the parameters are set, we can open the video codec and
       allocate the necessary encode buffers */
    if (_avStreamPtr) {
        if (!openVideo(status)) {
            return false;
        }
    }

    /* Open the output file, if needed.  Formats flagged AVFMT_NOFILE
     * manage their own I/O. */
    if (!(_fmtPtr->flags & AVFMT_NOFILE)) {
        if (avio_open(&_ocPtr->pb, filename, URL_WRONLY) < 0) {
            status.addError("Could not open '%s'", filename);
            return false;
        }
    }

    /* write the stream header, if any */
#ifdef HAVE_AVFORMAT_WRITE_HEADER
    avformat_write_header(_ocPtr, NULL);
#else
    av_write_header(_ocPtr);
#endif
    return true;
}
195
[1325]196bool
[1351]197AVTranslate::append(Outcome &status, uint8_t *rgbData, size_t linePad)
[1282]198{
[1351]199    status.addContext("Rappture::AVTranslate::append()");
200    if (rgbData == NULL) {
201        status.addError("rdbData pointer is NULL");
[1325]202        return false;
[1282]203    }
[1500]204    /* Copy the data into the picture without the padding and reversing the
205     * rows. Note that the origin of the GL image is the lower-left while for
206     * the movie it's upper-left. */
[1503]207    size_t bytesPerRow = _width * 3;
208    size_t bytesPerLine = bytesPerRow + linePad;
209    uint8_t *srcRowPtr = rgbData + ((_height - 1) * bytesPerLine);
[1351]210    uint8_t *destPtr = _rgbPictPtr->data[0];
[1282]211    for (size_t y = 0; y < _height; y++) {
[1500]212        uint8_t *sp, *send;
213       
[1503]214        for (sp = srcRowPtr, send = sp + bytesPerRow; sp < send; sp++, destPtr++) {
[1500]215            *destPtr = *sp;
[1282]216        }
[1503]217        srcRowPtr -= bytesPerLine;
[1282]218    }
219
[1351]220#ifdef HAVE_IMG_CONVERT
[1508]221    // Use img_convert instead of sws_scale because img_convert is LGPL and
222    // sws_scale is GPL
[1351]223    img_convert((AVPicture *)_pictPtr, PIX_FMT_YUV420P,
224                (AVPicture *)_rgbPictPtr, PIX_FMT_RGB24,
[1282]225                _width, _height);
[1351]226#endif  /*HAVE_IMG_CONVERT*/
[1325]227    writeVideoFrame(status);
[1282]228
[1325]229    return true;
[1282]230}
231
[1325]232
233bool
234AVTranslate::done(Outcome &status)
[1282]235{
236    size_t i = 0;
237
[1508]238    /* Close each codec */
[1351]239    if (_avStreamPtr) {
[1325]240        closeVideo(status);
[1282]241    }
242
[1508]243    /* Write the trailer, if any */
[1351]244    av_write_trailer(_ocPtr);
[1282]245
[1508]246    /* Free the streams */
[1351]247    for(i = 0; i < _ocPtr->nb_streams; i++) {
248        av_freep(&_ocPtr->streams[i]->codec);
249        // _ocPtr->streams[i]->codec = NULL;
[1282]250
[1351]251        av_freep(&_ocPtr->streams[i]);
252        // _ocPtr->streams[i] = NULL;
[1282]253    }
254
[1351]255    if (!(_fmtPtr->flags & AVFMT_NOFILE)) {
[1282]256        /* close the output file */
[2742]257        avio_close(_ocPtr->pb);
[1282]258    }
259
[1508]260    /* Free the stream */
[1351]261    av_free(_ocPtr);
262    _ocPtr = NULL;
[1325]263    return true;
[1282]264}
265
266
[1508]267/* Add a video output stream */
[1325]268bool
[1351]269AVTranslate::addVideoStream(Outcome &status, CodecID codec_id,
270                            AVStream **streamPtrPtr)
[1282]271{
[1351]272    status.addContext("Rappture::AVTranslate::add_video_stream()");
273    if (streamPtrPtr == NULL) {
[1325]274        status.addError("AVStream **st is NULL");
275        return false;
[1282]276    }
277
[1351]278    AVStream *streamPtr;
279    streamPtr = av_new_stream(_ocPtr, 0);
280    if (streamPtr == NULL) {
[1325]281        status.addError("Could not alloc stream");
282        return false;
[1282]283    }
284
[1351]285    AVCodecContext *codecPtr;
286    codecPtr = streamPtr->codec;
287    codecPtr->codec_id = codec_id;
[2742]288    codecPtr->codec_type = AVMEDIA_TYPE_VIDEO;
[1282]289
[1508]290    /* Put sample parameters */
[1351]291    codecPtr->bit_rate = _bitRate;
[1282]292    /* resolution must be a multiple of two */
[1351]293    codecPtr->width = _width;
294    codecPtr->height = _height;
[1282]295    /* time base: this is the fundamental unit of time (in seconds) in terms
296       of which frame timestamps are represented. for fixed-fps content,
297       timebase should be 1/framerate and timestamp increments should be
298       identically 1. */
[1351]299    codecPtr->time_base.den = _frameRate;
300    codecPtr->time_base.num = 1;
[1508]301    codecPtr->gop_size = 12;            /* Emit one intra frame every twelve
302                                         * frames at most */
[1351]303    codecPtr->pix_fmt = PIX_FMT_YUV420P;
304    if (codecPtr->codec_id == CODEC_ID_MPEG2VIDEO) {
[1282]305        /* just for testing, we also add B frames */
[1351]306        codecPtr->max_b_frames = 2;
[1282]307    }
[1351]308    if (codecPtr->codec_id == CODEC_ID_MPEG1VIDEO) {
[1282]309        /* Needed to avoid using macroblocks in which some coeffs overflow.
[1508]310           This does not happen with normal video, it just happens here as the
311           motion of the chroma plane does not match the luma plane. */
[1351]312        codecPtr->mb_decision=2;
[1282]313    }
[1508]314    /* some formats want stream headers to be separate */
[1351]315    if((strcmp(_ocPtr->oformat->name, "mp4") == 0) ||
316       (strcmp(_ocPtr->oformat->name, "mov") == 0) ||
317       (strcmp(_ocPtr->oformat->name, "3gp") == 0)) {
318        codecPtr->flags |= CODEC_FLAG_GLOBAL_HEADER;
[1282]319    }
[1351]320    *streamPtrPtr = streamPtr;
[1325]321    return true;
[1282]322}
323
[1325]324bool
[1571]325AVTranslate::allocPicture(Outcome &status, PixelFormat pixFmt,
326     AVFrame **framePtrPtr)
[1282]327{
[1351]328    status.addContext("Rappture::AVTranslate::allocPicture()");
329    if (framePtrPtr == NULL) {
[1325]330        status.addError("AVFrame **p == NULL");
331        return false;
[1282]332    }
333
[1351]334    AVFrame *framePtr;
335    framePtr = avcodec_alloc_frame();
336    if (framePtr == NULL) {
[1325]337        status.addError("Memory error: Could not alloc frame");
338        return false;
[1282]339    }
[1351]340
341    size_t size;
342    size = avpicture_get_size(pixFmt, _width, _height);
343
344    uint8_t *bits;
345    bits = (uint8_t *)av_malloc(size);
346    if (bits == NULL) {
347        av_free(framePtr);
[1325]348        status.addError("Memory error: Could not alloc picture buffer");
349        return false;
[1282]350    }
[1351]351    avpicture_fill((AVPicture *)framePtr, bits, pixFmt, _width, _height);
352    *framePtrPtr = framePtr;
[1325]353    return true;
[1282]354}
355
[1325]356bool
357AVTranslate::openVideo(Outcome &status)
[1282]358{
359    AVCodec *codec;
360    AVCodecContext *c;
361
[1351]362    status.addContext("Rappture::AVTranslate::openVideo()");
363    c = _avStreamPtr->codec;
[1282]364
365    /* find the video encoder */
366    codec = avcodec_find_encoder(c->codec_id);
[1508]367    if (codec == NULL) {
368        status.addError("can't find codec %d\n", c->codec->id);
[1325]369        return false;
[1282]370    }
371
372    /* open the codec */
373    if (avcodec_open(c, codec) < 0) {
[1508]374        status.addError("can't open codec %d", c->codec->id);
[1325]375        return false;
[1282]376    }
377
[1351]378    _videoOutbuf = NULL;
379    if (!(_ocPtr->oformat->flags & AVFMT_RAWPICTURE)) {
[1282]380        /* allocate output buffer */
381        /* XXX: API change will be done */
382        /* buffers passed into lav* can be allocated any way you prefer,
383           as long as they're aligned enough for the architecture, and
384           they're freed appropriately (such as using av_free for buffers
385           allocated with av_malloc) */
[1351]386        _videoOutbuf = (uint8_t *) av_malloc(_videoOutbufSize);
[1282]387    }
[1351]388    /* Allocate the encoded raw picture */
389    if (!allocPicture(status, c->pix_fmt, &_pictPtr)) {
[1325]390        return false;
[1282]391    }
[1351]392    if (!allocPicture(status, PIX_FMT_RGB24, &_rgbPictPtr)) {
393        status.addError("allocPicture: can't allocate picture");
[1325]394        return false;
[1282]395    }
[1325]396    return true;
[1282]397}
398
/**
 * Encode the current YUV picture (_pictPtr) and write the resulting
 * compressed packet to the output file.
 *
 * @param status  collects error/context messages on failure
 * @return true on success or when the encoder merely buffered the
 *         frame; false (with status set) on encode/write failure
 */
bool
AVTranslate::writeVideoFrame(Outcome &status)
{
    AVCodecContext *codecPtr;

    status.addContext("Rappture::AVTranslate::writeVideoframe()");
    codecPtr = _avStreamPtr->codec;

    /* encode the image */
    int size;
    size = avcodec_encode_video(codecPtr, _videoOutbuf, _videoOutbufSize,
        _pictPtr);
    if (size < 0) {
        status.addError("Error while writing video frame");
        return false;
    }
    if (size == 0) {
        return true;            /* Image was buffered */
    }
    AVPacket pkt;
    av_init_packet(&pkt);

    /* Rescale the codec's pts into the stream's time base so the muxer
     * stamps the packet correctly. */
    pkt.pts = av_rescale_q(codecPtr->coded_frame->pts, codecPtr->time_base,
                           _avStreamPtr->time_base);
    if (codecPtr->coded_frame->key_frame) {
        pkt.flags |= AV_PKT_FLAG_KEY;
    }
    pkt.stream_index = _avStreamPtr->index;
    /* pkt borrows _videoOutbuf; nothing extra to free after writing. */
    pkt.data = _videoOutbuf;
    pkt.size = size;
   
    /* write the compressed frame in the media file */
    if (av_write_frame(_ocPtr, &pkt) < 0) {
        status.addError("Error while writing video frame");
        return false;
    }
    return true;
}
437
438
[1325]439bool
440AVTranslate::closeVideo(Outcome &status)
[1282]441{
[1351]442    if (_avStreamPtr != NULL) {
443        avcodec_close(_avStreamPtr->codec);
444    }
445    if (_pictPtr != NULL) {
446        av_free(_pictPtr->data[0]);
447        av_free(_pictPtr);
448        _pictPtr = NULL;
449    }
450    if (_rgbPictPtr != NULL) {
451        av_free(_rgbPictPtr->data[0]);
452        av_free(_rgbPictPtr);
453        _rgbPictPtr = NULL;
454    }
455    if (_videoOutbuf != NULL) {
456        av_free(_videoOutbuf);
457        _videoOutbuf = NULL;
458    }
[1325]459    return true;
[1282]460}
461
462/*
[1325]463status.addError("error while opening file");
[1282]464status.addContext("Rappture::Buffer::dump()");
465return status;
466*/
467
[2120]468#endif /* HAVE_LIBAVCODEC && HAVE_LIBAVFORMAT */
Note: See TracBrowser for help on using the repository browser.