source: trunk/packages/vizservers/nanovis/RpAVTranslate.cpp @ 2072


/*
 * ======================================================================
 *  Rappture::AVTranslate
 *
 *  AUTHOR:  Derrick Kearney, Purdue University
 *
 *  Copyright (c) 2005-2009  Purdue Research Foundation
 * ----------------------------------------------------------------------
 *  See the file "license.terms" for information on usage and
 *  redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 * ======================================================================
 */


#include "nvconf.h"

#if defined(HAVE_FFMPEG_MEM_H) || defined(HAVE_LIBAVUTIL_MEM_H)

#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include <fstream>
#include <assert.h>

extern "C" {
#ifdef HAVE_FFMPEG_MEM_H
#include <ffmpeg/mem.h>
#endif
#ifdef HAVE_LIBAVUTIL_MEM_H
#include <libavutil/mem.h>
#endif
}

#include "RpAVTranslate.h"

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

using namespace Rappture;

AVTranslate::AVTranslate(size_t width, size_t height) :
    _width (width),
    _height (height),
    _bitRate(400000),
    _frameRate(25.0f),
    _videoOutbufSize(200000),
    _videoOutbuf(NULL),
    _fmtPtr(NULL),
    _ocPtr(NULL),
    _avStreamPtr(NULL),
    _pictPtr(NULL),
    _rgbPictPtr(NULL)
{
}

AVTranslate::AVTranslate(size_t width, size_t height, size_t bitRate,
                         float frameRate)
  : _width (width),
    _height (height),
    _bitRate(bitRate),
    _frameRate(frameRate),
    _videoOutbufSize(200000),
    _videoOutbuf(NULL),
    _fmtPtr(NULL),
    _ocPtr(NULL),
    _avStreamPtr(NULL),
    _pictPtr(NULL),
    _rgbPictPtr(NULL)
{
}

/**
 * Copy constructor
 * @param AVTranslate object to copy
 */
 /*
AVTranslate::AVTranslate(const AVTranslate& o)
{}
*/

AVTranslate::~AVTranslate()
{
#ifdef notdef
    /* FIXME:: This can't be right.  Don't want to automatically write out
     * trailer etc. on destruction of this object. */
    done();
#endif
}


bool
AVTranslate::init(Outcome &status, const char *filename)
{
    status.addContext("Rappture::AVTranslate::init()");
    /* Initialize libavcodec, and register all codecs and formats */
    avcodec_init();
    avcodec_register_all();
    av_register_all();

    /* Auto detect the output format from the name. default is mpeg. */
    _fmtPtr = guess_format(NULL, filename, NULL);
    if (_fmtPtr == NULL) {
        /*
        printf(  "Could not deduce output format from "
                 "file extension: using MPEG.\n");
        */
        _fmtPtr = guess_format("mpeg", NULL, NULL);
    }
    if (_fmtPtr == NULL) {
        status.addError("Could not find suitable output format");
        return false;
    }

#ifdef HAVE_AVFORMAT_ALLOC_CONTEXT
    /* Allocate the output media context. */
    _ocPtr = avformat_alloc_context();
#else
    _ocPtr = av_alloc_format_context();
#endif
    if (!_ocPtr) {
        status.addError("Memory error while allocating format context");
        return false;
    }
    _ocPtr->oformat = _fmtPtr;
    snprintf(_ocPtr->filename, sizeof(_ocPtr->filename), "%s", filename);

    /* Add the video stream using the default format codecs and initialize the
       codecs. */
    _avStreamPtr = NULL;
    if (_fmtPtr->video_codec != CODEC_ID_NONE) {
        if (!addVideoStream(status, _fmtPtr->video_codec, &_avStreamPtr)) {
            return false;
        }
    }

    /* Set the output parameters (must be done even if no parameters). */
    if (av_set_parameters(_ocPtr, NULL) < 0) {
        status.addError("Invalid output format parameters");
        return false;
    }

    dump_format(_ocPtr, 0, filename, 1);

    /* Now that all the parameters are set, we can open the video codec and
       allocate the necessary encode buffers */
    if (_avStreamPtr) {
        if (!openVideo(status)) {
            return false;
        }
    }

    /* Open the output file, if needed. */
    if (!(_fmtPtr->flags & AVFMT_NOFILE)) {
        if (url_fopen(&_ocPtr->pb, filename, URL_WRONLY) < 0) {
            status.addError("Could not open '%s'", filename);
            return false;
        }
    }

    /* write the stream header, if any */
    av_write_header(_ocPtr);
    return true;
}

bool
AVTranslate::append(Outcome &status, uint8_t *rgbData, size_t linePad)
{
    status.addContext("Rappture::AVTranslate::append()");
    if (rgbData == NULL) {
        status.addError("rgbData pointer is NULL");
        return false;
    }
    /* Copy the data into the picture, dropping the row padding and reversing
     * the row order: the origin of the GL image is the lower-left, while for
     * the movie it's the upper-left. */
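    /* Illustration (added; not in the original): for a hypothetical 2x2 RGB
     * frame with linePad == 0, bytesPerRow is 6 and srcRowPtr starts at
     * source row 1 (the top of the on-screen image), so the bottom-up GL
     * rows are written top-down into _rgbPictPtr->data[0]. */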
    size_t bytesPerRow = _width * 3;
    size_t bytesPerLine = bytesPerRow + linePad;
    uint8_t *srcRowPtr = rgbData + ((_height - 1) * bytesPerLine);
    uint8_t *destPtr = _rgbPictPtr->data[0];
    for (size_t y = 0; y < _height; y++) {
        uint8_t *sp, *send;

        for (sp = srcRowPtr, send = sp + bytesPerRow; sp < send; sp++, destPtr++) {
            *destPtr = *sp;
        }
        srcRowPtr -= bytesPerLine;
    }

#ifdef HAVE_IMG_CONVERT
    // Use img_convert instead of sws_scale because img_convert is LGPL and
    // sws_scale is GPL
    img_convert((AVPicture *)_pictPtr, PIX_FMT_YUV420P,
                (AVPicture *)_rgbPictPtr, PIX_FMT_RGB24,
                _width, _height);
#endif  /*HAVE_IMG_CONVERT*/
    return writeVideoFrame(status);
}


bool
AVTranslate::done(Outcome &status)
{
    size_t i = 0;

    /* Close each codec */
    if (_avStreamPtr) {
        closeVideo(status);
    }

    /* Write the trailer, if any */
    av_write_trailer(_ocPtr);

    /* Free the streams */
    for (i = 0; i < _ocPtr->nb_streams; i++) {
        av_freep(&_ocPtr->streams[i]->codec);
        // _ocPtr->streams[i]->codec = NULL;

        av_freep(&_ocPtr->streams[i]);
        // _ocPtr->streams[i] = NULL;
    }

    if (!(_fmtPtr->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(_ocPtr->pb);
    }

    /* Free the format context */
    av_free(_ocPtr);
    _ocPtr = NULL;
    return true;
}
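
/*
 * Usage sketch (illustrative only; everything except the AVTranslate methods
 * defined above is hypothetical):
 *
 *     Rappture::Outcome status;
 *     Rappture::AVTranslate movie(640, 480);   // default 400 kbps, 25 fps
 *     if (!movie.init(status, "movie.mpeg")) {
 *         // report status and bail out
 *     }
 *     // one call per captured frame; rgbPixels is packed RGB24 data in
 *     // bottom-up (OpenGL) row order, pad is the per-row byte padding
 *     movie.append(status, rgbPixels, pad);
 *     ...
 *     movie.done(status);
 */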


/* Add a video output stream */
bool
AVTranslate::addVideoStream(Outcome &status, CodecID codec_id,
                            AVStream **streamPtrPtr)
{
    status.addContext("Rappture::AVTranslate::addVideoStream()");
    if (streamPtrPtr == NULL) {
        status.addError("AVStream **streamPtrPtr is NULL");
        return false;
    }

    AVStream *streamPtr;
    streamPtr = av_new_stream(_ocPtr, 0);
    if (streamPtr == NULL) {
        status.addError("Could not alloc stream");
        return false;
    }

    AVCodecContext *codecPtr;
    codecPtr = streamPtr->codec;
    codecPtr->codec_id = codec_id;
    codecPtr->codec_type = CODEC_TYPE_VIDEO;

    /* Put sample parameters */
    codecPtr->bit_rate = _bitRate;
    /* resolution must be a multiple of two */
    codecPtr->width = _width;
    codecPtr->height = _height;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    codecPtr->time_base.den = _frameRate;
    codecPtr->time_base.num = 1;
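    /* Example (added): with the default _frameRate of 25 the time base is
     * 1/25 second, i.e. one tick per frame.  Note that a fractional rate
     * such as 29.97 would be truncated here, since time_base.den is an
     * integer. */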
    codecPtr->gop_size = 12;            /* Emit one intra frame every twelve
                                         * frames at most */
    codecPtr->pix_fmt = PIX_FMT_YUV420P;
    if (codecPtr->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        codecPtr->max_b_frames = 2;
    }
    if (codecPtr->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as the
           motion of the chroma plane does not match the luma plane. */
        codecPtr->mb_decision = 2;
    }
    /* some formats want stream headers to be separate */
    if ((strcmp(_ocPtr->oformat->name, "mp4") == 0) ||
        (strcmp(_ocPtr->oformat->name, "mov") == 0) ||
        (strcmp(_ocPtr->oformat->name, "3gp") == 0)) {
        codecPtr->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    *streamPtrPtr = streamPtr;
    return true;
}

bool
AVTranslate::allocPicture(Outcome &status, PixelFormat pixFmt,
                          AVFrame **framePtrPtr)
{
    status.addContext("Rappture::AVTranslate::allocPicture()");
    if (framePtrPtr == NULL) {
        status.addError("AVFrame **framePtrPtr is NULL");
        return false;
    }

    AVFrame *framePtr;
    framePtr = avcodec_alloc_frame();
    if (framePtr == NULL) {
        status.addError("Memory error: Could not alloc frame");
        return false;
    }

    size_t size;
    size = avpicture_get_size(pixFmt, _width, _height);

    uint8_t *bits;
    bits = (uint8_t *)av_malloc(size);
    if (bits == NULL) {
        av_free(framePtr);
        status.addError("Memory error: Could not alloc picture buffer");
        return false;
    }
    avpicture_fill((AVPicture *)framePtr, bits, pixFmt, _width, _height);
    *framePtrPtr = framePtr;
    return true;
}
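
/* Note (added): for PIX_FMT_RGB24 the allocated picture is a single packed
 * plane, 3 bytes per pixel, with data[0] pointing at its start; append()
 * relies on that layout when it copies rows into _rgbPictPtr->data[0]. */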

bool
AVTranslate::openVideo(Outcome &status)
{
    AVCodec *codec;
    AVCodecContext *c;

    status.addContext("Rappture::AVTranslate::openVideo()");
    c = _avStreamPtr->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (codec == NULL) {
        status.addError("can't find codec %d", c->codec_id);
        return false;
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        status.addError("can't open codec %d", c->codec_id);
        return false;
    }

    _videoOutbuf = NULL;
    if (!(_ocPtr->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        _videoOutbuf = (uint8_t *) av_malloc(_videoOutbufSize);
    }
    /* Allocate the encoded raw picture */
    if (!allocPicture(status, c->pix_fmt, &_pictPtr)) {
        return false;
    }
    if (!allocPicture(status, PIX_FMT_RGB24, &_rgbPictPtr)) {
        status.addError("allocPicture: can't allocate picture");
        return false;
    }
    return true;
}

bool
AVTranslate::writeVideoFrame(Outcome &status)
{
    AVCodecContext *codecPtr;

    status.addContext("Rappture::AVTranslate::writeVideoFrame()");
    codecPtr = _avStreamPtr->codec;

    /* encode the image */
    int size;
    size = avcodec_encode_video(codecPtr, _videoOutbuf, _videoOutbufSize,
        _pictPtr);
    if (size < 0) {
        status.addError("Error while encoding video frame");
        return false;
    }
    if (size == 0) {
        return true;            /* Image was buffered */
    }
    AVPacket pkt;
    av_init_packet(&pkt);

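    /* av_rescale_q() converts the frame's timestamp from codec time-base
     * ticks to stream time-base ticks.  Illustrative numbers (not from this
     * file): with a codec time_base of 1/25 and a muxer stream time_base of
     * 1/90000, a pts of 10 becomes 10 * 90000 / 25 = 36000. */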
    pkt.pts = av_rescale_q(codecPtr->coded_frame->pts, codecPtr->time_base,
                           _avStreamPtr->time_base);
    if (codecPtr->coded_frame->key_frame) {
        pkt.flags |= PKT_FLAG_KEY;
    }
    pkt.stream_index = _avStreamPtr->index;
    pkt.data = _videoOutbuf;
    pkt.size = size;

    /* write the compressed frame in the media file */
    if (av_write_frame(_ocPtr, &pkt) < 0) {
        status.addError("Error while writing video frame");
        return false;
    }
    return true;
}


bool
AVTranslate::closeVideo(Outcome &status)
{
    if (_avStreamPtr != NULL) {
        avcodec_close(_avStreamPtr->codec);
    }
    if (_pictPtr != NULL) {
        av_free(_pictPtr->data[0]);
        av_free(_pictPtr);
        _pictPtr = NULL;
    }
    if (_rgbPictPtr != NULL) {
        av_free(_rgbPictPtr->data[0]);
        av_free(_rgbPictPtr);
        _rgbPictPtr = NULL;
    }
    if (_videoOutbuf != NULL) {
        av_free(_videoOutbuf);
        _videoOutbuf = NULL;
    }
    return true;
}

/*
status.addError("error while opening file");
status.addContext("Rappture::Buffer::dump()");
return status;
*/

#endif /* HAVE_...*/