Panda3D
// Filename: ffmpegTexture.cxx
// Created by:  zacpavlov (05May06)
//
////////////////////////////////////////////////////////////////////
//
// PANDA 3D SOFTWARE
// Copyright (c) Carnegie Mellon University.  All rights reserved.
//
// All use of this software is subject to the terms of the revised BSD
// license.  You should have received a copy of this license along
// with this source code in a file named "LICENSE."
//
////////////////////////////////////////////////////////////////////

#include "pandabase.h"

#ifdef HAVE_FFMPEG
#include "ffmpegTexture.h"
#include "clockObject.h"
#include "config_gobj.h"
#include "config_grutil.h"
#include "bamCacheRecord.h"
#include "bamReader.h"

TypeHandle FFMpegTexture::_type_handle;

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::Constructor
//       Access: Published
//  Description:
////////////////////////////////////////////////////////////////////
FFMpegTexture::
FFMpegTexture(const string &name) :
  VideoTexture(name)
{
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::Copy Constructor
//       Access: Protected
//  Description: Use FFMpegTexture::make_copy() to make a duplicate
//               copy of an existing FFMpegTexture.
////////////////////////////////////////////////////////////////////
FFMpegTexture::
FFMpegTexture(const FFMpegTexture &copy) :
  VideoTexture(copy),
  _pages(copy._pages)
{
  nassertv(false);
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::Destructor
//       Access: Published, Virtual
//  Description: I'm betting that Texture takes care of the rest, so
//               we'll just do a clear.
////////////////////////////////////////////////////////////////////
FFMpegTexture::
~FFMpegTexture() {
  clear();
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::do_make_copy
//       Access: Protected, Virtual
//  Description: Returns a new copy of the same Texture.  This copy,
//               if applied to geometry, will be copied into texture
//               memory as a separate texture from the original, so
//               it will be duplicated in texture memory (and may be
//               independently modified if desired).
//
//               If the Texture is an FFMpegTexture, the resulting
//               duplicate may be animated independently of the
//               original.
////////////////////////////////////////////////////////////////////
PT(Texture) FFMpegTexture::
do_make_copy() {
  PT(FFMpegTexture) tex = new FFMpegTexture(get_name());
  tex->do_assign(*this);

  return tex.p();
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::do_assign
//       Access: Protected
//  Description: Implements make_copy().
////////////////////////////////////////////////////////////////////
void FFMpegTexture::
do_assign(const FFMpegTexture &copy) {
  VideoTexture::do_assign(copy);
  _pages = copy._pages;
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::modify_page
//       Access: Private
//  Description: Returns a reference to the zth VideoPage (level) of
//               the texture.  In the case of a 2-d texture, there is
//               only one page, level 0; but cube maps and 3-d
//               textures have more.
////////////////////////////////////////////////////////////////////
FFMpegTexture::VideoPage &FFMpegTexture::
modify_page(int z) {
  nassertr(z < _z_size, _pages[0]);
  while (z >= (int)_pages.size()) {
    _pages.push_back(VideoPage());
  }
  return _pages[z];
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::do_reconsider_video_properties
//       Access: Private
//  Description: Resets the internal Texture properties when a new
//               video file is loaded.  Returns true if the new image
//               is valid, false otherwise.
////////////////////////////////////////////////////////////////////
bool FFMpegTexture::
do_reconsider_video_properties(const FFMpegTexture::VideoStream &stream,
                               int num_components, int z,
                               const LoaderOptions &options) {
  double frame_rate = 0.0f;
  int num_frames = 0;
  if (!stream._codec_context) {
    // Not valid yet.
    return true;
  }

  AVStream *vstream = stream._format_context->streams[stream._stream_number];

  if (stream.is_from_file()) {
    // The frame rate comes from ffmpeg as an AVRational.
    frame_rate = vstream->r_frame_rate.num / (float)vstream->r_frame_rate.den;

    // The number of frames is a little questionable if we've got a
    // variable frame rate.  The duration comes in as a generic
    // timestamp, and is therefore multiplied by AV_TIME_BASE.
    num_frames = (int)((stream._format_context->duration * frame_rate) / AV_TIME_BASE);
    if (grutil_cat.is_debug()) {
      grutil_cat.debug()
        << "Loaded " << stream._filename << ", " << num_frames << " frames at "
        << frame_rate << " fps\n";
    }
  }

  int width = stream._codec_context->width;
  int height = stream._codec_context->height;

  int x_size = width;
  int y_size = height;

  if (Texture::get_textures_power_2() != ATS_none) {
    x_size = up_to_power_2(width);
    y_size = up_to_power_2(height);
  }

  if (grutil_cat.is_debug()) {
    grutil_cat.debug()
      << "Video stream is " << width << " by " << height
      << " pixels; fitting in texture " << x_size << " by "
      << y_size << " texels.\n";
  }

  if (!do_reconsider_image_properties(x_size, y_size, num_components,
                                      T_unsigned_byte, z, options)) {
    return false;
  }

  if (_loaded_from_image &&
      (get_video_width() != width || get_video_height() != height ||
       get_num_frames() != num_frames || get_frame_rate() != frame_rate)) {
    grutil_cat.error()
      << "Video properties have changed for texture " << get_name()
      << " level " << z << ".\n";
    return false;
  }

  set_frame_rate(frame_rate);
  set_num_frames(num_frames);
  set_video_size(width, height);

  // By default, the newly-loaded video stream will immediately start
  // looping.
  loop(true);

  return true;
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::make_texture
//       Access: Public, Static
//  Description: A factory function to make a new FFMpegTexture, used
//               to pass to the TexturePool.
////////////////////////////////////////////////////////////////////
PT(Texture) FFMpegTexture::
make_texture() {
  return new FFMpegTexture;
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::update_frame
//       Access: Protected, Virtual
//  Description: Called once per frame, as needed, to load the new
//               image contents.
////////////////////////////////////////////////////////////////////
void FFMpegTexture::
update_frame(int frame) {
  int max_z = min(_z_size, (int)_pages.size());
  for (int z = 0; z < max_z; ++z) {
    VideoPage &page = _pages.at(z);
    if (page._color.is_valid() || page._alpha.is_valid()) {
      do_modify_ram_image();
    }
    if (page._color.is_valid()) {
      nassertv(_num_components >= 3 && _component_width == 1);

      // This is a little different from the OpenCV implementation:
      // the frame is kept on the stream itself.  This is partially
      // because there is a conversion step that must be done for
      // every video (I've gotten very odd results with any video
      // that I don't convert, even if the I/O formats are the same!).
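      // (The row-by-row copies below read the decoded frame from its
      // bottom row upward while writing the RAM image forward; this
      // flips the image vertically, presumably to reconcile ffmpeg's
      // top-down row order with Panda's bottom-up RAM image layout.)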
      if (page._color.get_frame_data(frame)) {
        nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size);
        unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z;
        int dest_row_width = (_x_size * _num_components * _component_width);

        if (_num_components == 3) {
          // Simplest case, where we deal with an RGB texture.
          int source_row_width = 3 * page._color._codec_context->width;
          unsigned char *source = (unsigned char *)page._color._frame_out->data[0]
            + source_row_width * (get_video_height() - 1);

          // Row-by-row copy.
          for (int y = 0; y < get_video_height(); ++y) {
            memcpy(dest, source, source_row_width);
            dest += dest_row_width;
            source -= source_row_width;
          }

        } else if (page._color._codec_context->pix_fmt == PIX_FMT_RGB32) {
          // Next best option: a four-component alpha video on one
          // stream.
          int source_row_width = page._color._codec_context->width * 4;
          unsigned char *source = (unsigned char *)page._color._frame_out->data[0]
            + source_row_width * (get_video_height() - 1);

          // Row-by-row copy.
          for (int y = 0; y < get_video_height(); ++y) {
            memcpy(dest, source, source_row_width);
            dest += dest_row_width;
            source -= source_row_width;
          }

        } else {
          // Otherwise, we've got to be tricky: interleave the color
          // in with the alpha, pixel by pixel.
          int source_row_width = page._color._codec_context->width * 3;
          unsigned char *source = (unsigned char *)page._color._frame_out->data[0]
            + source_row_width * (get_video_height() - 1);

          nassertv(_num_components == 4);
          for (int y = 0; y < get_video_height(); ++y) {
            int dx = 0;
            int sx = 0;
            for (int x = 0; x < get_video_width(); ++x) {
              dest[dx] = source[sx];
              dest[dx + 1] = source[sx + 1];
              dest[dx + 2] = source[sx + 2];
              dx += 4;
              sx += 3;
            }
            dest += dest_row_width;
            source -= source_row_width;
          }
        }
      }
    }

    if (page._alpha.is_valid()) {
      nassertv(_num_components == 4 && _component_width == 1);

      if (page._alpha.get_frame_data(frame)) {
        nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size);

        // Currently, we assume the alpha has been converted to an
        // RGB format, though there is no reason it couldn't be an
        // 8-bit grayscale.
        unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z;
        int dest_row_width = (_x_size * _num_components * _component_width);

        int source_row_width = page._alpha._codec_context->width * 3;
        unsigned char *source = (unsigned char *)page._alpha._frame_out->data[0]
          + source_row_width * (get_video_height() - 1);
        for (int y = 0; y < get_video_height(); ++y) {
          int dx = 3;
          int sx = 0;
          for (int x = 0; x < get_video_width(); ++x) {
            dest[dx] = source[sx];
            dx += 4;
            sx += 3;
          }
          dest += dest_row_width;
          source -= source_row_width;
        }
      }
    }
  }
}


////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::do_read_one
//       Access: Protected, Virtual
//  Description: Combines a color and alpha video image from the two
//               indicated filenames.  Both must be the same kind of
//               video with similar properties.
////////////////////////////////////////////////////////////////////
bool FFMpegTexture::
do_read_one(const Filename &fullpath, const Filename &alpha_fullpath,
            int z, int n, int primary_file_num_channels, int alpha_file_channel,
            const LoaderOptions &options,
            bool header_only, BamCacheRecord *record) {
  if (record != (BamCacheRecord *)NULL) {
    record->add_dependent_file(fullpath);
  }

  nassertr(n == 0, false);
  nassertr(z >= 0 && z < get_z_size(), false);

  VideoPage &page = modify_page(z);
  if (!page._color.read(fullpath)) {
    grutil_cat.error()
      << "FFMpeg couldn't read " << fullpath << " as video.\n";
    return false;
  }

  if (!alpha_fullpath.empty()) {
    if (!page._alpha.read(alpha_fullpath)) {
      grutil_cat.error()
        << "FFMPEG couldn't read " << alpha_fullpath << " as video.\n";
      page._color.clear();
      return false;
    }
  }

  if (z == 0) {
    if (!has_name()) {
      set_name(fullpath.get_basename_wo_extension());
    }
    if (!_filename.empty()) {
      _filename = fullpath;
      _alpha_filename = alpha_fullpath;
    }

    _fullpath = fullpath;
    _alpha_fullpath = alpha_fullpath;
  }

  if (page._color._codec_context->pix_fmt == PIX_FMT_RGB32) {
    // There had better not be an alpha interleave here.
    nassertr(alpha_fullpath.empty(), false);

    _primary_file_num_channels = 4;
    _alpha_file_channel = 0;
    if (!do_reconsider_video_properties(page._color, 4, z, options)) {
      page._color.clear();
      return false;
    }

  } else {
    _primary_file_num_channels = 3;
    _alpha_file_channel = alpha_file_channel;

    if (page._alpha.is_valid()) {
      if (!do_reconsider_video_properties(page._color, 4, z, options)) {
        page._color.clear();
        page._alpha.clear();
        return false;
      }
      if (!do_reconsider_video_properties(page._alpha, 4, z, options)) {
        page._color.clear();
        page._alpha.clear();
        return false;
      }

    } else {
      if (!do_reconsider_video_properties(page._color, 3, z, options)) {
        page._color.clear();
        page._alpha.clear();
        return false;
      }
    }
  }

  set_loaded_from_image();
  clear_current_frame();
  update_frame(0);
  return true;
}


////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::do_load_one
//       Access: Protected, Virtual
//  Description: Resets the texture (or the particular level of the
//               texture) to the indicated static image.
////////////////////////////////////////////////////////////////////
bool FFMpegTexture::
do_load_one(const PNMImage &pnmimage, const string &name, int z, int n,
            const LoaderOptions &options) {
  if (z <= (int)_pages.size()) {
    VideoPage &page = modify_page(z);
    page._color.clear();
  }

  return Texture::do_load_one(pnmimage, name, z, n, options);
}


////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::register_with_read_factory
//       Access: Public, Static
//  Description: Factory method to generate a Texture object.
////////////////////////////////////////////////////////////////////
void FFMpegTexture::
register_with_read_factory() {
  // Since Texture is such a funny object that is reloaded from the
  // TexturePool each time, instead of actually being read fully from
  // the bam file, and since the VideoTexture and FFMpegTexture
  // classes don't really add any useful data to the bam record, we
  // don't need to define make_from_bam(), fillin(), or
  // write_datagram() in this class--we just inherit the same
  // functions from Texture.

  // We do, however, have to register this class with the BamReader,
  // to avoid warnings about creating the wrong kind of object from
  // the bam file.
  BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::Constructor
//       Access: Public
//  Description:
////////////////////////////////////////////////////////////////////
FFMpegTexture::VideoStream::
VideoStream() :
  _codec_context(NULL),
  _format_context(NULL),
  _frame(NULL),
  _frame_out(NULL),
  _next_frame_number(0)
{
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::Copy Constructor
//       Access: Public
//  Description:
////////////////////////////////////////////////////////////////////
FFMpegTexture::VideoStream::
VideoStream(const FFMpegTexture::VideoStream &copy) :
  _codec_context(NULL),
  _format_context(NULL),
  _frame(NULL),
  _frame_out(NULL),
  _next_frame_number(0)
{
  // Rather than copying the _capture pointer, we must open a new
  // stream that references the same file.
  if (copy.is_valid()) {
    if (copy.is_from_file()) {
      read(copy._filename);
    }
  }
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::Destructor
//       Access: Public
//  Description:
////////////////////////////////////////////////////////////////////
FFMpegTexture::VideoStream::
~VideoStream() {
  clear();
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::get_frame_data
//       Access: Public
//  Description: Decodes the indicated frame number into _frame_out.
//               Returns true on success, false on failure.  It is
//               most efficient to call this in increasing order of
//               frame number.
////////////////////////////////////////////////////////////////////
bool FFMpegTexture::VideoStream::
get_frame_data(int frame_number) {
  nassertr(is_valid(), false);
  int coming_from = _next_frame_number;

  _next_frame_number = frame_number + 1;
  AVPacket packet;
  AVStream *vstream = _format_context->streams[_stream_number];

  int got_frame;

  // Can we get to our target frame just by skipping forward a few
  // frames?  We arbitrarily draw the line at 50 frames for now.
  if (frame_number >= coming_from && frame_number - coming_from < 50) {

    if (frame_number > coming_from) {
      // OK, we do have to skip a few frames.
      _codec_context->hurry_up = true;
      while (frame_number > coming_from) {
        int err = read_video_frame(&packet);
        if (err < 0) {
          return false;
        }
#if LIBAVCODEC_VERSION_INT < 3414272
        avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data, packet.size);
#else
        avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet);
#endif
        av_free_packet(&packet);
        ++coming_from;
      }
      _codec_context->hurry_up = false;
    }

    // Now we're ready to read a frame.
    int err = read_video_frame(&packet);
    if (err < 0) {
      return false;
    }

  } else {
    // We have to skip backward, or maybe forward a whole bunch of
    // frames.  Better off seeking through the stream.

    double time_stamp = ((double)AV_TIME_BASE * frame_number * vstream->r_frame_rate.den) / vstream->r_frame_rate.num;
    double curr_time_stamp;

    // Find the point in time.
    av_seek_frame(_format_context, -1, (long long)time_stamp,
                  AVSEEK_FLAG_BACKWARD);

    // Okay, now we're at the nearest keyframe behind our timestamp.
    // Hurry up and move through frames until we find a frame just
    // after it.
    _codec_context->hurry_up = true;
    do {
      int err = read_video_frame(&packet);
      if (err < 0) {
        return false;
      }

      curr_time_stamp = (((double)AV_TIME_BASE * packet.pts) /
                         ((double)packet.duration * av_q2d(vstream->r_frame_rate)));
      if (curr_time_stamp > time_stamp) {
        break;
      }

#if LIBAVCODEC_VERSION_INT < 3414272
      avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data, packet.size);
#else
      avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet);
#endif

      av_free_packet(&packet);
    } while (true);

    _codec_context->hurry_up = false;
    // We're now near the target frame, with a packet ready to decode
    // (and free).
  }

  // Now we have a packet from one of the branches above.  Let's
  // decode it into a frame.

  int frame_finished;

  // Is this a packet from the video stream?
  if (packet.stream_index == _stream_number) {
    // Decode the video frame.
#if LIBAVCODEC_VERSION_INT < 3414272
    avcodec_decode_video(_codec_context, _frame, &frame_finished, packet.data, packet.size);
#else
    avcodec_decode_video2(_codec_context, _frame, &frame_finished, &packet);
#endif

    // Did we get a video frame?
    if (frame_finished) {
      // Convert the image from its native format to RGB.
#ifdef HAVE_SWSCALE
      // Note from pro-rsoft: ffmpeg removed img_convert and told
      // everyone to use sws_scale instead--that's why I wrote
      // this code.
      // I have no idea if it works well or not, but it seems to
      // compile and run without crashing.
      PixelFormat dst_format;
      if (_codec_context->pix_fmt != PIX_FMT_RGB32) {
        dst_format = PIX_FMT_BGR24;
      } else {
        dst_format = PIX_FMT_RGB32;
      }
      struct SwsContext *convert_ctx =
        sws_getContext(_codec_context->width, _codec_context->height,
                       _codec_context->pix_fmt, _codec_context->width, _codec_context->height,
                       dst_format, 2, NULL, NULL, NULL);
      nassertr(convert_ctx != NULL, false);
      sws_scale(convert_ctx, _frame->data, _frame->linesize,
                0, _codec_context->height, _frame_out->data, _frame_out->linesize);
      sws_freeContext(convert_ctx);
#else
      if (_codec_context->pix_fmt != PIX_FMT_RGB32) {
        img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24,
                    (AVPicture *)_frame, _codec_context->pix_fmt,
                    _codec_context->width, _codec_context->height);

      } else {  // _codec_context->pix_fmt == PIX_FMT_RGB32
        img_convert((AVPicture *)_frame_out, PIX_FMT_RGB32,
                    (AVPicture *)_frame, _codec_context->pix_fmt,
                    _codec_context->width, _codec_context->height);
      }
#endif
    }
  }

  // Free the packet that was allocated by av_read_frame.
  av_free_packet(&packet);

  return true;
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::read
//       Access: Public
//  Description: Sets up the stream to read the indicated file.
//               Returns true on success, false on failure.
////////////////////////////////////////////////////////////////////
bool FFMpegTexture::VideoStream::
read(const Filename &filename) {
  // Clear out the last stream.
  clear();

  string os_specific = filename.to_os_specific();
  // Open the video file.
  int result = av_open_input_file(&_format_context, os_specific.c_str(), NULL,
                                  0, NULL);
  if (result != 0) {
    grutil_cat.error() << "ffmpeg AVERROR: " << result << endl;
    // Don't call clear(), because nothing happened yet.
    return false;
  }

  // Retrieve the stream information.
  result = av_find_stream_info(_format_context);
  if (result < 0) {
    grutil_cat.error() << "ffmpeg AVERROR: " << result << endl;
    clear();
    return false;
  }
  dump_format(_format_context, 0, os_specific.c_str(), false);

  _stream_number = -1;
  for (int i = 0; i < _format_context->nb_streams; i++) {
    if ((*_format_context->streams[i]->codec).codec_type == CODEC_TYPE_VIDEO) {
      _stream_number = i;
      break;
    }
  }

  if (_stream_number == -1) {
    grutil_cat.error()
      << "ffmpeg: no stream found with codec of type CODEC_TYPE_VIDEO" << endl;
    clear();
    return false;
  }

  // Get a pointer to the codec context for the video stream.
  AVCodecContext *codec_context = _format_context->streams[_stream_number]->codec;

  if (grutil_cat.is_debug()) {
    grutil_cat.debug()
      << "ffmpeg: codec id is " << codec_context->codec_id << endl;
  }

  // Find the decoder for the video stream.
  _codec = avcodec_find_decoder(codec_context->codec_id);
  if (_codec == NULL) {
    grutil_cat.error() << "ffmpeg: no appropriate decoder found" << endl;
    clear();
    return false;
  }

  if (_codec->capabilities & CODEC_CAP_TRUNCATED) {
    codec_context->flags |= CODEC_FLAG_TRUNCATED;
  }

  // Open the codec.
  _codec_context = codec_context;
  result = avcodec_open(_codec_context, _codec);
  if (result < 0) {
    grutil_cat.error() << "ffmpeg AVERROR: " << result << endl;
    _codec_context = NULL;
    clear();
    return false;
  }

  _frame = avcodec_alloc_frame();

  if (_codec_context->pix_fmt != PIX_FMT_RGB32) {
    _frame_out = avcodec_alloc_frame();
    if (_frame_out == NULL) {
      grutil_cat.error()
        << "ffmpeg: unable to allocate AVFrame (BGR24)" << endl;
      clear();
      return false;
    }

    // Determine the required buffer size and allocate the buffer.
    _image_size_bytes = avpicture_get_size(PIX_FMT_BGR24, _codec_context->width,
                                           _codec_context->height);

    _raw_data = new uint8_t[_image_size_bytes];

    // Assign the appropriate parts of the buffer to the image planes
    // in _frame_out.
    avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_BGR24,
                   _codec_context->width, _codec_context->height);

  } else {
    _frame_out = avcodec_alloc_frame();
    if (_frame_out == NULL) {
      grutil_cat.error()
        << "ffmpeg: unable to allocate AVFrame (RGB32)" << endl;
      clear();
      return false;
    }

    // Determine the required buffer size and allocate the buffer.
    _image_size_bytes = avpicture_get_size(PIX_FMT_RGB32, _codec_context->width,
                                           _codec_context->height);

    _raw_data = new uint8_t[_image_size_bytes];

    // Assign the appropriate parts of the buffer to the image planes
    // in _frame_out.
    avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_RGB32,
                   _codec_context->width, _codec_context->height);
  }
  // We could put an option here for single-channel frames.

  _next_frame_number = 0;
  _filename = filename;

  return true;
}


////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::clear
//       Access: Public
//  Description: Stops the video playback and frees the associated
//               resources.
////////////////////////////////////////////////////////////////////
void FFMpegTexture::VideoStream::
clear() {
  if (_codec_context) {
    avcodec_close(_codec_context);
    _codec_context = NULL;
  }
  if (_format_context) {
    av_close_input_file(_format_context);
    _format_context = NULL;
  }
  if (_frame) {
    av_free(_frame);
    _frame = NULL;
  }
  if (_frame_out) {
    av_free(_frame_out);
    _frame_out = NULL;
  }

  _next_frame_number = 0;
}

////////////////////////////////////////////////////////////////////
//     Function: FFMpegTexture::VideoStream::read_video_frame
//       Access: Private
//  Description: Fills packet with the next sequential video frame in
//               the stream, skipping over all non-video frames.
//               packet must later be deallocated with
//               av_free_packet().
//
//               Returns nonnegative on success, or negative on error.
////////////////////////////////////////////////////////////////////
int FFMpegTexture::VideoStream::
read_video_frame(AVPacket *packet) {
  int err = av_read_frame(_format_context, packet);
  if (err < 0) {
    return err;
  }

  while (packet->stream_index != _stream_number) {
    // It's not a video packet; free it and get another.
    av_free_packet(packet);

    err = av_read_frame(_format_context, packet);
    if (err < 0) {
      grutil_cat.debug()
        << "Got error " << err << " reading frame.\n";
      return err;
    }
  }

  // This is a video packet; return it.
  return err;
}


#endif  // HAVE_FFMPEG
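
The sketch below is not part of ffmpegTexture.cxx. It is a minimal illustration, assuming a Panda3D build with HAVE_FFMPEG, of how application code typically ends up exercising this class: a movie file is requested through the TexturePool (the make_texture() factory above exists to be handed to the TexturePool for exactly this purpose), and the resulting texture is applied to geometry like any other Texture. The filename "movie.avi" and the helper apply_movie_texture() are placeholders, not part of the Panda3D sources.

// Usage sketch (illustration only).
#include "texturePool.h"
#include "nodePath.h"
#include "texture.h"

void
apply_movie_texture(NodePath card) {
  // Ask the TexturePool for the movie file; with ffmpeg support
  // compiled in, a video file can come back as an animated texture.
  PT(Texture) tex = TexturePool::load_texture(Filename("movie.avi"));
  if (tex == (Texture *)NULL) {
    return;
  }

  // Apply it like any ordinary texture; the frames then update on
  // their own as the video plays.
  card.set_texture(tex);
}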