성태의 닷넷 이야기
홈 주인
모아 놓은 자료
프로그래밍
질문/답변
사용자 관리
사용자
메뉴
아티클
외부 아티클
유용한 코드
온라인 기능
MathJax 입력기
최근 덧글
[정성태] VT sequences to "CONOUT$" vs. STD_O...
[정성태] NetCoreDbg is a managed code debugg...
[정성태] Evaluating tail call elimination in...
[정성태] What’s new in System.Text.Json in ....
[정성태] What's new in .NET 9: Cryptography ...
[정성태] 아... 제시해 주신 "https://akrzemi1.wordp...
[정성태] 다시 질문을 정리할 필요가 있을 것 같습니다. 제가 본문에...
[이승준] 완전히 잘못 짚었습니다. 댓글 지우고 싶네요. 검색을 해보...
[정성태] 우선 답글 감사합니다. ^^ 그런데, 사실 저 예제는 (g...
[이승준] 수정이 안되어서... byteArray는 BYTE* 타입입니다...
글쓰기
제목
이름
암호
전자우편
HTML
홈페이지
유형
제니퍼 .NET
닷넷
COM 개체 관련
스크립트
VC++
VS.NET IDE
Windows
Team Foundation Server
디버깅 기술
오류 유형
개발 환경 구성
웹
기타
Linux
Java
DDK
Math
Phone
Graphics
사물인터넷
부모글 보이기/감추기
내용
<div style='display: inline'> <h1 style='font-family: Malgun Gothic, Consolas; font-size: 20pt; color: #006699; text-align: center; font-weight: bold'>C# - ffmpeg(FFmpeg.AutoGen)를 이용한 demuxing_decoding.c 예제 포팅</h1> <p> 지난 예제에 이어,<br /> <br /> <pre style='margin: 10px 0px 10px 10px; padding: 10px 0px 10px 10px; background-color: #fbedbb; overflow: auto; font-family: Consolas, Verdana;' >
C# - ffmpeg(FFmpeg.AutoGen)를 이용한 filtering_video.c 예제 포팅
; <a target='tab' href='https://www.sysnet.pe.kr/2/0/12984'>https://www.sysnet.pe.kr/2/0/12984</a>
</pre> <br /> 이번에는 <a target='tab' href='https://ffmpeg.org/doxygen/trunk/examples.html'>ffmpeg 예제</a> 중 "<a target='tab' href='https://ffmpeg.org/doxygen/trunk/demuxing_decoding_8c-example.html'>demuxing_decoding.c</a>" 파일을 FFmpeg.AutoGen으로 포팅하겠습니다.<br /> <br /> <pre style='margin: 10px 0px 10px 10px; padding: 10px 0px 10px 10px; background-color: #fbedbb; overflow: auto; font-family: Consolas, Verdana;' >
using FFmpeg.AutoGen;
using FFmpeg.AutoGen.Example;
using System;
using System.IO;

namespace encode_video
{
    // C# port of the ffmpeg "demuxing_decoding.c" example: demuxes an input
    // media file and decodes its video/audio streams into two raw output files.
    internal unsafe class Program
    {
        static AVFormatContext* fmt_ctx = null;
        static AVCodecContext* video_dec_ctx = null;
        static AVCodecContext* audio_dec_ctx = null;
        static int width, height;
        static AVPixelFormat pix_fmt;
        static AVStream* video_stream;
        static AVStream* audio_stream;
        static string src_filename = "";
        static string video_dst_filename = "";
        static string audio_dst_filename = "";
        static FileStream? video_dst_file;
        static FileStream? audio_dst_file;

        static byte_ptrArray4 video_dst_data;
        static int_array4 video_dst_linesize;
        static int video_dst_bufsize;

        static int video_stream_idx = -1;
        static int audio_stream_idx = -1;
        static AVFrame* frame = null;
        static AVPacket* pkt = null;
        static int video_frame_count = 0;
        static int audio_frame_count = 0;

        // Writes one decoded video frame as raw planar data to video_dst_file.
        // Returns 0 on success, -1 if the frame geometry/format changed mid-stream
        // (a rawvideo output cannot represent that).
        static unsafe int output_video_frame(AVFrame* frame)
        {
            if (frame->width != width || frame->height != height || frame->format != (int)pix_fmt)
            {
                Console.WriteLine("Error: Width, height and pixel format have to be constant in a rawvideo file, but the width, height or pixel format of the input video changed:\n" +
                    $"old: width = {width}, height = {height}, format = {ffmpeg.av_get_pix_fmt_name(pix_fmt)}\n" +
                    $"new: width = {frame->width}, height = {frame->height}, format = {ffmpeg.av_get_pix_fmt_name((AVPixelFormat)frame->format)}");
                return -1;
            }

            Console.WriteLine($"video_frame n: {video_frame_count++} coded: {frame->coded_picture_number}\n");

            // Copy the (possibly padded) decoder output into our tightly-packed
            // destination buffer before writing it out.
            byte_ptrArray4 tempData = new byte_ptrArray4();
            tempData.UpdateFrom(frame->data);
            int_array4 tempLinesize = new int_array4();
            tempLinesize.UpdateFrom(frame->linesize);

            ffmpeg.av_image_copy(ref video_dst_data, ref video_dst_linesize, ref tempData, tempLinesize, pix_fmt, width, height);

            video_dst_file?.Write(new ReadOnlySpan&lt;byte&gt;(video_dst_data[0], video_dst_bufsize));
            return 0;
        }

        // Writes the first channel of one decoded audio frame to audio_dst_file.
        static int output_audio_frame(AVFrame* frame)
        {
            int unpadded_linesize = frame->nb_samples * ffmpeg.av_get_bytes_per_sample((AVSampleFormat)frame->format);

            Console.WriteLine($"audio_frame n: {audio_frame_count++} nb_samples: {frame->nb_samples} pts: {FFmpegHelper.av_ts2timestr(frame->pts, in audio_dec_ctx->time_base)}");

            audio_dst_file?.Write(new ReadOnlySpan&lt;byte&gt;(frame->extended_data[0], unpadded_linesize));
            return 0;
        }

        // Sends one packet (or null, to flush) to the decoder and drains every
        // frame it produces. Returns 0 on success or a negative AVERROR code.
        static unsafe int decode_packet(AVCodecContext* dec, AVPacket* pkt)
        {
            int ret = ffmpeg.avcodec_send_packet(dec, pkt);
            if (ret < 0)
            {
                Console.WriteLine($"Error submitting a packet for decoding {ret}");
                return ret;
            }

            while (ret >= 0)
            {
                ret = ffmpeg.avcodec_receive_frame(dec, frame);
                if (ret < 0)
                {
                    // NOTE: AVERROR_EOF is already a (negative) error code, so it
                    // must NOT be wrapped in ffmpeg.AVERROR() again - doing so
                    // flips its sign and EOF falls through to the error branch,
                    // printing "Error during decoding (End of file)" when flushing.
                    if (ret == ffmpeg.AVERROR_EOF || ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
                    {
                        return 0;
                    }

                    Console.WriteLine($"Error during decoding ({FFmpegHelper.av_err2str(ret)})");
                    return ret;
                }

                if (dec->codec->type == AVMediaType.AVMEDIA_TYPE_VIDEO)
                {
                    ret = output_video_frame(frame);
                }
                else
                {
                    ret = output_audio_frame(frame);
                }

                // The frame is reused for every avcodec_receive_frame call.
                ffmpeg.av_frame_unref(frame);
                if (ret < 0)
                {
                    return ret;
                }
            }

            return 0;
        }

        // Finds the best stream of the given media type, allocates and opens a
        // decoder context for it, and reports its stream index via stream_idx.
        static unsafe int open_codec_context(int* stream_idx, AVCodecContext** dec_ctx, AVFormatContext* fmt_ctx, AVMediaType type)
        {
            int ret, stream_index;
            AVStream* st;
            AVCodec* dec = null;

            ret = ffmpeg.av_find_best_stream(fmt_ctx, type, -1, -1, null, 0);
            if (ret < 0)
            {
                Console.WriteLine($"Could not find {ffmpeg.av_get_media_type_string(type)} stream in input file '{src_filename}'");
                return ret;
            }
            else
            {
                stream_index = ret;
                st = fmt_ctx->streams[stream_index];

                dec = ffmpeg.avcodec_find_decoder(st->codecpar->codec_id);
                if (dec == null)
                {
                    Console.WriteLine($"Failed to find {ffmpeg.av_get_media_type_string(type)} codec");
                    return ffmpeg.AVERROR(ffmpeg.EINVAL);
                }

                *dec_ctx = ffmpeg.avcodec_alloc_context3(dec);
                if (*dec_ctx == null)
                {
                    Console.WriteLine($"Failed to allocate the {ffmpeg.av_get_media_type_string(type)} codec context");
                    return ffmpeg.AVERROR(ffmpeg.ENOMEM);
                }

                if ((ret = ffmpeg.avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0)
                {
                    Console.WriteLine($"Failed to copy {ffmpeg.av_get_media_type_string(type)} codec parameters to decoder context");
                    return ret;
                }

                if ((ret = ffmpeg.avcodec_open2(*dec_ctx, dec, null)) < 0)
                {
                    Console.WriteLine($"Failed to open {ffmpeg.av_get_media_type_string(type)} codec");
                    return ret;
                }

                *stream_idx = stream_index;
            }

            return 0;
        }

        static unsafe int Main(string[] args)
        {
            FFmpegBinariesHelper.RegisterFFmpegBinaries();

            int ret = 0;

            // Sample input: <a target='tab' href='https://samplelib.com/sample-mp4.html'>https://samplelib.com/sample-mp4.html</a>
            string dirPath = Path.GetDirectoryName(typeof(Program).Assembly.Location) ?? "";
            src_filename = Path.Combine(dirPath, "..", "..", "..", "Samples", "sample-10s.mp4");
            video_dst_filename = Path.Combine(dirPath, "video.out");
            audio_dst_filename = Path.Combine(dirPath, "audio.out");

            // Pin the static fields so their addresses can be handed to the
            // C-style double-pointer APIs (open/free/close).
            fixed (AVFormatContext** pfmt_ctx = &fmt_ctx)
            fixed (AVCodecContext** pvideo_dec_ctx = &video_dec_ctx)
            fixed (AVCodecContext** paudio_dec_ctx = &audio_dec_ctx)
            fixed (int* pvideo_stream_index = &video_stream_idx)
            fixed (int* paudio_stream_index = &audio_stream_idx)
            fixed (AVPacket** ppkt = &pkt)
            fixed (AVFrame** pframe = &frame)
            {
                if (ffmpeg.avformat_open_input(pfmt_ctx, src_filename, null, null) < 0)
                {
                    Console.WriteLine($"Could not open source file {src_filename}");
                    return 1;
                }

                if (ffmpeg.avformat_find_stream_info(fmt_ctx, null) < 0)
                {
                    Console.WriteLine($"Could not find stream information");
                    return 1;
                }

                if (open_codec_context(pvideo_stream_index, pvideo_dec_ctx, fmt_ctx, AVMediaType.AVMEDIA_TYPE_VIDEO) >= 0)
                {
                    video_stream = fmt_ctx->streams[video_stream_idx];

                    video_dst_file = File.OpenWrite(video_dst_filename);
                    if (video_dst_file == null)
                    {
                        Console.WriteLine($"Could not open destination file {video_dst_filename}");
                        ret = 1;
                        goto end;
                    }

                    // Allocate a packed image buffer sized for the decoded frames.
                    width = video_dec_ctx->width;
                    height = video_dec_ctx->height;
                    pix_fmt = video_dec_ctx->pix_fmt;
                    ret = ffmpeg.av_image_alloc(ref video_dst_data, ref video_dst_linesize, width, height, pix_fmt, 1);
                    if (ret < 0)
                    {
                        Console.WriteLine("Could not allocate raw video buffer");
                        goto end;
                    }

                    video_dst_bufsize = ret;
                }

                if (open_codec_context(paudio_stream_index, paudio_dec_ctx, fmt_ctx, AVMediaType.AVMEDIA_TYPE_AUDIO) >= 0)
                {
                    audio_stream = fmt_ctx->streams[audio_stream_idx];
                    audio_dst_file = File.OpenWrite(audio_dst_filename);
                    if (audio_dst_file == null)
                    {
                        Console.WriteLine($"Could not open destination file {audio_dst_filename}");
                        ret = 1;
                        goto end;
                    }
                }

                ffmpeg.av_dump_format(fmt_ctx, 0, src_filename, 0);

                if (audio_stream == null && video_stream == null)
                {
                    Console.WriteLine("Could not find audio or video stream in the input, aborting");
                    ret = 1;
                    goto end;
                }

                frame = ffmpeg.av_frame_alloc();
                if (frame == null)
                {
                    Console.WriteLine("Could not allocate frame");
                    ret = ffmpeg.AVERROR(ffmpeg.ENOMEM);
                    goto end;
                }

                pkt = ffmpeg.av_packet_alloc();
                if (pkt == null)
                {
                    Console.WriteLine("Could not allocate packet");
                    ret = ffmpeg.AVERROR(ffmpeg.ENOMEM);
                    goto end;
                }

                if (video_stream != null)
                {
                    Console.WriteLine($"Demuxing video from file '{src_filename}' into '{video_dst_filename}'");
                }

                if (audio_stream != null)
                {
                    Console.WriteLine($"Demuxing audio from file '{src_filename}' into '{audio_dst_filename}'");
                }

                // Main demux loop: route each packet to the matching decoder.
                while (ffmpeg.av_read_frame(fmt_ctx, pkt) >= 0)
                {
                    if (pkt->stream_index == video_stream_idx)
                    {
                        ret = decode_packet(video_dec_ctx, pkt);
                    }
                    else if (pkt->stream_index == audio_stream_idx)
                    {
                        ret = decode_packet(audio_dec_ctx, pkt);
                    }

                    ffmpeg.av_packet_unref(pkt);
                    if (ret < 0)
                    {
                        break;
                    }
                }

                // Flush the decoders (null packet drains buffered frames).
                if (video_dec_ctx != null)
                {
                    decode_packet(video_dec_ctx, null);
                }

                if (audio_dec_ctx != null)
                {
                    decode_packet(audio_dec_ctx, null);
                }

                Console.WriteLine("Demuxing succeeded.");

                if (video_stream != null)
                {
                    Console.WriteLine("Play the output video file with the command: \n" +
                        $"ffplay -autoexit -f rawvideo -pix_fmt {ffmpeg.av_get_pix_fmt_name(pix_fmt)} -video_size {width}x{height} {video_dst_filename}");
                }

                if (audio_stream != null)
                {
                    AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
                    int n_channels = audio_dec_ctx->channels;
                    string fmt;

                    if (ffmpeg.av_sample_fmt_is_planar(sfmt) == 1)
                    {
                        string packed = ffmpeg.av_get_sample_fmt_name(sfmt);
                        Console.WriteLine("Warning: the sample format the decoder produced is planar " +
                            $"({(packed != null ? packed : "?")}). This example will output the first channel only.");
                        sfmt = ffmpeg.av_get_packed_sample_fmt(sfmt);
                        n_channels = 1;
                    }

                    if ((ret = FFmpegHelper.get_format_from_sample_fmt(out fmt, sfmt)) < 0)
                    {
                        goto end;
                    }

                    Console.WriteLine("Play the output audio file with the command:\n" +
                        $"ffplay -autoexit -f {fmt} -ac {n_channels} -ar {audio_dec_ctx->sample_rate} {audio_dst_filename}");
                }

            end:
                ffmpeg.avcodec_free_context(pvideo_dec_ctx);
                ffmpeg.avcodec_free_context(paudio_dec_ctx);
                ffmpeg.avformat_close_input(pfmt_ctx);

                if (video_dst_file != null)
                {
                    video_dst_file.Close();
                }

                if (audio_dst_file != null)
                {
                    audio_dst_file.Close();
                }

                ffmpeg.av_packet_free(ppkt);
                ffmpeg.av_frame_free(pframe);
                ffmpeg.av_free(video_dst_data[0]);
            }

            return 0;
        }
    }
}
</pre> <br /> 실행하면,<br /> <br /> <pre style='margin: 10px 0px 10px 10px; padding: 10px 0px 10px 10px; background-color: #fbedbb; overflow: auto; font-family: Consolas, Verdana;' >
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '...[생략]...\demuxing_decoding\bin\Debug\..\..\..\Samples\sample-10s.mp4':
  Metadata:
    major_brand     : isom
    minor_version   : 512
    compatible_brands: isomiso2avc1mp41
    encoder         : Lavf58.44.100
  Duration: 00:00:10.24, start: 0.000000, bitrate: 4285 kb/s
  Stream #0:0[0x1](und): Video: h264 (High) (avc1 / 0x31637661), yuv420p(progressive), 1920x1080 [SAR 1:1 DAR 16:9], 4207 kb/s, 29.97 fps, 29.97 tbr, 30k tbn (default)
    Metadata:
      handler_name    : VideoHandler
      vendor_id       : [0][0][0][0]
  Stream #0:1[0x2](eng): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 121 kb/s (default)
    Metadata:
      handler_name    : SoundHandler
      vendor_id       : [0][0][0][0]
Demuxing video from file 'E:\git_clone\ffmpeg_autogen_cs\ffmpeg_autogen_cs\demuxing_decoding\bin\Debug\..\..\..\Samples\sample-10s.mp4' into '...[생략]...\demuxing_decoding\bin\Debug\video.out'
Demuxing audio from file 'E:\git_clone\ffmpeg_autogen_cs\ffmpeg_autogen_cs\demuxing_decoding\bin\Debug\..\..\..\Samples\sample-10s.mp4' into '...[생략]...\demuxing_decoding\bin\Debug\audio.out'
video_frame n: 0 coded: 0
audio_frame n: 0 nb_samples: 1024 pts: 0
audio_frame n: 1 nb_samples: 1024 pts: 0.023219954648526078
video_frame n: 1 coded: 3
audio_frame n: 2 nb_samples: 1024 pts: 0.046439909297052155
video_frame n: 2 coded: 2
...[생략]...
audio_frame n: 437 nb_samples: 1024 pts: 10.147120181405896
audio_frame n: 438 nb_samples: 1024 pts: 10.170340136054422
audio_frame n: 439 nb_samples: 1024 pts: 10.193560090702949
video_frame n: 301 coded: 302
video_frame n: 302 coded: 299
Demuxing succeeded.
Play the output video file with the command:
<span style='color: blue; font-weight: bold'>ffplay -autoexit -f rawvideo -pix_fmt yuv420p -video_size 1920x1080 ...[생략]...\demuxing_decoding\bin\Debug\video.out</span>
Warning: the sample format the decoder produced is planar (fltp). This example will output the first channel only.
Play the output audio file with the command:
<span style='color: blue; font-weight: bold'>ffplay -autoexit -f f32le -ac 1 -ar 44100 ...[생략]...\demuxing_decoding\bin\Debug\audio.out</span>
</pre> <br /> 입력 파일의 동영상 파일을 각각 영상과 음성 파일로 분리해 별도의 파일로 저장합니다. 그리고 마지막 콘솔 출력 부분을 보면, 해당 파일들을 ffplay로 어떻게 재생할 수 있는지 명령행 옵션과 함께 보여줍니다.<br /> <br /> (<a target='tab' href='https://github.com/stjeong/ffmpeg_autogen_cs/tree/master/demuxing_decoding'>이 글의 소스 코드는 github에 올려</a>져 있습니다.)<br /> </p><br /> <br /><hr /><span style='color: Maroon'>[이 글에 대해서 여러분들과 의견을 공유하고 싶습니다. 틀리거나 미흡한 부분 또는 의문 사항이 있으시면 언제든 댓글 남겨주십시오.]</span> </div>
첨부파일
스팸 방지용 인증 번호
8835
(왼쪽의 숫자를 입력해야 합니다.)