h264_rkmpp decoder in ffmpeg: avcodec_receive_frame (code = -11)" #74

Closed
opened 2025-12-23 10:32:16 +01:00 by backuprepo · 2 comments
Owner

Originally created by @tu-nie on GitHub (Jul 9, 2024).

您好,可以看一看什么问题吗,这是代码

#include
#include
#include<string.h>
#include "decode_replay.h"

using namespace std;
QString Decode_Replay::replay_path=nullptr;

int useHardwareDecoding = 1; // 启用硬解码

int width = 0;
int height = 0;

static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_DRM_PRIME; //硬件支持的像素格式
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,const enum AVPixelFormat *pix_fmts)
{
const enum AVPixelFormat *p;
for (p = pix_fmts; *p != -1; p++) {
if (*p == hw_pix_fmt)
return *p;
}
fprintf(stderr, "Failed to get HW surface format.\n");
return AV_PIX_FMT_NONE;
}

void Decode_Replay::hardwave_init(AVCodecContext *codecCtx,const AVCodec * codec){
//设置硬解码器
AVHWDeviceType deviceType = AV_HWDEVICE_TYPE_DRM; // 根据架构设置硬解码类型
AVBufferRef * bufferRef = av_hwdevice_ctx_alloc(deviceType);
cout << "alloc hwdevice success address : " << bufferRef << endl;
int ret = av_hwdevice_ctx_init(bufferRef);
if(ret != 0){
cout << "init hardwave device context failed!" << endl;
}
codecCtx->hw_device_ctx = av_buffer_ref(bufferRef);
}

Decode_Replay::Decode_Replay(QString path)
{
this->videoIndex = -1; //记录视频流下标
this->replay_path = path;
this->play_speed = 60;
this->status = 0;
this->screenshots_mark = 0;
}

Decode_Replay::~Decode_Replay()
{
if (packet) {
av_packet_unref(packet);
packet = NULL;
}
if (swsCtx) {
sws_freeContext(swsCtx);
swsCtx = 0;
}
if (frame) {
av_frame_free(&frame);
frame = 0;
}
if (frameHW) {
av_frame_free(&frameHW);
frameHW = 0;
}
if (codecCtx->hw_device_ctx) {
av_buffer_unref(&codecCtx->hw_device_ctx);
}
if (codecCtx) {
avcodec_close(codecCtx);
codecCtx = 0;
}
if (avFormatCtx) {
avformat_close_input(&avFormatCtx);
avformat_free_context(avFormatCtx);
avFormatCtx = 0;
}
}
void Decode_Replay::registerFFmpeg()
{

avdevice_register_all();

}
int Decode_Replay::open_video()
{

avFormatCtx = NULL; // 编解码上下文
avDict = NULL; // 键值对
int ret = 0;

// 打开文件
ret = avformat_open_input(&avFormatCtx, replay_path.toStdString().c_str(), NULL, &avDict);
if (ret != 0) {
    cout << "open input file failed ! code:" << ret << endl;
    return -1;
}

}

int Decode_Replay::find_stream()
{
avFormatCtx->probesize = 20000000;
avFormatCtx->max_analyze_duration = 5 * AV_TIME_BASE;
// 获取流信息
int ret = avformat_find_stream_info(avFormatCtx, avDict == NULL ? NULL : &avDict);
if (ret < 0) {
cout << "get stream info failed ! code :" << ret << endl;
return -1;
}

auto all_seconds = avFormatCtx->duration / 1000000;
printf("the video time [%d:%d]\n", (int)all_seconds / 60, (int)all_seconds % 60);

// 查找需要的流索引
int audioStreamIndex = -1;
for (int i = 0; i < avFormatCtx->nb_streams; i++) {
    if (avFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        videoIndex= i;
    }
    if (avFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        audioStreamIndex = i;
    }
}
if (videoIndex == -1) {
    cout << "do not found video strem !" << endl;
}
if (audioStreamIndex == -1) {
    cout << "do not found audio strem !" << endl;
}
cout << "found video strem index " << videoIndex << endl;
cout << "found audio strem index " << audioStreamIndex << endl;


return this->videoIndex;

}

int Decode_Replay::find_decoder()
{
AVCodecParameters *codecPar = avFormatCtx->streams[videoIndex]->codecpar;
codec = NULL;

if (useHardwareDecoding) {
    cout << "find decoder_by_name h264_rkmpp!" << endl;
    codec = avcodec_find_decoder_by_name("h264_rkmpp");
}
else {
    cout << "codec_id is " << codecPar->codec_id << endl;
    codec = avcodec_find_decoder_by_name("h264");
}
cout << "find decoder by name address :" << codec << endl;
if (!codec) {
    cout << "do not find decoder of the stream !" << endl;
    return -1;
}
// 根据编码器找到上下文
codecCtx = avcodec_alloc_context3(codec);
if (!codecCtx) {
    cout << "do not find codec Context !" << endl;
    return -1;
}

//拷贝解码器的参数到解码器
int ret = avcodec_parameters_to_context(codecCtx, codecPar);
if (ret < 0) {
    cout << "write parameters into context failed !" << endl;
    return -1;
}
width = codecCtx->width;
height = codecCtx->height;
cout << "the codec id [" << codec->id << "] name [" << codecCtx->codec->name << "],[w:" << width << ",h:" << height << "]" << endl;
cout << "the pic format " << codecCtx->pix_fmt << endl;

if (useHardwareDecoding) {
    hardwave_init(codecCtx, codec);
    cout << "init hardwave device context! address : " << codecCtx->hw_device_ctx << endl;
}

AVDictionary *avDict_open = NULL; // 键值对
ret = avcodec_open2(codecCtx, codec, &avDict_open);
if (ret != 0) {
    cout << "open decoder failed!" << endl;
    return -1;
}

}

void Decode_Replay::prepare_image()
{

packet = (AVPacket *)av_malloc(sizeof(AVPacket));
frame = av_frame_alloc();
frameYUV = av_frame_alloc();
frameHW = av_frame_alloc();
// 分配buffer内存空间

int framesize = av_image_alloc(frame->data, frame->linesize, width, height,
    useHardwareDecoding ? AV_PIX_FMT_NV12 : AV_PIX_FMT_YUV420P, 1);
int frameHWsize = av_image_alloc(frameHW->data, frameHW->linesize, width, height,
    useHardwareDecoding ? AV_PIX_FMT_NV12 : AV_PIX_FMT_YUV420P, 1);
int frameYUVsize = av_image_alloc(frameYUV->data, frameYUV->linesize, width, height, AV_PIX_FMT_YUV420P, 1);

qDebug()<<"alloc origin frame size:"<<framesize;
qDebug()<<"alloc hardwave frame size:"<<frameHWsize;
qDebug()<<"alloc YUV frame size:"<<frameYUVsize;

// 创建转换器 将帧转换成我们想要的格式 此处是YUV,同时可以添加滤镜等Filter
swsCtx = sws_getContext(width, height,
    useHardwareDecoding ? AV_PIX_FMT_NV12 : AV_PIX_FMT_YUV420P,
    width, height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

}

void Decode_Replay::decode_frame() {

int packetFinish=-1, frameFinish=-1;

// 从流中读取帧
while (begin = clock(), av_read_frame(avFormatCtx, packet) >= 0)
{
    if (packet->stream_index == videoIndex)
    {

// qDebug()<<"packer size:"<size<<endl;
packetFinish = avcodec_send_packet(codecCtx, packet);
if (packetFinish < 0)
{
// fprintf(stderr, "Error: avcodec_send_packet failed with error code %d\n", packetFinish);
qDebug()<<"Error: avcodec_send_packet failed with error code";
continue;
}

        while (packetFinish >= 0)
        {
            frameFinish = avcodec_receive_frame(codecCtx, frame);
            if (frameFinish == AVERROR(EAGAIN) || frameFinish == AVERROR_EOF)
            {
                break;
            }
            else if (frameFinish == AVERROR(EINVAL))
            {
                fprintf(stderr, "Error: Invalid data when decoding!\n");
                return;
            }
            else if (frameFinish == AVERROR(ENOMEM)) {
                fprintf(stderr, "Error: Insufficient memory when decoding!\n");
                return;
            }
            else if (frameFinish <=0) {

// fprintf(stderr, "Error: avcodec_receive_frame failed with error code %d\n", frameFinish);
qDebug()<<"Error: avcodec_receive_frame failed with error code";
continue;
}

            // 确认帧已成功解码
            printf("Frame decoded: width = %d, height = %d, format = %d\n", frame->width, frame->height, frame->format);

            // 转换帧格式
            if (hw_pix_fmt == frame->format) {
                int ret = av_hwframe_transfer_data(frameHW, frame, 0);
                if (ret < 0) {
                    fprintf(stderr, "Error: av_hwframe_transfer_data failed with error code %d\n", ret);
                    continue;
                }
                sws_scale(swsCtx, (const uint8_t* const*)frameHW->data, frameHW->linesize, 0, height, frameYUV->data, frameYUV->linesize);
            } else {
                sws_scale(swsCtx, (const uint8_t* const*)frame->data, frame->linesize, 0, height, frameYUV->data, frameYUV->linesize);
            }
            QImage img(frameYUV->data[0], width, height, QImage::Format_Indexed8);

// emit sendImg2(img);
if (this->screenshots_mark == 1)
{
QString image_name = this->getTime();
QString path = SetControl::getInstance()->getSetting_imagepath() + "/" + "shots_" + image_name + ".jpg";
img.save(path);
ImageControl::getInstance()->addImagePath(image_name, path);
this->screenshots_mark = 0;
}
}
}
av_packet_unref(packet);
}
end = clock();
}

Originally created by @tu-nie on GitHub (Jul 9, 2024). 您好,可以看一看什么问题吗,这是代码 #include<iostream> #include<thread> #include<string.h> #include "decode_replay.h" using namespace std; QString Decode_Replay::replay_path=nullptr; int useHardwareDecoding = 1; // 启用硬解码 int width = 0; int height = 0; static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_DRM_PRIME; //硬件支持的像素格式 static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,const enum AVPixelFormat *pix_fmts) { const enum AVPixelFormat *p; for (p = pix_fmts; *p != -1; p++) { if (*p == hw_pix_fmt) return *p; } fprintf(stderr, "Failed to get HW surface format.\n"); return AV_PIX_FMT_NONE; } void Decode_Replay::hardwave_init(AVCodecContext *codecCtx,const AVCodec * codec){ //设置硬解码器 AVHWDeviceType deviceType = AV_HWDEVICE_TYPE_DRM; // 根据架构设置硬解码类型 AVBufferRef * bufferRef = av_hwdevice_ctx_alloc(deviceType); cout << "alloc hwdevice success address : " << bufferRef << endl; int ret = av_hwdevice_ctx_init(bufferRef); if(ret != 0){ cout << "init hardwave device context failed!" << endl; } codecCtx->hw_device_ctx = av_buffer_ref(bufferRef); } Decode_Replay::Decode_Replay(QString path) { this->videoIndex = -1; //记录视频流下标 this->replay_path = path; this->play_speed = 60; this->status = 0; this->screenshots_mark = 0; } Decode_Replay::~Decode_Replay() { if (packet) { av_packet_unref(packet); packet = NULL; } if (swsCtx) { sws_freeContext(swsCtx); swsCtx = 0; } if (frame) { av_frame_free(&frame); frame = 0; } if (frameHW) { av_frame_free(&frameHW); frameHW = 0; } if (codecCtx->hw_device_ctx) { av_buffer_unref(&codecCtx->hw_device_ctx); } if (codecCtx) { avcodec_close(codecCtx); codecCtx = 0; } if (avFormatCtx) { avformat_close_input(&avFormatCtx); avformat_free_context(avFormatCtx); avFormatCtx = 0; } } void Decode_Replay::registerFFmpeg() { avdevice_register_all(); } int Decode_Replay::open_video() { avFormatCtx = NULL; // 编解码上下文 avDict = NULL; // 键值对 int ret = 0; // 打开文件 ret = avformat_open_input(&avFormatCtx, replay_path.toStdString().c_str(), NULL, &avDict); if (ret != 0) { cout << "open input file failed ! code:" << ret << endl; return -1; } } int Decode_Replay::find_stream() { avFormatCtx->probesize = 20000000; avFormatCtx->max_analyze_duration = 5 * AV_TIME_BASE; // 获取流信息 int ret = avformat_find_stream_info(avFormatCtx, avDict == NULL ? NULL : &avDict); if (ret < 0) { cout << "get stream info failed ! code :" << ret << endl; return -1; } auto all_seconds = avFormatCtx->duration / 1000000; printf("the video time [%d:%d]\n", (int)all_seconds / 60, (int)all_seconds % 60); // 查找需要的流索引 int audioStreamIndex = -1; for (int i = 0; i < avFormatCtx->nb_streams; i++) { if (avFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { videoIndex= i; } if (avFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { audioStreamIndex = i; } } if (videoIndex == -1) { cout << "do not found video strem !" << endl; } if (audioStreamIndex == -1) { cout << "do not found audio strem !" << endl; } cout << "found video strem index " << videoIndex << endl; cout << "found audio strem index " << audioStreamIndex << endl; return this->videoIndex; } int Decode_Replay::find_decoder() { AVCodecParameters *codecPar = avFormatCtx->streams[videoIndex]->codecpar; codec = NULL; if (useHardwareDecoding) { cout << "find decoder_by_name h264_rkmpp!" << endl; codec = avcodec_find_decoder_by_name("h264_rkmpp"); } else { cout << "codec_id is " << codecPar->codec_id << endl; codec = avcodec_find_decoder_by_name("h264"); } cout << "find decoder by name address :" << codec << endl; if (!codec) { cout << "do not find decoder of the stream !" << endl; return -1; } // 根据编码器找到上下文 codecCtx = avcodec_alloc_context3(codec); if (!codecCtx) { cout << "do not find codec Context !" << endl; return -1; } //拷贝解码器的参数到解码器 int ret = avcodec_parameters_to_context(codecCtx, codecPar); if (ret < 0) { cout << "write parameters into context failed !" << endl; return -1; } width = codecCtx->width; height = codecCtx->height; cout << "the codec id [" << codec->id << "] name [" << codecCtx->codec->name << "],[w:" << width << ",h:" << height << "]" << endl; cout << "the pic format " << codecCtx->pix_fmt << endl; if (useHardwareDecoding) { hardwave_init(codecCtx, codec); cout << "init hardwave device context! address : " << codecCtx->hw_device_ctx << endl; } AVDictionary *avDict_open = NULL; // 键值对 ret = avcodec_open2(codecCtx, codec, &avDict_open); if (ret != 0) { cout << "open decoder failed!" << endl; return -1; } } void Decode_Replay::prepare_image() { packet = (AVPacket *)av_malloc(sizeof(AVPacket)); frame = av_frame_alloc(); frameYUV = av_frame_alloc(); frameHW = av_frame_alloc(); // 分配buffer内存空间 int framesize = av_image_alloc(frame->data, frame->linesize, width, height, useHardwareDecoding ? AV_PIX_FMT_NV12 : AV_PIX_FMT_YUV420P, 1); int frameHWsize = av_image_alloc(frameHW->data, frameHW->linesize, width, height, useHardwareDecoding ? AV_PIX_FMT_NV12 : AV_PIX_FMT_YUV420P, 1); int frameYUVsize = av_image_alloc(frameYUV->data, frameYUV->linesize, width, height, AV_PIX_FMT_YUV420P, 1); qDebug()<<"alloc origin frame size:"<<framesize; qDebug()<<"alloc hardwave frame size:"<<frameHWsize; qDebug()<<"alloc YUV frame size:"<<frameYUVsize; // 创建转换器 将帧转换成我们想要的格式 此处是YUV,同时可以添加滤镜等Filter swsCtx = sws_getContext(width, height, useHardwareDecoding ? AV_PIX_FMT_NV12 : AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); } void Decode_Replay::decode_frame() { int packetFinish=-1, frameFinish=-1; // 从流中读取帧 while (begin = clock(), av_read_frame(avFormatCtx, packet) >= 0) { if (packet->stream_index == videoIndex) { // qDebug()<<"packer size:"<<packet->size<<endl; packetFinish = avcodec_send_packet(codecCtx, packet); if (packetFinish < 0) { // fprintf(stderr, "Error: avcodec_send_packet failed with error code %d\n", packetFinish); qDebug()<<"Error: avcodec_send_packet failed with error code"; continue; } while (packetFinish >= 0) { frameFinish = avcodec_receive_frame(codecCtx, frame); if (frameFinish == AVERROR(EAGAIN) || frameFinish == AVERROR_EOF) { break; } else if (frameFinish == AVERROR(EINVAL)) { fprintf(stderr, "Error: Invalid data when decoding!\n"); return; } else if (frameFinish == AVERROR(ENOMEM)) { fprintf(stderr, "Error: Insufficient memory when decoding!\n"); return; } else if (frameFinish <=0) { // fprintf(stderr, "Error: avcodec_receive_frame failed with error code %d\n", frameFinish); qDebug()<<"Error: avcodec_receive_frame failed with error code"; continue; } // 确认帧已成功解码 printf("Frame decoded: width = %d, height = %d, format = %d\n", frame->width, frame->height, frame->format); // 转换帧格式 if (hw_pix_fmt == frame->format) { int ret = av_hwframe_transfer_data(frameHW, frame, 0); if (ret < 0) { fprintf(stderr, "Error: av_hwframe_transfer_data failed with error code %d\n", ret); continue; } sws_scale(swsCtx, (const uint8_t* const*)frameHW->data, frameHW->linesize, 0, height, frameYUV->data, frameYUV->linesize); } else { sws_scale(swsCtx, (const uint8_t* const*)frame->data, frame->linesize, 0, height, frameYUV->data, frameYUV->linesize); } QImage img(frameYUV->data[0], width, height, QImage::Format_Indexed8); // emit sendImg2(img); if (this->screenshots_mark == 1) { QString image_name = this->getTime(); QString path = SetControl::getInstance()->getSetting_imagepath() + "/" + "shots_" + image_name + ".jpg"; img.save(path); ImageControl::getInstance()->addImagePath(image_name, path); this->screenshots_mark = 0; } } } av_packet_unref(packet); } end = clock(); }
backuprepo 2025-12-23 10:32:16 +01:00
  • closed this issue
  • added the
    question
    label
Author
Owner

@nyanmisaka commented on GitHub (Jul 9, 2024):

I can't review your code. But I guess you can't handle the default async API.

Check this. aba28f2c1b

@nyanmisaka commented on GitHub (Jul 9, 2024): I can't review your code. But I guess you can't handle the default async API. Check this. https://github.com/JeffyCN/FFmpeg/commit/aba28f2c1bb22d7929bb329e732f7cebdcd7bd71
Author
Owner

@wzw88486969 commented on GitHub (Oct 14, 2024):

` for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
is_annexb = strcmp(av_fourcc2str(ifmt_ctx->streams[i]->codecpar->codec_tag), "avc1") == 0 ? 0 : 1;
break;
}
}

if (video_stream_index == -1) {
    fprintf(stderr, "no video stream found.\n");
    goto end;
}

fprintf(stdout, "is_annexb=%d\n", is_annexb);  
if (!is_annexb) {
    if (ret = open_bitstream_filter(ifmt_ctx->streams[video_stream_index], &bsf_ctx, "h264_mp4toannexb") < 0) {
       fprintf(stderr, "open_bitstream_filter failed, ret=%d\n", ret);  
       goto end;
    }
}
while (av_read_frame(ifmt_ctx, pkt) >= 0) {
    if (pkt->stream_index == video_stream_index) {
        if (is_annexb) {
            ret = write_output(of, pkt);
        } else {
            ret = filter_stream(bsf_ctx, pkt, of, 0);
        }
        if (ret < 0) goto end;
    } else {
        fprintf(stdout, "read a packet, not a video frame\n");
    }
    av_packet_unref(pkt);
}
if (!is_annexb) {//flush bistream filter
    filter_stream(bsf_ctx, NULL, of, 1);
}

AVCodecParameters *codecpar = p->frtCtx->streams[i]->codecpar;
if(codecpar->extradata_size > 0){
//store sps pps
}

use h264_mp4toannexb, then to do decode

sps + pps + I frame Feeding mpp

`

use h264_mp4toannexb, then to do decode

@wzw88486969 commented on GitHub (Oct 14, 2024): ` for (int i = 0; i < ifmt_ctx->nb_streams; i++) { if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { video_stream_index = i; is_annexb = strcmp(av_fourcc2str(ifmt_ctx->streams[i]->codecpar->codec_tag), "avc1") == 0 ? 0 : 1; break; } } if (video_stream_index == -1) { fprintf(stderr, "no video stream found.\n"); goto end; } fprintf(stdout, "is_annexb=%d\n", is_annexb); if (!is_annexb) { if (ret = open_bitstream_filter(ifmt_ctx->streams[video_stream_index], &bsf_ctx, "h264_mp4toannexb") < 0) { fprintf(stderr, "open_bitstream_filter failed, ret=%d\n", ret); goto end; } } while (av_read_frame(ifmt_ctx, pkt) >= 0) { if (pkt->stream_index == video_stream_index) { if (is_annexb) { ret = write_output(of, pkt); } else { ret = filter_stream(bsf_ctx, pkt, of, 0); } if (ret < 0) goto end; } else { fprintf(stdout, "read a packet, not a video frame\n"); } av_packet_unref(pkt); } if (!is_annexb) {//flush bistream filter filter_stream(bsf_ctx, NULL, of, 1); } AVCodecParameters *codecpar = p->frtCtx->streams[i]->codecpar; if(codecpar->extradata_size > 0){ //store sps pps } use h264_mp4toannexb, then to do decode sps + pps + I frame Feeding mpp ` use h264_mp4toannexb, then to do decode
Sign in to join this conversation.
No milestone
No project
No assignees
1 participant
Notifications
Due date
The due date is invalid or out of range. Please use the format "yyyy-mm-dd".

No due date set.

Dependencies

No dependencies set.

Reference: starred/ffmpeg-rockchip#74
No description provided.