h264_rkmpp decoder in ffmpeg: avcodec_receive_frame (code = -11) #74
Originally created by @tu-nie on GitHub (Jul 9, 2024).
Hello, could you take a look at what the problem is? Here is the code:
#include <iostream>
#include <cstdio>
#include <string.h>
#include "decode_replay.h"

using namespace std;

QString Decode_Replay::replay_path = nullptr;

int useHardwareDecoding = 1; // enable hardware decoding
int width = 0;
int height = 0;
static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_DRM_PRIME; // pixel format supported by the hardware
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;
    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }
    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}
void Decode_Replay::hardwave_init(AVCodecContext *codecCtx, const AVCodec *codec) {
    // set up the hardware decoder
    AVHWDeviceType deviceType = AV_HWDEVICE_TYPE_DRM; // hardware decode type chosen for this platform
    AVBufferRef *bufferRef = av_hwdevice_ctx_alloc(deviceType);
    cout << "alloc hwdevice success, address: " << bufferRef << endl;
    int ret = av_hwdevice_ctx_init(bufferRef);
    if (ret != 0) {
        cout << "init hardware device context failed!" << endl;
    }
    codecCtx->hw_device_ctx = av_buffer_ref(bufferRef);
    av_buffer_unref(&bufferRef); // drop the local reference; the codec context now owns one
}
Decode_Replay::Decode_Replay(QString path)
{
    this->videoIndex = -1; // index of the video stream
    this->replay_path = path;
    this->play_speed = 60;
    this->status = 0;
    this->screenshots_mark = 0;
}
Decode_Replay::~Decode_Replay()
{
    if (packet) {
        av_packet_free(&packet); // also unrefs any pending data
    }
    if (swsCtx) {
        sws_freeContext(swsCtx);
        swsCtx = NULL;
    }
    if (frame) {
        av_frame_free(&frame);
    }
    if (frameHW) {
        av_frame_free(&frameHW);
    }
    if (codecCtx) {
        if (codecCtx->hw_device_ctx) {
            av_buffer_unref(&codecCtx->hw_device_ctx);
        }
        avcodec_free_context(&codecCtx);
    }
    if (avFormatCtx) {
        avformat_close_input(&avFormatCtx); // also frees the context
    }
}
void Decode_Replay::registerFFmpeg()
{
}
int Decode_Replay::open_video()
{
}
int Decode_Replay::find_stream()
{
    avFormatCtx->probesize = 20000000;
    avFormatCtx->max_analyze_duration = 5 * AV_TIME_BASE;
    // retrieve stream information
    int ret = avformat_find_stream_info(avFormatCtx, avDict == NULL ? NULL : &avDict);
    if (ret < 0) {
        cout << "get stream info failed! code: " << ret << endl;
        return -1;
    }
    return 0;
}
int Decode_Replay::find_decoder()
{
    AVCodecParameters *codecPar = avFormatCtx->streams[videoIndex]->codecpar;
    codec = NULL;
}
void Decode_Replay::prepare_image()
{
}
void Decode_Replay::decode_frame() {
    while (av_read_frame(avFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoIndex) {
            // qDebug() << "packet size:" << packet->size << endl;
            packetFinish = avcodec_send_packet(codecCtx, packet);
            if (packetFinish < 0)
            {
                // fprintf(stderr, "Error: avcodec_send_packet failed with error code %d\n", packetFinish);
                qDebug() << "Error: avcodec_send_packet failed with error code";
                continue;
            }
            frameFinish = avcodec_receive_frame(codecCtx, frameHW);
            if (frameFinish < 0)
            {
                // fprintf(stderr, "Error: avcodec_receive_frame failed with error code %d\n", frameFinish);
                qDebug() << "Error: avcodec_receive_frame failed with error code";
                continue;
            }
            // ... conversion of the decoded frame into the QImage img was lost in the original post ...
            // emit sendImg2(img);
            if (this->screenshots_mark == 1)
            {
                QString image_name = this->getTime();
                QString path = SetControl::getInstance()->getSetting_imagepath() + "/" + "shots_" + image_name + ".jpg";
                img.save(path);
                ImageControl::getInstance()->addImagePath(image_name, path);
                this->screenshots_mark = 0;
            }
        }
        av_packet_unref(packet);
    }
    end = clock();
}
@nyanmisaka commented on GitHub (Jul 9, 2024):
I can't review your code. But I guess you can't handle the default async API.
Check this.
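With the send/receive API, AVERROR(EAGAIN) (-11) from avcodec_receive_frame() is not a decode error: it only means no frame is ready yet and the decoder wants more input, which happens routinely with an asynchronous decoder like h264_rkmpp for the first packets of a stream. Below is a minimal sketch of a loop that treats it that way; it reuses codecCtx, avFormatCtx and videoIndex from the code above, and handle_frame() is a hypothetical placeholder for whatever the caller does with a decoded frame.

```cpp
#include <iostream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Hypothetical placeholder for frame processing (scaling, display, ...).
static void handle_frame(AVFrame *frame) { (void)frame; }

static int decode_loop(AVFormatContext *avFormatCtx, AVCodecContext *codecCtx, int videoIndex)
{
    AVPacket *packet = av_packet_alloc();
    AVFrame  *frame  = av_frame_alloc();
    int ret = 0;

    while (av_read_frame(avFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoIndex) {
            ret = avcodec_send_packet(codecCtx, packet);
            // EAGAIN from send_packet means the decoder's input queue is full;
            // a complete implementation would drain output and re-send this packet.
            if (ret < 0 && ret != AVERROR(EAGAIN)) {
                std::cerr << "avcodec_send_packet failed: " << ret << std::endl;
                av_packet_unref(packet);
                break;
            }
            // Drain every frame that is ready. EAGAIN here just means
            // "feed more packets" and must not be treated as a failure.
            while ((ret = avcodec_receive_frame(codecCtx, frame)) >= 0) {
                handle_frame(frame);
                av_frame_unref(frame);
            }
            if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                std::cerr << "avcodec_receive_frame failed: " << ret << std::endl;
                av_packet_unref(packet);
                break;
            }
        }
        av_packet_unref(packet);
    }

    // Flush: signal EOF and collect the frames still buffered in the decoder.
    avcodec_send_packet(codecCtx, NULL);
    while (avcodec_receive_frame(codecCtx, frame) >= 0) {
        handle_frame(frame);
        av_frame_unref(frame);
    }

    av_frame_free(&frame);
    av_packet_free(&packet);
    return 0;
}
```

Only if receive_frame keeps returning -11 for every packet of the whole file is something actually wrong, usually a sign that the decoder never received usable parameter sets (see the next comment).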
@wzw88486969 commented on GitHub (Oct 14, 2024):
```cpp
for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
    if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        video_stream_index = i;
        is_annexb = strcmp(av_fourcc2str(ifmt_ctx->streams[i]->codecpar->codec_tag), "avc1") == 0 ? 0 : 1;
        break;
    }
}

AVCodecParameters *codecpar = ifmt_ctx->streams[video_stream_index]->codecpar;
if (codecpar->extradata_size > 0) {
    // store the SPS/PPS found in extradata
}
```

Use h264_mp4toannexb first, then decode: feed SPS + PPS + the I-frame to MPP.
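Below is a minimal sketch of that step, assuming the same ifmt_ctx and video_stream_index as above and a dec_ctx already opened for h264_rkmpp (the names are illustrative). The h264_mp4toannexb bitstream filter converts AVCC ("avc1") packets to Annex-B start codes and inserts the SPS/PPS from extradata before keyframes, matching the SPS + PPS + I-frame layout described in the comment.

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/bsf.h>
#include <libavformat/avformat.h>
}

// Create and initialise the h264_mp4toannexb filter from the input stream's
// parameters (the SPS/PPS live in codecpar->extradata).
static int setup_annexb_filter(AVFormatContext *ifmt_ctx, int video_stream_index,
                               AVBSFContext **bsf_ctx)
{
    const AVBitStreamFilter *filter = av_bsf_get_by_name("h264_mp4toannexb");
    if (!filter)
        return AVERROR_BSF_NOT_FOUND;

    int ret = av_bsf_alloc(filter, bsf_ctx);
    if (ret < 0)
        return ret;

    ret = avcodec_parameters_copy((*bsf_ctx)->par_in,
                                  ifmt_ctx->streams[video_stream_index]->codecpar);
    if (ret < 0)
        return ret;

    return av_bsf_init(*bsf_ctx);
}

// Per packet: run it through the filter and send the Annex-B output
// (start codes, with SPS/PPS inserted before keyframes) to the decoder.
static int filter_and_send(AVBSFContext *bsf_ctx, AVCodecContext *dec_ctx, AVPacket *pkt)
{
    int ret = av_bsf_send_packet(bsf_ctx, pkt);
    if (ret < 0)
        return ret;

    while ((ret = av_bsf_receive_packet(bsf_ctx, pkt)) >= 0) {
        ret = avcodec_send_packet(dec_ctx, pkt);
        av_packet_unref(pkt);
        // A complete implementation would retry on AVERROR(EAGAIN) after draining frames.
        if (ret < 0 && ret != AVERROR(EAGAIN))
            return ret;
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
```

The filter is only needed when the input is AVCC ("avc1"); streams that are already Annex-B can be sent to the decoder as-is.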