Qt Study Notes: Audio/Video Capture, Encoding, and Muxing

zhy

Published 2020-04-25

   After capturing audio and video, we need to encode and compress the data. For this we need three AVFormatContext structs: two for input (audio and video) and one for output.

 

The implementation steps are as follows:

The capture process was covered in detail in the previous article, so here we focus on the encoding and output pipeline:

    1. Initialize the output AVFormatContext with avformat_alloc_output_context2

    //Allocate an AVFormatContext (ofmt_ctx) for the output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);

 

    2. Find the encoders (AAC for audio, H.264 for video) as AVCodec, allocate an AVCodecContext for each encoder, and set the context parameters (pix_fmt, width, height, time_base, etc.)

    //Find the encoders
    pCodec_v = avcodec_find_encoder(AV_CODEC_ID_H264);
    pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
    //Allocate the encoder contexts
    pCodecCtx_v=avcodec_alloc_context3(pCodec_v);
    pCodecCtx_a=avcodec_alloc_context3(pCodec_a);
    //Set the encoder context parameters
    pCodecCtx_v->pix_fmt = AV_PIX_FMT_YUV420P;

    pCodecCtx_v->width = ifmt_ctx_v->streams[videoStream]->codec->width;

    pCodecCtx_v->height = ifmt_ctx_v->streams[videoStream]->codec->height;

    pCodecCtx_v->time_base.num = 1;
    pCodecCtx_v->time_base.den = 25;

    pCodecCtx_v->bit_rate = 400000;

    pCodecCtx_v->gop_size = 250;

 

    3. Open the encoders, create the output streams (AVStream) with avformat_new_stream, and open the output file

    //Open the encoders
    avcodec_open2(pCodecCtx_v, pCodec_v,&param);
    avcodec_open2(pCodecCtx_a,pCodec_a,NULL);
    //Create the output streams from the encoders
    AVStream *video_st = avformat_new_stream(ofmt_ctx, pCodec_v);
    AVStream *audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);

    //Open the output file; out_path is the output/streaming URL
    if (avio_open(&ofmt_ctx->pb,out_path, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return 0;
    }

 

    4. Open the output URL (avio_open) and write the file header with avformat_write_header(ofmt_ctx, &opt)

Because newer versions of FFmpeg rewrite ofmt_ctx's time_base while writing the header, which makes the video play back too fast, we pass an AVDictionary option to pin it.

    //Set the frame rate (newer FFmpeg rewrites ofmt_ctx's time_base when writing the header)
    AVDictionary *opt = 0;
    av_dict_set_int(&opt, "video_track_timescale", 25, 0);
    //Write the header
    avformat_write_header(ofmt_ctx,&opt);

 

    5. Initialize the packet queues and the Message struct, create a thread that reads video packets, create the transcoding threads, then enter a while() loop reading audio packets into their queue. (When the main thread ends, the SDL threads it created are shut down.)

If reading packets and transcoding ran in the same thread, the slow decode/encode step could overflow FFmpeg's capture buffer. So we create dedicated transcoding threads; the Message struct and the packet queues act as the go-between, handing captured packets to the transcoding threads.

 

Transcoding threads:

    After initialization, enter a while() loop: take a packet from the queue, decode it, re-encode it, and write it out.

 

    Audio and video follow the same sequence: decode -> encode -> set pts -> write the packet. A condensed sketch of this wiring is shown below.
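A condensed sketch of the producer/consumer wiring, assuming the PacketQueue helpers, the Message struct, and the thread functions defined in the full source at the end of this article:

    //Condensed sketch only; PacketQueue, packet_queue_*, Message and the
    //thread functions are all defined in the full source below.
    packet_queue_init(&msg->q);        //video packet queue
    packet_queue_init(&msg->q_audio);  //audio packet queue
    SDL_CreateThread(getAudioThread, "getAudioThread", msg);                         //producer: audio packets
    SDL_CreateThread(decodeAndEncodeThread, "decodeAndEncodeThread", msg);           //consumer: video
    SDL_CreateThread(decodeAndEncodeAudioThread, "decodeAndEncodeAudioThread", msg); //consumer: audio
    while (av_read_frame(ifmt_ctx_v, dec_pkt) >= 0)  //producer: video packets, in the main thread
        packet_queue_put(&msg->q, dec_pkt);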

 

The hard part of the implementation is computing pts:

pts is expressed in units of time_base; pts * time_base gives the actual time.

 

Video pts calculation:

A video's fps is the number of frames captured per second. If the frame rate (frame_rate) is 25, we get 25 packets per second.

The interval between packets is 1/25 s. With time_base = 1/1000000, that interval expressed in time_base units is (1/time_base) * (1/25).

Interval: calc_duration = (1/time_base) * (1/frame_rate)

 

pts increases monotonically:

1st packet: 0 * calc_duration

2nd packet: 1 * calc_duration

3rd packet: 2 * calc_duration

...

n-th packet: (n-1) * calc_duration

 

So pts = (n-1) * (1/time_base) * (1/frame_rate)

This pts is in units of the input stream's time_base; we then use av_rescale_q(pts, input time_base, output time_base) to convert it into units of the output stream's time_base.
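A minimal sketch of this calculation, assuming the input runs on the microsecond time base {1, AV_TIME_BASE} as in the source below:

    //Minimal sketch: pts of the n-th video packet, rescaled to the output
    //stream's time base. Assumes the input time base is {1, AV_TIME_BASE}.
    static int64_t video_pts(int64_t n, double frame_rate, AVRational out_tb)
    {
        AVRational in_tb = { 1, AV_TIME_BASE };                        //input time base (microseconds)
        int64_t calc_duration = (int64_t)(AV_TIME_BASE / frame_rate);  //interval between two frames
        int64_t pts = (n - 1) * calc_duration;                         //pts in input-time-base units
        return av_rescale_q(pts, in_tb, out_tb);                       //pts in output-time-base units
    }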

 

Audio pts calculation:

Audio looks more complicated, but it is straightforward once you understand the sample rate and sample count.

The sample rate is the number of samples per second (per channel). One difference from video is that a single audio packet may contain multiple frames. A frame's nb_samples field is the number of samples (per channel) in that frame, which lets us compute how many frames fit in one second.

Frames per second: sample_rate / samples per frame (the analogue of the video frame rate)

So the interval between two frames is: samples per frame / sample_rate

Expressed in time_base units: (1/time_base) * (nb_samples / sample_rate)
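The same idea as a minimal sketch, again assuming the {1, AV_TIME_BASE} input time base:

    //Minimal sketch: duration of one audio frame in the input time base,
    //i.e. (1/time_base) * (nb_samples / sample_rate).
    static int64_t audio_frame_duration(int nb_samples, int sample_rate)
    {
        return (int64_t)AV_TIME_BASE * nb_samples / sample_rate;
    }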

 

One more thing: the camera and the microphone may open at different times, so this offset has to be added to the pts.

 

The pts we compute ourselves may drift from the real timestamps. This can be corrected by anchoring to the pts of the captured packets themselves: normalized pts = current packet's pts - first packet's pts.

 

Audio buffering: a decoded frame is usually large, while the encoder's frame is smaller. For example an AAC frame holds 1024 samples, but a decoded frame may hold eight or nine thousand. So we create an audio buffer: decoded frames go in, and each time we take out 1024 samples' worth of data to encode. Otherwise avcodec_encode_audio2 fails with "more samples than frame size". A sketch of such a buffer follows.
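A minimal sketch of such a buffer using FFmpeg's AVAudioFifo (libavutil/audio_fifo.h is already included in the source below, although the full source uses a hand-rolled byte buffer instead); enc_ctx, decoded and enc_frame are placeholder names:

    //Sketch only, not the implementation used below. Decoded frames of any
    //size go in; fixed chunks of frame_size samples (1024 for AAC) come out.
    AVAudioFifo *fifo = av_audio_fifo_alloc(enc_ctx->sample_fmt, enc_ctx->channels, 1);
    //after decoding one frame:
    av_audio_fifo_write(fifo, (void **)decoded->data, decoded->nb_samples);
    //drain in encoder-sized chunks:
    while (av_audio_fifo_size(fifo) >= enc_ctx->frame_size) {
        av_audio_fifo_read(fifo, (void **)enc_frame->data, enc_ctx->frame_size);
        //...encode enc_frame and write the packet as in the source below...
    }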

 

Full source code:

#include <stdio.h>
#include <QImage>
#include <QString>
#include <QDebug>
#include <QCameraInfo>
#include <QAudioDeviceInfo>
#include <qmutex.h>
#include <QList>

extern "C"

{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/pixfmt.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/time.h"
#include "libavutil/audio_fifo.h"

#include <SDL.h>
#include <SDL_audio.h>
#include <SDL_types.h>
#include <SDL_name.h>
#include <SDL_main.h>
#include <SDL_config.h>
#include <SDL_mutex.h>
}
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio

DECLARE_ALIGNED(16,uint8_t,audio_buf2) [AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];

typedef struct FrameDataNode
{
    uint8_t * buffer;
    int size;

    FrameDataNode()
    {
        buffer = NULL;
        size = 0;
    }
} FrameDataNode;

//Not currently used
typedef struct AVFrameList {
    AVFrame pkt;
    struct AVFrameList *next;
} AVFrameList;
//Not currently used
typedef struct FrameQueue {
    AVFrameList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    SDL_mutex *mutex;
    SDL_cond *cond;
} FrameQueue;


typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

QMutex mMutex;//mutex for the PCM list (declared but not used below)
QList<FrameDataNode> mPcmBufferList; //PCM data list

typedef struct Message{
    PacketQueue q, q_audio;//queues of captured packets
    FrameQueue frameq,frameq_audio;//queues of decoded frames
    int videoStream, audioStream,outStreams_a_index, outStreams_v_index;//input and output stream indices (audio, video)
    AVFormatContext *ifmt_ctx_v, *ifmt_ctx_a, *ofmt_ctx;
    AVCodecContext *pCodecCtx_v,*pCodecCtx_a;//codec contexts for the streams
    SwsContext *img_convert_ctx;
    AVFrame *pFrameYUV;//frame for the converted YUV data
    AVStream *video_st,*audio_st;
    uint8_t *out_buffer;
    //a_v_duration is the delay between opening the camera and the microphone
    double cur_pts_v, cur_pts_a, a_v_duration;//current pts of each stream

}Message;


//Flush the packet queue
static void packet_queue_flush(PacketQueue *q)
{

    AVPacketList *pkt, *pkt1;
    //Lock the mutex
    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1)
    {
        pkt1 = pkt->next;

        //Drop the packet's reference to its data
        av_free_packet(&pkt->pkt);
        //Free the list node
        av_freep(&pkt);

    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    //Unlock the mutex
    SDL_UnlockMutex(q->mutex);
}

//Flush the frame queue
static void frame_queue_flush(FrameQueue *q)
{

    AVFrameList *pkt, *pkt1;
    //Lock the mutex
    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1)
    {
        pkt1 = pkt->next;

        //Release the frame's buffers (the AVFrame is embedded in the node, so it must not be av_free'd itself)
        av_frame_unref(&pkt->pkt);
        //Free the list node
        av_freep(&pkt);

    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    //Unlock the mutex
    SDL_UnlockMutex(q->mutex);
}

//Destroy the packet queue
static void packet_queue_deinit(PacketQueue *q) {
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

//Destroy the frame queue
static void frame_queue_deinit(FrameQueue *q) {
    frame_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

//Initialize the packet queue
void packet_queue_init(PacketQueue *q) {
    //Zero the struct
    memset(q, 0, sizeof(PacketQueue));
    //Create the mutex and condition variable
    q->mutex = SDL_CreateMutex();

    q->cond = SDL_CreateCond();
    //Initialize the remaining fields
    q->size = 0;
    q->nb_packets = 0;
    q->first_pkt = NULL;
    q->last_pkt = NULL;
}


//Initialize the frame queue
void frame_queue_init(FrameQueue *q) {
    //Zero the struct
    memset(q, 0, sizeof(FrameQueue));
    //Create the mutex and condition variable
    q->mutex = SDL_CreateMutex();

    q->cond = SDL_CreateCond();
    //Initialize the remaining fields
    q->size = 0;
    q->nb_packets = 0;
    q->first_pkt = NULL;
    q->last_pkt = NULL;
}


//Enqueue a packet
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

    AVPacketList *pkt1;
    if (av_dup_packet(pkt) < 0) {
        return -1;
    }
    pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    //Lock
    SDL_LockMutex(q->mutex);
    //If last_pkt is NULL the queue is empty, so the new node becomes the head; otherwise append it to the old tail. Either way the tail now points at the new node
    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    //Signal the consumer
    SDL_CondSignal(q->cond);
    //Unlock
    SDL_UnlockMutex(q->mutex);
    return 0;
}

//Enqueue a frame
int frame_queue_put(FrameQueue *q, AVFrame *pkt) {

    AVFrameList *pkt1;
//    if (av_dup_packet(pkt) < 0) {
//        return -1;
//    }
    pkt1 = (AVFrameList*)av_malloc(sizeof(AVFrameList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    //Lock
    SDL_LockMutex(q->mutex);
    //If last_pkt is NULL the queue is empty, so the new node becomes the head; otherwise append it to the old tail. Either way the tail now points at the new node
    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
//    q->size += pkt1->pkt.size;
    //Signal the consumer
    SDL_CondSignal(q->cond);
    //Unlock
    SDL_UnlockMutex(q->mutex);
    return 0;
}

//Dequeue a packet
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
    AVPacketList *pkt1;
    int ret = -1;

    SDL_LockMutex(q->mutex);

    for (;;) {

        pkt1 = q->first_pkt;
        if (pkt1) {//queue has data
            q->first_pkt = pkt1->next;//move the head to the second node

            if (!q->first_pkt)
                q->last_pkt = NULL;

            q->nb_packets--;//one node fewer
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);//free the node
            ret = 1;
            break;
        } else if (!block) {//queue empty, non-blocking call
            ret = -1;
            break;
        } else {//queue empty, blocking call
            SDL_CondWait(q->cond, q->mutex);//no break here: once the condition is signalled, the for loop retries the dequeue above
        }

    }

    SDL_UnlockMutex(q->mutex);
    return ret;
}

//Dequeue a frame
static int frame_queue_get(FrameQueue *q, AVFrame *pkt, int block) {
    AVFrameList *pkt1;
    int ret = -1;

    SDL_LockMutex(q->mutex);

    for (;;) {

        pkt1 = q->first_pkt;
        if (pkt1) {//queue has data
            q->first_pkt = pkt1->next;//move the head to the second node

            if (!q->first_pkt)
                q->last_pkt = NULL;

            q->nb_packets--;//one node fewer
//            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);//free the node
            ret = 1;
            break;
        } else if (!block) {//queue empty, non-blocking call
            ret = -1;
            break;
        } else {//queue empty, blocking call
            SDL_CondWait(q->cond, q->mutex);//no break here: once the condition is signalled, the for loop retries the dequeue above
        }

    }

    SDL_UnlockMutex(q->mutex);
    return ret;
}


/* select layout with the highest channel count */
static uint64_t select_channel_layout(const AVCodec *codec)
{
    const uint64_t *p;
    uint64_t best_ch_layout = 0;
    int best_nb_channels   = 0;

    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;

    p = codec->channel_layouts;
    while (*p) {
        int nb_channels = av_get_channel_layout_nb_channels(*p);

        if (nb_channels > best_nb_channels) {
            best_ch_layout    = *p;
            best_nb_channels = nb_channels;
        }
        p++;
    }
    return best_ch_layout;
}

/* just pick the highest supported samplerate */
static int select_sample_rate(const AVCodec *codec)
{
    const int *p;
    int best_samplerate = 0;

    if (!codec->supported_samplerates)
        return 44100;

    p = codec->supported_samplerates;
    while (*p) {
        if (!best_samplerate || abs(44100 - *p) < abs(44100 - best_samplerate))
            best_samplerate = *p;
        p++;
    }
    return best_samplerate;
}

/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

//Append decoded PCM data to the buffer list
void inputPcmBuffer(Message *message, uint8_t *buffer, int size)
{
    FrameDataNode node;
    node.size = size;
    node.buffer = buffer;

    mPcmBufferList.append(node);
}


//Video transcoding thread: decode, re-encode, write
int decodeAndEncodeThread(void *arg){
    int frame_index=0;
    Message *msg = (Message *)arg;
    AVPacket *dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
    AVPacket enc_pkt;
    SwsContext *img_convert_ctx = msg->img_convert_ctx;
    int ret,dec_got_frame, enc_got_frame, videoStream, audioStream;
    ret = -1;
    dec_got_frame = -1;
    enc_got_frame = -1;
    videoStream = msg->videoStream;
    audioStream = msg->audioStream;
    AVFormatContext *ifmt_ctx_v = msg->ifmt_ctx_v;
    AVFormatContext *ofmt_ctx   = msg->ofmt_ctx;
    AVCodecContext *pCodecCtx_v,*pCodecCtx_a;//codec contexts
    pCodecCtx_v = msg->pCodecCtx_v;
    pCodecCtx_a = msg->pCodecCtx_a;
    AVStream *video_st = msg->video_st;
    AVFrame *pFrame, *pFrameYUV;
    pFrameYUV = msg->pFrameYUV;

    int i=0;
    int64_t firstPts;
    //Take one packet from the queue at a time
    while(1){

        if(packet_queue_get(&msg->q, dec_pkt, 1) >= 0){
            //            if (exit_thread)
            //                break;
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");

            pFrame = av_frame_alloc();//allocate the frame

            if (!pFrame) {
                ret = AVERROR(ENOMEM);
                return 0;
            }

            //Transcode video
            if(1){

                 //Decode one video frame: input a compressed AVPacket, output a decoded AVFrame.
                 ret = avcodec_decode_video2(ifmt_ctx_v->streams[videoStream]->codec, pFrame,
                         &dec_got_frame, dec_pkt);


                 //Decoding failed
                 if (ret < 0) {
                     av_frame_free(&pFrame);
                     av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                     continue;
                 }
                 if (dec_got_frame){

                     if(i==0){
                         firstPts = dec_pkt->pts;
                         i=1;
                     }

                     //Convert the frame's pixel format/color space
                     sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx_v->height, pFrameYUV->data, pFrameYUV->linesize);

                     enc_pkt.data = NULL;

                     enc_pkt.size = 0;

                     //Initialize enc_pkt to defaults
                     av_init_packet(&enc_pkt);

                     //Encode the YUV frame
                     ret = avcodec_encode_video2(pCodecCtx_v, &enc_pkt, pFrameYUV, &enc_got_frame);

                     if (enc_got_frame == 1){

                         enc_pkt.stream_index = video_st->index;

                         //Compute the PTS
                         //output stream time_base {1,1000}
                         AVRational time_base = ofmt_ctx->streams[msg->outStreams_v_index]->time_base;

                         AVRational r_framerate1 = ifmt_ctx_v->streams[videoStream]->r_frame_rate;//25
                         //input stream time_base
                         AVRational time_base_q = { 1, AV_TIME_BASE };

                         qDebug()<<av_q2d(r_framerate1)<<av_q2d(time_base);

                         //Duration between 2 frames (us)
                         int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  //in FFmpeg's internal time base (microseconds)


                         msg->cur_pts_v = frame_index*(1/av_q2d(r_framerate1));

                         //Parameters
//                       enc_pkt.pts = (double)(frame_index*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
//                         enc_pkt.pts = av_rescale_q(frame_index*calc_duration, time_base_q, time_base);
                         enc_pkt.pts = av_rescale_q(dec_pkt->pts-firstPts, time_base_q, time_base)/10;
                         enc_pkt.dts = enc_pkt.pts;

                         enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));

                         enc_pkt.pos = -1;
                         qDebug()<<"enc_pkt.pts:"<<enc_pkt.pts<<"calc_duration:"<<calc_duration;

                         //Write the packet (interleaved)
                         ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);

                         av_free_packet(&enc_pkt);
                         frame_index++;
                     }
                 }

                 av_frame_free(&pFrame);
                 av_free_packet(dec_pkt);

            }

        }
        else{
            SDL_Delay(1);
            av_log(NULL, AV_LOG_DEBUG, "读取帧,队列为空:av_read_frame fail!!");
        }
    }

    packet_queue_deinit(&msg->q);

    av_write_trailer(ofmt_ctx);

    av_free(msg->out_buffer);

    av_free(pFrameYUV);

    avcodec_close(pCodecCtx_v);

    avformat_close_input(&ifmt_ctx_v);

    avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    return 0;
}

//Audio transcoding thread: decode, re-encode, write
int decodeAndEncodeAudioThread(void *arg){
    int frame_index_a=0;
    Message *msg = (Message *)arg;
    AVPacket *dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
    AVPacket enc_pkt;
    int ret,dec_got_frame, enc_got_frame, videoStream, audioStream;
    ret = -1;
    dec_got_frame = -1;
    enc_got_frame = -1;
    videoStream = msg->videoStream;
    audioStream = msg->audioStream;
    AVFormatContext *ifmt_ctx_a = msg->ifmt_ctx_a;
    AVFormatContext *ofmt_ctx   = msg->ofmt_ctx;
    AVCodecContext *pCodecCtx_v,*pCodecCtx_a;//codec contexts
    pCodecCtx_v = msg->pCodecCtx_v;
    pCodecCtx_a = msg->pCodecCtx_a;
    AVStream *audio_st = msg->audio_st;
    AVFrame *pFrame, *pFrameYUV;
    pFrameYUV = msg->pFrameYUV;

    AVFrame *frame = av_frame_alloc();//frame handed to the encoder
    AVFrame *aFrame = av_frame_alloc();//frame produced by the decoder
    //ONEFrameSize is the byte size of one encoder frame (4096 here)
    int ONEFrameSize = av_samples_get_buffer_size(NULL, pCodecCtx_a->channels,pCodecCtx_a->frame_size,pCodecCtx_a->sample_fmt, 1);
    uint8_t* frame_buf = (uint8_t *)av_malloc(ONEFrameSize);
    avcodec_fill_audio_frame(frame, pCodecCtx_a->channels, pCodecCtx_a->sample_fmt,(const uint8_t*)frame_buf, ONEFrameSize, 1);

    AVPacket pkt;
    av_new_packet(&pkt,ONEFrameSize);
    uint8_t * mAacBuffer = (uint8_t * )malloc(ONEFrameSize*100);
    int mAacBufferIndex = 0;
    int mAacBufferSize = 0;

    //Unit for the PTS (output stream)
    AVRational time_base = ofmt_ctx->streams[msg->outStreams_a_index]->time_base;//{1,1000};
    //input stream time_base
    AVRational time_base_q = {1, AV_TIME_BASE};

    //In the input time base: interval between two frames
    double calc_duration = (double)AV_TIME_BASE*(1/((double)pCodecCtx_a->sample_rate/1024));
    //In the output time base: delay between opening the camera and the microphone
    int a_v_duration = av_rescale_q(msg->a_v_duration* (double)AV_TIME_BASE ,time_base_q,time_base);

    //pts of the first packet
    int64_t firstPts = -1;
    msg->cur_pts_a = 0;


    while(1){

        if(packet_queue_get(&msg->q_audio, dec_pkt, 1) >= 0){
            //            if (exit_thread)
            //                break;
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");

            pFrame = av_frame_alloc();//获取空间

            if (!pFrame) {
                ret = AVERROR(ENOMEM);
                return 0;
            }

            //av_packet_rescale_ts(dec_pkt, ifmt_ctx->streams[dec_pkt->stream_index]->time_base,
            //  ifmt_ctx->streams[dec_pkt->stream_index]->codec->time_base);
            //Decode one audio packet: input a compressed AVPacket, output a decoded AVFrame.
            ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioStream]->codec, aFrame, &dec_got_frame, dec_pkt);

            //Note: `frame`, not `aFrame`: the encoder's frame size
            //is fixed by the codec (1024 samples for AAC)
            frame->nb_samples = pCodecCtx_a->frame_size;
            frame->format = pCodecCtx_a->sample_fmt;

            //Decoding failed
            if (ret < 0) {
                av_frame_free(&pFrame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            //Decoded successfully
            if (dec_got_frame){

                if(firstPts == -1){
                    firstPts = dec_pkt->pts;
                }


                int framSize = av_samples_get_buffer_size(NULL,pCodecCtx_a->channels, aFrame->nb_samples,
                                                          ifmt_ctx_a->streams[audioStream]->codec->sample_fmt, 1);

                //In the input time base: duration of the data still sitting in the buffer
                int64_t timeshift =  mAacBufferSize/4 * AV_TIME_BASE/pCodecCtx_a->sample_rate;

                //if (dec_pkt->pts - firstPts) - buffered duration > cur_pts_a, resync cur_pts_a
                if( (dec_pkt->pts - firstPts - timeshift)/10 > msg->cur_pts_a/10){
                    msg->cur_pts_a = (dec_pkt->pts -firstPts -timeshift)/10;
                }
                qDebug()<<"dec_pkt->pts:"<<dec_pkt->pts
                            <<"firstPts:"<<firstPts<<"timeshift:"<<timeshift
                            <<"mAacBufferSize/4:"<<mAacBufferSize/4<<msg->cur_pts_a;



                uint8_t * audio_buf = (uint8_t *)malloc(framSize);

                //Copy out the decoded samples
                memcpy(audio_buf, aFrame->data[0], framSize);
                //Append them to the PCM list
                inputPcmBuffer(msg,(uint8_t*)audio_buf,framSize);

                //Take one (unsplit) frame's worth of data out of the list
                FrameDataNode node = mPcmBufferList.takeFirst();
                memcpy(mAacBuffer+mAacBufferSize,node.buffer,node.size);
                mAacBufferSize += node.size;
                free(node.buffer);


                /// Every chunk handed to the encoder must be exactly
                /// "ONEFrameSize" bytes, hence the loop below
                while(1)
                {

                    enc_pkt.data = NULL;
                    enc_pkt.size = 0;
                    //Initialize enc_pkt to defaults
                    av_init_packet(&enc_pkt);

                    int size = mAacBufferSize - mAacBufferIndex;
                    if (size < ONEFrameSize) //not enough data left for a full frame
                    {
                        //move the remainder to the front of the buffer
                        memcpy(mAacBuffer,mAacBuffer+mAacBufferIndex,size);
                        mAacBufferIndex = 0;
                        mAacBufferSize = size;
                        break;
                    }

                    frame->data[0] = mAacBuffer+mAacBufferIndex;  //point the encoder frame at the next chunk of samples
                    mAacBufferIndex += ONEFrameSize;

                    int got_frame=0;

                    //we have to compute the pts ourselves
                    int ret = avcodec_encode_audio2(pCodecCtx_a, &enc_pkt,frame, &got_frame);
                    if (got_frame==1)
                    {
                        enc_pkt.stream_index = audio_st->index;

                        //In wall-clock time: the current timestamp
//                      msg->cur_pts_a = frame_index_a*(1/((double)pCodecCtx_a->sample_rate/frame->nb_samples))+ msg->a_v_duration;
                        //In the output time base: interval between two frames
//                      enc_pkt.duration = av_rescale_q(calc_duration,time_base_q,time_base);
                        enc_pkt.pts = av_rescale_q(msg->cur_pts_a,time_base_q,time_base) + a_v_duration ;

                        enc_pkt.dts = enc_pkt.pts;
                        enc_pkt.pos = -1;

                        msg->cur_pts_a += calc_duration;


                        qDebug()<<"cur_pts_a:"<<enc_pkt.pts<<"calc_duration:"<<calc_duration<<"msg->cur_pts_a"<<msg->cur_pts_a;
                        qDebug()<<frame_index_a;
                        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);

                        av_free_packet(&enc_pkt);
                        frame_index_a++;

                    }
                }

            }

            av_free_packet(dec_pkt);
            av_frame_free(&pFrame);
        }
    }

    return 0;
}


int getVideoThread(void *arg){

    Message *msg = (Message *)arg;
    AVFormatContext *ifmt_ctx_v = msg->ifmt_ctx_v;
    AVPacket *dec_pkt;//packet before decoding
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));


    while(1){

        if(av_read_frame(ifmt_ctx_v, dec_pkt) >= 0){
            //Set the packet's stream index
    //        dec_pkt->stream_index = video_st->index;
            qDebug()<<"read video_pts:"<<dec_pkt->pts;
            //Enqueue
            packet_queue_put(&msg->q,dec_pkt);


        }
        else SDL_Delay(2);
    }

    return 0;
}


int getAudioThread(void *arg){
    Message *msg = (Message *)arg;
    AVFormatContext *ifmt_ctx_a = msg->ifmt_ctx_a;
    AVPacket *dec_pkt;//packet before decoding
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));

    while(1){

        if(av_read_frame(ifmt_ctx_a, dec_pkt) >= 0){
            packet_queue_put(&msg->q_audio,dec_pkt);

        }
        else SDL_Delay(2);
    }

    return 0;
}


#undef main
int main()
{
    Message message;
    memset(&message,0,sizeof(Message));
    Message *msg = &message;

    AVFormatContext *ifmt_ctx_v,*ifmt_ctx_a,*ofmt_ctx;//input and output contexts

    AVCodecContext *pCodecCtx_v,*pCodecCtx_a;//encoder contexts

    AVCodec *pCodec_v,*pCodec_a;//encoder info referenced by the AVCodecContexts

    AVFrame  *pFrameYUV;//frame holding the converted YUV data

    AVPacket *dec_pkt;//packet before decoding

    uint8_t *out_buffer;

    AVInputFormat *ifmt_v,*ifmt_a;

    static struct SwsContext *img_convert_ctx;

    int videoStream, audioStream, i;

    const char *out_path;

    //Output path
    out_path="test.flv";
    //out_path="rtmp://39.105.25.195:1935/live/test";

    av_register_all(); //initialize FFmpeg
    avdevice_register_all();//initialize devices (required before grabbing the camera)
    avformat_network_init();//initialize networking

    ifmt_ctx_v = NULL;
    ifmt_ctx_a = NULL;

    //Open the camera
    ifmt_v=av_find_input_format("dshow");
    AVDictionary *options = 0;
    //Set the camera frame rate
    av_dict_set(&options, "framerate", "25", 0);
    avformat_open_input(&ifmt_ctx_v,"video=HP Wide Vision HD Camera",ifmt_v,&options) ;
    int64_t start_time=av_gettime();
    //Open the microphone
    ifmt_a=av_find_input_format("dshow");
    avformat_open_input(&ifmt_ctx_a,"audio=麦克风阵列 (Realtek(R) Audio)",ifmt_a,NULL);
    int64_t end_time=av_gettime();

    qDebug()<<end_time - start_time;


    //Get video stream info
    if (avformat_find_stream_info(ifmt_ctx_v, NULL) < 0) {
        printf("Couldn't find video stream information.\n");
        return 0;
    }
    //Get audio stream info
    if(avformat_find_stream_info(ifmt_ctx_a,NULL)<0){
        printf("Couldn't find audio stream information.\n");
    }

    videoStream = -1;
    audioStream = -1;

    ///Scan the streams in each input, find the video and audio streams,
    ///and record their indices in videoStream and audioStream
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            qDebug()<<videoStream;
        }
    }
    for(i=0; i<ifmt_ctx_a->nb_streams; i++){
        if(ifmt_ctx_a->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
            audioStream = i;
            qDebug()<<audioStream;
        }
    }
    ///If videoStream is still -1, no video stream was found
    if (videoStream == -1) {
        printf("Didn't find a video stream.\n");
        return 0;
    }
    else
    {
        printf("video stream OK\n");
    }
    ///Same for audio
    if(audioStream == -1){
        printf("Didn't find an audio stream.\n");
        return 0;
    }
    else
    {
        printf("audio stream OK\n");
    }
    //Check the frame rate
    qDebug()<<ifmt_ctx_v->streams[videoStream]->r_frame_rate.num<<ifmt_ctx_v->streams[videoStream]->r_frame_rate.den;



    //Open the decoders
    if (avcodec_open2(ifmt_ctx_v->streams[videoStream]->codec,
                      avcodec_find_decoder(ifmt_ctx_v->streams[videoStream]->codec->codec_id), NULL)<0)
    {
        printf("Could not open video decoder.\n");
        return 0;
    }
    if(avcodec_open2(ifmt_ctx_a->streams[audioStream]->codec,
                     avcodec_find_decoder(ifmt_ctx_a->streams[audioStream]->codec->codec_id),NULL))
    {
        printf("Could not open audio decoder.\n");
        return 0;
    }


    //Allocate an AVFormatContext (ofmt_ctx) for the output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv" , out_path);

    //Find the encoders
    pCodec_v = avcodec_find_encoder(AV_CODEC_ID_H264);
    pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);


    if (!pCodec_v){
        printf("Can not find video encoder!\n");
        return 0;
    }
    else
    {
        printf("find encoder OK\n");
    }
    if(!pCodec_a){
        printf("Can not find audio encoder!\n");
    }
    else
    {
        printf("find encoder OK\n");
    }

    //Allocate the encoder contexts
    pCodecCtx_v=avcodec_alloc_context3(pCodec_v);
    pCodecCtx_a=avcodec_alloc_context3(pCodec_a);

    //Set the encoder context parameters
    pCodecCtx_v->pix_fmt = AV_PIX_FMT_YUV420P;

    pCodecCtx_v->width = ifmt_ctx_v->streams[videoStream]->codec->width;

    pCodecCtx_v->height = ifmt_ctx_v->streams[videoStream]->codec->height;

    pCodecCtx_v->time_base.num = 1;
    pCodecCtx_v->time_base.den = 25;

    pCodecCtx_v->bit_rate = 400000;

    pCodecCtx_v->gop_size = 250;

    /* Some formats, for example FLV, want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER){
        pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;
        pCodecCtx_v->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    //H264 codec param
//    pCodecCtx_v->me_range = 16;
//    pCodecCtx_v->max_qdiff = 4;
//    pCodecCtx_v->qcompress = 0.6;
    pCodecCtx_v->qmin = 10;
    pCodecCtx_v->qmax = 51;
    //Optional Param
    pCodecCtx_v->max_b_frames = 3;
    // Set H264 preset and tune
    AVDictionary *param = 0;
    //encoder preset
    av_dict_set(&param, "preset", "fast", 0);
    //real-time (low-latency) encoding
    av_dict_set(&param, "tune", "zerolatency", 0);
    //capture buffer size
    av_dict_set_int(&param, "rtbufsize", 3041280 * 100, 0);


    pCodecCtx_a->codec = pCodec_a;
    pCodecCtx_a->codec_id = AV_CODEC_ID_AAC;
    pCodecCtx_a->time_base.den = 1000;
    pCodecCtx_a->time_base.num = 1;
    pCodecCtx_a->bit_rate = 64000;
    pCodecCtx_a->sample_fmt =  AV_SAMPLE_FMT_S16;
    /* select other audio parameters supported by the encoder */
    pCodecCtx_a->sample_rate    = select_sample_rate(pCodec_a);

    pCodecCtx_a->channel_layout = AV_CH_LAYOUT_STEREO;
    pCodecCtx_a->channels       = av_get_channel_layout_nb_channels(pCodecCtx_a->channel_layout);


    qDebug()<<"sample_rate::"<<pCodecCtx_a->sample_rate;

    //Open the encoders
    if (avcodec_open2(pCodecCtx_v, pCodec_v,&param) < 0){
        printf("Failed to open video encoder!\n");
        return 0;
    }
    else
    {
        printf("open encoder OK\n");
    }
    if(avcodec_open2(pCodecCtx_a,pCodec_a,NULL) < 0){
        printf("Failed to open audio encoder!\n");
        return 0;
    }
    else{
        printf("open encoder OK\n");
    }

    //Create the output streams from the encoders
    AVStream *video_st = avformat_new_stream(ofmt_ctx, pCodec_v);
    AVStream *audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);

    //indices of the audio and video streams within the output
//    qDebug()<<video_st->index<<audio_st->index;

    if (video_st == NULL){
        return 0;
    }

    video_st->time_base.den = 25;
    video_st->time_base.num = 1;
    video_st->codec = pCodecCtx_v;

    audio_st->time_base.den = 1000;
    audio_st->time_base.num = 1;
    audio_st->codec = pCodecCtx_a;


    //Open the output file; out_path is the output/streaming URL
    if (avio_open(&ofmt_ctx->pb,out_path, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return 0;
    }
    else
    {
        printf("open output file OK\n");
    }

    //Dump the output format info
    av_dump_format(ofmt_ctx, 0, out_path, 1);

    //Set the frame rate (newer FFmpeg rewrites ofmt_ctx's time_base when writing the header)
    AVDictionary *opt = 0;
    av_dict_set_int(&opt, "video_track_timescale", 25, 0);
    //connection timeout
    //av_dict_set(&opt, "stimeout", std::to_string(2 * 1000000).c_str(), 0);
    //transport protocol, default udp
    //av_dict_set(&opt, "rtsp_transport", "tcp", 0);
    //Write the header
    avformat_write_header(ofmt_ctx,&opt);

    //Frame for the converted YUV data
    pFrameYUV = av_frame_alloc();

    //camera data has a pix fmt of RGB, convert it to YUV420
    img_convert_ctx = sws_getContext(ifmt_ctx_v->streams[videoStream]->codec->width, ifmt_ctx_v->streams[videoStream]->codec->height,

                                     ifmt_ctx_v->streams[videoStream]->codec->pix_fmt, pCodecCtx_v->width, pCodecCtx_v->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);



    out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_v->width, pCodecCtx_v->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx_v->width, pCodecCtx_v->height);



    //////////////////////////////////////////////////////////////////////////////
    /// ////////             Start reading data        ////////////////////////////
    printf("Begin\n");


    //Initialize the queues that hold the captured packets
    packet_queue_init(&msg->q);
    packet_queue_init(&msg->q_audio);
    //Initialize the queues for decoded frames
//    frame_queue_init(&msg->frameq);
//    frame_queue_init(&msg->frameq_audio);


    msg->a_v_duration = (double)(end_time - start_time)/1000000;
    msg->audioStream = audioStream;
    msg->outStreams_a_index = audio_st->index;
    msg->audio_st = audio_st;
    msg->ifmt_ctx_a = ifmt_ctx_a;
    msg->ifmt_ctx_v = ifmt_ctx_v;
    msg->img_convert_ctx = img_convert_ctx;
    msg->ofmt_ctx = ofmt_ctx;
    msg->pCodecCtx_a = pCodecCtx_a;
    msg->pCodecCtx_v = pCodecCtx_v;
    msg->pFrameYUV = pFrameYUV;
    msg->videoStream = videoStream;
    msg->outStreams_v_index = video_st->index;
    msg->video_st = video_st;
    msg->out_buffer = out_buffer;

    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));

//    SDL_CreateThread(getVideoThread, "getVideoThread", msg);
    SDL_CreateThread(getAudioThread, "getAudioThread", msg);
    SDL_CreateThread(decodeAndEncodeThread, "decodeAndEncodeThread", msg);
    SDL_CreateThread(decodeAndEncodeAudioThread,"decodeAndEncodeAudioThread", msg);



    while(1){

        if(av_read_frame(ifmt_ctx_v, dec_pkt) >= 0){
            //Enqueue
            packet_queue_put(&msg->q,dec_pkt);
        }

    }

    return 0;
}