
iOS Voice Intercom (3): Decoding AAC in Real Time with FFmpeg and Playing the PCM


The process is as follows:

1. Decoding

Initialize the decoder

- (BOOL)initAACDecoderWithSampleRate:(int)sampleRate channel:(int)channel bit:(int)bit {
    av_register_all();
    avformat_network_init();
    self.aacCodec = avcodec_find_decoder(AV_CODEC_ID_AAC);
    av_init_packet(&_aacPacket);
    if (self.aacCodec != NULL) {
        self.aacCodecCtx = avcodec_alloc_context3(self.aacCodec);
        // Configure the codec context
        self.aacCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
        self.aacCodecCtx->sample_rate = sampleRate;
        self.aacCodecCtx->channels = channel;
        self.aacCodecCtx->bit_rate = bit;
        // NB: the layout is hard-coded to stereo; keep it consistent with `channel`
        self.aacCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
        // Open the codec
        if (avcodec_open2(self.aacCodecCtx, self.aacCodec, NULL) >= 0) {
            self.aacFrame = av_frame_alloc();
        }
    }
    // Compare against NULL; casting the pointer to BOOL can truncate on 64-bit
    return self.aacFrame != NULL;
}
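
Note that av_register_all(), av_init_packet(), and the channels/channel_layout fields are deprecated (and partly removed) in recent FFmpeg releases. If you build against FFmpeg 5.1 or later, the same setup would look roughly like the sketch below; this is an assumption about your FFmpeg version, not part of the original code:

// Sketch only: equivalent initialization against FFmpeg 5.1+, where the channel
// layout lives in AVChannelLayout and av_register_all() no longer exists.
// (With newer FFmpeg, also prefer av_packet_alloc() over av_init_packet() for the packet.)
- (BOOL)initAACDecoderWithSampleRate:(int)sampleRate channel:(int)channel bit:(int)bit {
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_AAC);
    if (!codec) return NO;
    self.aacCodecCtx = avcodec_alloc_context3(codec);
    self.aacCodecCtx->sample_rate = sampleRate;
    self.aacCodecCtx->bit_rate = bit;
    av_channel_layout_default(&self.aacCodecCtx->ch_layout, channel); // replaces channels/channel_layout
    if (avcodec_open2(self.aacCodecCtx, codec, NULL) < 0) return NO;
    self.aacFrame = av_frame_alloc();
    return self.aacFrame != NULL;
}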

Decode AAC; the decoded PCM is returned through the completion block

- (void)AACDecoderWithMediaData:(NSData *)mediaData sampleRate:(int)sampleRate completion:(void (^)(uint8_t *, size_t))completion {
    if (!self.aacCodecCtx) {
        return;
    }
    _aacPacket.data = (uint8_t *)mediaData.bytes;
    _aacPacket.size = (int)mediaData.length;
    avcodec_send_packet(self.aacCodecCtx, &_aacPacket);
    int result = avcodec_receive_frame(self.aacCodecCtx, self.aacFrame);
    // On older FFmpeg versions, decode with avcodec_decode_audio4 instead:
    /*int gotframe = 0;
    int result = avcodec_decode_audio4(self.aacCodecCtx,
                                       self.aacFrame,
                                       &gotframe,
                                       &_aacPacket);*/
    if (result == 0) {
        // Convert the decoded samples (usually planar float) to packed 16-bit stereo PCM
        struct SwrContext *au_convert_ctx = swr_alloc();
        au_convert_ctx = swr_alloc_set_opts(au_convert_ctx,
                                            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, sampleRate,
                                            self.aacCodecCtx->channel_layout, self.aacCodecCtx->sample_fmt, self.aacCodecCtx->sample_rate,
                                            0, NULL);
        swr_init(au_convert_ctx);
        // Size the output buffer for the *output* format; resampling may change the sample count
        int max_out_samples = (int)av_rescale_rnd(self.aacFrame->nb_samples, sampleRate, self.aacCodecCtx->sample_rate, AV_ROUND_UP);
        int out_buffer_size = av_samples_get_buffer_size(NULL, 2, max_out_samples, AV_SAMPLE_FMT_S16, 1);
        uint8_t *out_buffer = (uint8_t *)av_malloc(out_buffer_size);
        // Convert; the return value is the number of samples produced per channel
        int out_samples = swr_convert(au_convert_ctx, &out_buffer, max_out_samples, (const uint8_t **)self.aacFrame->data, self.aacFrame->nb_samples);
        swr_free(&au_convert_ctx);
        au_convert_ctx = NULL;
        if (completion && out_samples > 0) {
            // 2 channels * 2 bytes per S16 sample
            completion(out_buffer, (size_t)out_samples * 2 * sizeof(int16_t));
        }
        av_free(out_buffer);
    }
}
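
For context, a hypothetical call site might look like the following (the decoder and player property names are assumptions; PCMPlayer and playWithData: come from the playback section below). The PCM must be copied inside the block, because the decoder frees out_buffer as soon as the block returns; each mediaData is expected to be one complete AAC frame (typically ADTS-framed) received from the camera:

// Hypothetical wiring between the decoder and the PCM player described below.
[self.decoder AACDecoderWithMediaData:aacFrameData
                           sampleRate:32000
                           completion:^(uint8_t *pcm, size_t size) {
    NSData *pcmData = [NSData dataWithBytes:pcm length:size]; // deep copy before the buffer is freed
    [self.player playWithData:pcmData];
}];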

Release the decoder

- (void)releaseAACDecoder {
    if(self.aacCodecCtx) {
        avcodec_close(self.aacCodecCtx);
        avcodec_free_context(&_aacCodecCtx);
        self.aacCodecCtx = NULL;
    }
    if(self.aacFrame) {
        av_frame_free(&_aacFrame);
        self.aacFrame = NULL;
    }
}

2. Playback

PCM playback uses Audio Queue. The flow is as follows:

(Figure: Audio Queue playback process)

As the figure shows, Audio Queue playback follows a producer-consumer pattern:
allocate several buffers, fill (produce) each one in turn and enqueue it, start playback (consume), then reuse each consumed buffer via the callback, and repeat the cycle.
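
The snippets below rely on a few instance variables and constants that the article never shows. A minimal sketch of what the PCMPlayer class extension might declare follows; the exact values of QUEUE_BUFFER_SIZE and MIN_SIZE_PER_FRAME are assumptions:

#import <AudioToolbox/AudioToolbox.h>

#define QUEUE_BUFFER_SIZE 3      // number of reusable buffers (assumed)
#define MIN_SIZE_PER_FRAME 8192  // capacity of each buffer in bytes (assumed)

@interface PCMPlayer () {
    AudioQueueRef audioQueue;                                  // the playback queue
    AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE];  // reusable buffers
    BOOL audioQueueBufferUsed[QUEUE_BUFFER_SIZE];              // in-flight flags
    AudioStreamBasicDescription _audioDescription;             // PCM format description
    OSStatus osState;
    NSLock *sysnLock;
    NSMutableData *tempData;
    NSMutableString *str;                                      // tracks recent buffer indices
}
@end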

Create the buffers and the queue, configure the parameters, and start the queue

- (instancetype)init
{
    self = [super init];
    if (self) {
        str = [NSMutableString string];
        sysnLock = [[NSLock alloc] init];
        // Used for PCM playback
        if (_audioDescription.mSampleRate <= 0) {
            // Describe the audio format
            _audioDescription.mSampleRate = 32000.0; // sample rate
            _audioDescription.mFormatID = kAudioFormatLinearPCM;
            // Flags describing how samples are stored: signed integer vs. float,
            // big- vs. little-endian, packed or not
            _audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
            // 1 = mono, 2 = stereo
            _audioDescription.mChannelsPerFrame = 1;
            // Frames per packet; for linear PCM this is 1
            _audioDescription.mFramesPerPacket = 1;
            // Bits per sample (16-bit quantization)
            _audioDescription.mBitsPerChannel = 16;
            _audioDescription.mBytesPerFrame = (_audioDescription.mBitsPerChannel / 8) * _audioDescription.mChannelsPerFrame;
            // Bytes per packet = bytes per frame * frames per packet
            _audioDescription.mBytesPerPacket = _audioDescription.mBytesPerFrame * _audioDescription.mFramesPerPacket;
        }
        // Create an output queue that plays on the queue's internal thread
        AudioQueueNewOutput(&_audioDescription, AudioPlayerAQInputCallback, (__bridge void * _Nullable)(self), nil, 0, 0, &audioQueue);
        // Set the volume
        AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, 1.0);
        // Allocate the reusable buffers
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
            audioQueueBufferUsed[i] = false;
            osState = AudioQueueAllocateBuffer(audioQueue, MIN_SIZE_PER_FRAME, &audioQueueBuffers[i]);
            NSLog(@"AudioQueueAllocateBuffer, osState=%d", osState);
        }
        osState = AudioQueueStart(audioQueue, NULL);
        if (osState != noErr) {
            NSLog(@"AudioQueueStart Error");
        }
    }
    return self;
}
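
One thing to watch: the decoder in section 1 converts to two-channel S16 PCM, while the ASBD above declares a single channel, so the two sides need to be reconciled (either resample to mono in the SwrContext, or describe stereo here); mSampleRate must likewise match the sampleRate passed to the decoder. If you keep the stereo converter output, the relevant fields would be, as a sketch:

// ASBD fields for interleaved 16-bit stereo PCM, matching AV_CH_LAYOUT_STEREO / AV_SAMPLE_FMT_S16.
_audioDescription.mChannelsPerFrame = 2;
_audioDescription.mBitsPerChannel   = 16;
_audioDescription.mBytesPerFrame    = (_audioDescription.mBitsPerChannel / 8) * _audioDescription.mChannelsPerFrame; // 4 bytes
_audioDescription.mBytesPerPacket   = _audioDescription.mBytesPerFrame * _audioDescription.mFramesPerPacket;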

Fill a buffer

// Fill a buffer
- (void)playWithData:(NSData *)data {
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        [self->sysnLock lock];
        self->tempData = [NSMutableData new];
        [self->tempData appendData:data];
        NSUInteger len = self->tempData.length;
        Byte *bytes = (Byte *)malloc(len);
        [self->tempData getBytes:bytes length:len];
        int i = 0;
        // Find a buffer that is not currently in use
        while (true) {
            usleep(1000); // avoid spinning the CPU
            if (!self->audioQueueBufferUsed[i]) {
                self->audioQueueBufferUsed[i] = true;
                break;
            } else {
                i++;
                if (i >= QUEUE_BUFFER_SIZE) {
                    i = 0;
                }
            }
        }
        // Track the indices of the last three buffers used; if they were all 0,
        // the author treats this as a sign the queue has gone wrong and resets it (see the PS below)
        if (self->str.length < 3) {
            [self->str appendString:[NSString stringWithFormat:@"%d", i]];
        } else if (self->str.length == 3) {
            [self->str deleteCharactersInRange:NSMakeRange(0, 1)];
            [self->str appendString:[NSString stringWithFormat:@"%d", i]];
        }
        if ([self->str isEqualToString:@"000"]) {
            // reset
            [self resetPlay];
        }
        // Copy the PCM into the buffer (len must not exceed MIN_SIZE_PER_FRAME)
        self->audioQueueBuffers[i]->mAudioDataByteSize = (unsigned int)len;
        memcpy(self->audioQueueBuffers[i]->mAudioData, bytes, len);
        free(bytes);
        // Enqueue the buffer for playback
        AudioQueueEnqueueBuffer(self->audioQueue, self->audioQueueBuffers[i], 0, NULL);
        [self->sysnLock unlock];
    });
}
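
One caveat the snippet above does not handle: memcpy writes len bytes into a buffer that was allocated with a capacity of MIN_SIZE_PER_FRAME, so an oversized PCM chunk would overflow it. A minimal guard (my addition, not in the original) could clamp the copy to the buffer's real capacity:

// Clamp the copy so it never writes past the end of mAudioData.
AudioQueueBufferRef buf = self->audioQueueBuffers[i];
UInt32 copyLen = (UInt32)MIN(len, buf->mAudioDataBytesCapacity);
buf->mAudioDataByteSize = copyLen;
memcpy(buf->mAudioData, bytes, copyLen);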

In the callback, mark the buffer as unused so it can be reused

// Playback callback: called when the queue has finished with a buffer
static void AudioPlayerAQInputCallback(void *inUserData, AudioQueueRef audioQueueRef, AudioQueueBufferRef audioQueueBufferRef) {
    PCMPlayer *player = (__bridge PCMPlayer*)inUserData;
    [player resetBufferState:audioQueueRef and:audioQueueBufferRef];
}

- (void)resetBufferState:(AudioQueueRef)audioQueueRef and:(AudioQueueBufferRef)audioQueueBufferRef {
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        // Mark this buffer as unused
        if (audioQueueBufferRef == audioQueueBuffers[i]) {
            audioQueueBufferUsed[i] = false;
        }
    }
}

- (void)dealloc {
    if (audioQueue != nil) {
        AudioQueueStop(audioQueue, true);
        AudioQueueDispose(audioQueue, true); // also release the queue and its buffers
    }
    audioQueue = nil;
    sysnLock = nil;
}
PS: During playback, the Audio Queue may begin to stutter after a while and the audio then gradually fades out. The author's workaround in this code is to reset the queue:

- (void)resetPlay {
    if (audioQueue != nil) {
        AudioQueueReset(audioQueue);
    }
}

With that, the full real-time decode-and-play pipeline is complete. The scenario behind this series is two-way real-time voice intercom between a surveillance camera and a mobile client.

