Here is a summary of how I implemented an online (streaming) audio player in one of my projects.

Step 1: initialize the AudioSession. This gives us interruption handling and keeps playback alive in the background and on the lock screen.

- (id)initsession {

    // Register an interruption listener so playback can pause/resume around phone calls, alarms, etc.
    AudioSessionInitialize(NULL, NULL, interruptionListenerCallback, self);

    // Media-playback category: audio keeps playing in the background and while the screen is locked.
    UInt32 sessionCategory = kAudioSessionCategory_MediaPlayback;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);

    // Do not mix with other audio (0 = mixing off).
    UInt32 doSetProperty = 0;
    AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers, sizeof(doSetProperty), &doSetProperty);

    // Activate the AudioSession.
    AudioSessionSetActive(true);

    return self;
}

void interruptionListenerCallback(void *inUserData, UInt32 interruptionState) {

    // inUserData carries the AudioPlayer passed to AudioSessionInitialize; playback itself
    // is driven through the shared AudioWrapper instance.
    if (interruptionState == kAudioSessionBeginInterruption) {

        // Interruption began (phone call, alarm, ...): pause the music.
        AudioWrapper *ad = [AudioWrapper sharedAudioWrapperElement];
        [ad pause];

    } else if (interruptionState == kAudioSessionEndInterruption) {

        // Interruption ended: reactivate the session and resume playback.
        OSStatus rc = AudioSessionSetActive(true);
        if (rc) {
            NSLog(@"AudioSessionSetActive(true) returned %d", (int)rc);
        }

        AudioWrapper *ad = [AudioWrapper sharedAudioWrapperElement];
        [ad resume];
    }
}
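
The AudioWrapper used above isn't shown in the post; assuming it ultimately drives the AudioQueueRef created in Step 4, its pause/resume methods could be little more than wrappers around AudioQueuePause and AudioQueueStart. A minimal sketch (the internals are my guess):

// Sketch only: the real AudioWrapper isn't part of the post, and audio_queue here stands
// for the playback queue created in Step 4.
- (void)pause {
    if (audio_queue)
        AudioQueuePause(audio_queue);       // keeps the queue and its buffers intact
}

- (void)resume {
    if (audio_queue)
        AudioQueueStart(audio_queue, NULL); // picks up where AudioQueuePause left off
}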

Step 2: to consume the data arriving over the network, open an audio file stream, feed it the incoming NSData, and let it parse the bytes into audio packets.

First, open the stream parser:

        error = AudioFileStreamOpen(
                                    (void*)self,                          // client data handed back to the callbacks
                                    &AudioFileStreamPropertyListenerProc, // called when a stream property changes
                                    &AudioFileStreamPacketsProc,          // called with the parsed audio packets
                                    0,                                    // file type hint (0 = let the parser detect it)
                                    &audio_file_stream);                  // the resulting stream ID

void AudioFileStreamPropertyListenerProc(
    void *inClientData,
    AudioFileStreamID inAudioFileStream,
    AudioFileStreamPropertyID inPropertyID,
    UInt32 *ioFlags)
{
#ifdef KXT_DEBUG
    [ZPub LogTalk:@"AudioFileStreamPropertyListenerProc"];
#endif
}
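
The property listener above only logs. In most streaming players this callback is also where kAudioFileStreamProperty_ReadyToProducePackets is checked, because that property fires once the header has been parsed and the stream's format (Step 4) can be read. A hedged variant (streamReadyToProducePackets is a hypothetical method name):

void AudioFileStreamPropertyListenerProc(
    void *inClientData,
    AudioFileStreamID inAudioFileStream,
    AudioFileStreamPropertyID inPropertyID,
    UInt32 *ioFlags)
{
    // Sketch: "ready to produce packets" signals that the header has been parsed and the
    // data format can now be queried, which is the natural moment to set up the queue.
    if (inPropertyID == kAudioFileStreamProperty_ReadyToProducePackets) {
        [(AudioPlayer*)inClientData streamReadyToProducePackets]; // hypothetical method
    }
}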



void AudioFileStreamPacketsProc(
    void *inClientData,
    UInt32 inNumberBytes,
    UInt32 inNumberPackets,
    const void *inInputData,
    AudioStreamPacketDescription *inPacketDescriptions) {

    // Hand the parsed packets back to the player object for buffering and playback.
    [(AudioPlayer*)inClientData newAudioData:inInputData bytes:inNumberBytes packets:inNumberPackets packetDescriptions:inPacketDescriptions];
}
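
Where does the NSData come from in the first place? The post doesn't show the networking side; a minimal sketch, assuming the chunks arrive through an NSURLConnection delegate (the audioPlayer property and parseAudioData: method are hypothetical names that would wrap the AudioFileStreamParseBytes call shown next):

// Sketch only: an NSURLConnection delegate feeding the parser; audioPlayer / parseAudioData:
// are assumed names, not from the original post.
- (void)connection:(NSURLConnection *)connection didReceiveData:(NSData *)data {
    [self.audioPlayer parseAudioData:data];   // ends up in AudioFileStreamParseBytes below
}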

Then feed each incoming NSData chunk to AudioFileStreamParseBytes:

    error = AudioFileStreamParseBytes(
                                      audio_file_stream, // the stream ID returned by AudioFileStreamOpen
                                      datalen,
                                      [data bytes],
                                      0);                // parse flags (use kAudioFileStreamParseFlag_Discontinuity after a seek)

The parsed packets are then delivered automatically to the AudioFileStreamPacketsProc callback above.
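
AudioFileStreamPacketsProc hands everything to newAudioData:bytes:packets:packetDescriptions:, which the post doesn't show. A plausible sketch of the glue it performs, pieced together from Steps 3 and 4 and the final enqueue call below (the StreamAudioBuffer class name and the exact control flow are my assumptions):

// Sketch only: the real newAudioData:... isn't shown in the post. "StreamAudioBuffer" is a
// hypothetical name for the wrapper class whose initializer appears in Step 3.
- (void)newAudioData:(const void*)inInputData bytes:(UInt32)inNumberBytes packets:(UInt32)inNumberPackets packetDescriptions:(const AudioStreamPacketDescription*)inPacketDescriptions {

    NSData *chunk = [NSData dataWithBytes:inInputData length:inNumberBytes];
    StreamAudioBuffer *buffer = [[StreamAudioBuffer alloc] initWithData:chunk
                                                                packets:inNumberPackets
                                                     packetDescriptions:inPacketDescriptions];
    if (!buffer)
        return;

    // Create the playback queue lazily, once the stream format is known (Step 4).
    if (!audio_queue) {
        // ... run the Step 4 code: read FormatList / DataFormat, then createAudioQueue ...
        if (![self createAudioQueue]) {
            [buffer release];
            return;
        }
    }

    // Hand the wrapped packets to the queue (the enqueue step at the end of the post).
    [buffer enqueueOnQueue:audio_queue];
    [buffer release];
}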

Step 3: wrap the parsed data in a buffer object (data: the audio bytes that will become mAudioData, whose size becomes mAudioDataByteSize; num_packets: the packet count; packet_descriptions: the packet descriptions).

- (id)initWithData:(NSData*)_data packets:(UInt32)_num_packets packetDescriptions:(const AudioStreamPacketDescription*)_packet_descriptions {

    if(!_data)
        return nil;

    if(!_packet_descriptions)
        return nil;

    self = [super init];
    if(!self)
        return nil;

    self.data = _data;
    num_packets = _num_packets;

    // Keep our own copy of the packet descriptions; the ones passed to the callback
    // are only valid for the duration of that call.
    packet_descriptions = malloc(num_packets * sizeof(AudioStreamPacketDescription));
    memcpy((void*)packet_descriptions, (const void*)_packet_descriptions, num_packets * sizeof(AudioStreamPacketDescription));

    return self;
}
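
Since the initializer mallocs its own copy of the packet descriptions, the wrapper presumably frees it again when it is deallocated. The post doesn't show that, but under manual reference counting it would look roughly like this:

- (void)dealloc {
    // Free the copied packet descriptions and release the retained data.
    if (packet_descriptions)
        free((void*)packet_descriptions);
    [data release];
    [super dealloc];
}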

Step 4: if no playback queue exists yet, create one.

        // Ask the stream (via its stream ID) for the size of its format-list property.
        UInt32 formatListSize;
        Boolean b;
        AudioFileStreamGetPropertyInfo(audio_file_stream,
                                       kAudioFileStreamProperty_FormatList,
                                       &formatListSize,
                                       &b);

        // Now fetch the format list itself.
        NSMutableData *listData = [NSMutableData dataWithLength:formatListSize];
        OSStatus status = AudioFileStreamGetProperty(audio_file_stream,
                                                     kAudioFileStreamProperty_FormatList,
                                                     &formatListSize,
                                                     [listData mutableBytes]);
        AudioFormatListItem *formatList = [listData mutableBytes];


        // The FormatList property isn't always supported, so an error isn't unexpected here.
        // Therefore, we won't call VERIFY_STATUS on this status code.
        if (status == noErr) {
            // Ask Core Audio which format in the list this device plays best.
            UInt32 chosen;
            UInt32 chosenSize = sizeof(UInt32);
            int formatCount = formatListSize / sizeof(AudioFormatListItem);
            status = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList, // 'fpfl'
                                            formatListSize,
                                            formatList,
                                            &chosenSize,
                                            &chosen);
            if (status == noErr) {
                // chosen is an index into formatList; this ASBD is used to build the playback queue.
                audio_format = formatList[chosen].mASBD;
            } else {
                // The docs tell us to grab the last one in the list because it's the most compatible.
                audio_format = formatList[formatCount - 1].mASBD;
            }
        } else {
            // Fall back to the stream's DataFormat.
            UInt32 audio_format_size = sizeof(AudioStreamBasicDescription);
            error = AudioFileStreamGetProperty(audio_file_stream,
                                               kAudioFileStreamProperty_DataFormat,
                                               &audio_format_size,
                                               &audio_format);
            if (error) {
                [buffer release];
                return;
            }
        }



        if (![self createAudioQueue]) {
            [buffer release];
            return;
        }

    }

- (BOOL)createAudioQueue {
    // Create an output queue whose callbacks run on the current thread's run loop.
    error = AudioQueueNewOutput(&audio_format, &MyAudioQueueOutputCallback, (void*)self, [[NSRunLoop currentRunLoop] getCFRunLoop], kCFRunLoopCommonModes, 0, &audio_queue);
    return error == noErr;
}
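
MyAudioQueueOutputCallback, registered with AudioQueueNewOutput above, isn't shown in the post either. In a player like this its usual job is simply to dispose of buffers the queue has finished playing, since enqueueOnQueue: allocates a fresh buffer per chunk. A minimal sketch:

// Sketch only: called by the AudioQueue (on the run loop passed to AudioQueueNewOutput)
// when a buffer has finished playing.
void MyAudioQueueOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
    AudioQueueFreeBuffer(inAQ, inBuffer);   // one buffer is allocated per chunk, so just free it
}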

Finally, hand the prepared buffer to the playback queue created above; once the queue has been started, the audio plays (see the note after the method below).

    [buffer enqueueOnQueue:audio_queue];


- (void)enqueueOnQueue:(AudioQueueRef)queue {

    // Allocate an AudioQueue buffer big enough for this chunk and copy our data into it.
    AudioQueueBufferRef buffer;
    error = AudioQueueAllocateBuffer(queue, (UInt32)[data length], &buffer);
    if(error) {
        [ZPub LogTalk:[NSString stringWithFormat:@"AudioQueueAllocateBuffer failed with %d", (int)error]];
        return;
    }

    buffer->mAudioDataByteSize = (UInt32)[data length];
    memcpy((char*)buffer->mAudioData, [data bytes], [data length]);

    // Hand the buffer to the queue together with its packet descriptions.
    error = AudioQueueEnqueueBuffer(queue, buffer, num_packets, packet_descriptions);
    if(error) {
        [ZPub LogTalk:[NSString stringWithFormat:@"AudioQueueEnqueueBuffer failed with %d", (int)error]];
        return;
    }
}
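
One thing worth noting: enqueueing buffers alone doesn't produce any sound; the queue also has to be started. A common pattern is to call AudioQueueStart once a few buffers are queued (the playing flag and the threshold below are just illustrative, not from the post):

// Sketch: start playback once some data has been buffered.
// buffers_enqueued and playing are hypothetical bookkeeping, not from the original code.
if (!playing && buffers_enqueued >= 3) {
    error = AudioQueueStart(audio_queue, NULL);   // NULL start time = start as soon as possible
    if (error) {
        [ZPub LogTalk:[NSString stringWithFormat:@"AudioQueueStart failed with %d", (int)error]];
    } else {
        playing = YES;
    }
}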