1. Interface layout and related functionality

Press and hold the center image to record, tap the top-left button to play or pause playback, and tap the top-right button to remove the recording file.
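
For context, here is a minimal sketch of how these controls might be wired up in viewDidLoad. The outlets playBtn, removeBtn, and curveLineImage appear in the code later in this post; recordImageView (the center image) is a hypothetical outlet used only for illustration.

- (void)viewDidLoad {
    [super viewDidLoad];
    // Hide the play/remove controls until a recording exists
    self.playBtn.hidden = YES;
    self.removeBtn.hidden = YES;
    self.curveLineImage.hidden = YES;
    // Attach the long-press gesture to the center image (recordImageView is a hypothetical outlet)
    UILongPressGestureRecognizer *longPress = [[UILongPressGestureRecognizer alloc] initWithTarget:self action:@selector(handleTableviewCellLongPressed:)];
    self.recordImageView.userInteractionEnabled = YES;
    [self.recordImageView addGestureRecognizer:longPress];
    // Build the recording path and configure the audio session (section 3)
    [self setAVRecorder];
}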

2. Define the relevant properties

#import "SendVoiceController.h"
#import <AVFoundation/AVFoundation.h>
#import "RecordVoiceView.h"
#import "lame.h"
#import "PlayVoiceView.h"
#define cafFilePathName @"myRecordForCaf.caf"
#define mp3FilePathName @"myRecordForMp3.mp3"
@interface SendVoiceController () <AVAudioRecorderDelegate, AVAudioPlayerDelegate>
// Recording storage path
@property (nonatomic, strong) NSURL *tmpFile;
// Recorder
@property (nonatomic, strong) AVAudioRecorder *recorder;
// Player
@property (nonatomic, strong) AVAudioPlayer *player;
// Recording animation views
@property (nonatomic, strong) RecordVoiceView *recordview;
@property (nonatomic, strong) PlayVoiceView *playview;
// Recording timer
@property (nonatomic, strong) NSTimer *recordTimer;
// Elapsed seconds of the recording
@property (nonatomic, strong) NSTimer *timer;
@property (nonatomic, assign) int index;
@property (nonatomic, assign) int duration;
@end
@implementation SendVoiceController
3. Create the audio session
- (void)setAVRecorder {
    NSString *urlStr = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
    urlStr = [urlStr stringByAppendingPathComponent:cafFilePathName];
    self.tmpFile = [NSURL fileURLWithPath:urlStr];
    // Configure the shared audio session
    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSError *sessionError;
    // AVAudioSessionCategoryPlayAndRecord allows both recording and playback
    [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError];
    // Only activate the session if setting the category succeeded
    if (sessionError) {
        NSLog(@"Error configuring session: %@", [sessionError description]);
    } else {
        [session setActive:YES error:nil];
    }
}
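
Note that the snippet above does not ask the user for microphone access. A minimal sketch of requesting it (assuming iOS 7 or later; the method name requestMicrophoneAccess is my own) could look like this:

- (void)requestMicrophoneAccess {
    // Ask for microphone permission before the first recording
    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        if (!granted) {
            NSLog(@"Microphone access was denied; recording will not capture audio");
        }
    }];
}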

4. Long-press gesture handler for the center image

// Long-press gesture handler
- (void)handleTableviewCellLongPressed:(UILongPressGestureRecognizer *)gestureRecognizer {
    if (gestureRecognizer.state == UIGestureRecognizerStateBegan) {
        NSLog(@"UIGestureRecognizerStateBegan");
        [self recorderPressing];
    }
    if (gestureRecognizer.state == UIGestureRecognizerStateChanged) {
        NSLog(@"UIGestureRecognizerStateChanged");
    }
    if (gestureRecognizer.state == UIGestureRecognizerStateEnded) {
        NSLog(@"UIGestureRecognizerStateEnded");
        [self recorderEnd];
    }
}

5. Create the AVAudioRecorder and start recording

- (void)recorderPressing {
    NSDictionary *settingdic = [NSDictionary dictionaryWithObjectsAndKeys:
                                [NSNumber numberWithFloat:8000], AVSampleRateKey, // telephone-quality sample rate
                                [NSNumber numberWithInt:kAudioFormatLinearPCM], AVFormatIDKey,
                                [NSNumber numberWithInt:2], AVNumberOfChannelsKey,
                                [NSNumber numberWithInt:16], AVLinearPCMBitDepthKey,
                                [NSNumber numberWithInt:AVAudioQualityMin], AVEncoderAudioQualityKey,
                                nil];
    // Start recording and write the captured audio to the file
    self.recorder = [[AVAudioRecorder alloc] initWithURL:_tmpFile settings:settingdic error:nil];
    self.recorder.meteringEnabled = YES;
    self.recorder.delegate = self;
    // Prepare the recorder
    [self.recorder prepareToRecord];
    // Start (or resume) recording to the file
    [self.recorder record];
    self.player = nil;
    // The following drives the recording animation (modeled after a chat app's voice-message UI)
    self.recordview.hidden = NO;
    self.recordTimer = [NSTimer scheduledTimerWithTimeInterval:1 target:self selector:@selector(recordTimerChange) userInfo:nil repeats:YES];
}

6. Stop recording when the long press ends

- (void)recorderEnd {
    self.duration = self.index;
    self.index = 0;
    // Stop the recording timer
    if (self.recordTimer) {
        [self.recordTimer invalidate];
        self.recordTimer = nil;
    }
    [self.recorder stop];
    self.recordview.hidden = YES;
    self.recorder = nil;
    self.playBtn.hidden = NO;
    self.removeBtn.hidden = NO;
    self.curveLineImage.hidden = NO;
    NSError *playError;
    NSString *urlStr = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
    urlStr = [urlStr stringByAppendingPathComponent:mp3FilePathName];
    NSURL *url = [NSURL fileURLWithPath:urlStr];
    // The MP3 is written in audioRecorderDidFinishRecording: (section 7), so it is assumed to exist by now
    self.player = [[AVAudioPlayer alloc] initWithContentsOfURL:url error:&playError];
    // If the player could not be created, log the error
    if (self.player == nil) {
        NSLog(@"Error creating player: %@", [playError description]);
    }
    self.player.delegate = self;
    //    [self.player play];
}

7. The AVAudioRecorder delegate method fires at the same time

Its main job here is to convert the recorded CAF file into an MP3 for upload to the server; the conversion code comes from the libmp3lame library.

#pragma mark --- Recorder delegate
- (void)audioRecorderDidFinishRecording:(AVAudioRecorder *)recorder successfully:(BOOL)flag {
    NSString *urlStr = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
    NSString *cafFilePath = [urlStr stringByAppendingPathComponent:cafFilePathName];  // path to the CAF file
    NSString *mp3FilePath = [urlStr stringByAppendingPathComponent:mp3FilePathName];  // path where the MP3 will be written
    NSFileManager *fileManager = [NSFileManager defaultManager];
    if ([fileManager fileExistsAtPath:cafFilePath]) {
        float msize = [[fileManager attributesOfItemAtPath:cafFilePath error:nil] fileSize] / (1024.0 * 1024.0);
        NSLog(@"CAF file size: %lf MB", msize);
    } else {
        float msize = [[fileManager attributesOfItemAtPath:mp3FilePath error:nil] fileSize] / (1024.0 * 1024.0);
        NSLog(@"MP3 file size: %lf MB", msize);
    }
    if ([fileManager removeItemAtPath:mp3FilePath error:nil]) {
        NSLog(@"Deleted the old MP3");
    }
    @try {
        int read, write;
        FILE *pcm = fopen([cafFilePath cStringUsingEncoding:1], "rb");  // source: the audio file to convert
        if (pcm == NULL) {
            NSLog(@"file not found");
        } else {
            fseek(pcm, 4 * 1024, SEEK_CUR);                             // skip the file header
            FILE *mp3 = fopen([mp3FilePath cStringUsingEncoding:1], "wb");  // output: the generated MP3 file
            const int PCM_SIZE = 8192;
            const int MP3_SIZE = 8192;
            short int pcm_buffer[PCM_SIZE * 2];
            unsigned char mp3_buffer[MP3_SIZE];
            lame_t lame = lame_init();
            lame_set_num_channels(lame, 2);       // 1 = mono; the default is 2 (stereo)
            lame_set_in_samplerate(lame, 8000.0); // matches the recorder's sample rate
            //lame_set_VBR(lame, vbr_default);
            lame_set_brate(lame, 16);
            lame_set_mode(lame, 3);
            lame_set_quality(lame, 2);            /* 2 = high, 5 = medium, 7 = low quality */
            lame_init_params(lame);
            do {
                read = fread(pcm_buffer, 2 * sizeof(short int), PCM_SIZE, pcm);
                if (read == 0)
                    write = lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
                else
                    write = lame_encode_buffer_interleaved(lame, pcm_buffer, read, mp3_buffer, MP3_SIZE);
                fwrite(mp3_buffer, write, 1, mp3);
            } while (read != 0);
            lame_close(lame);
            fclose(mp3);
            fclose(pcm);
            return;
        }
        return;
    }
    @catch (NSException *exception) {
        NSLog(@"%@", [exception description]);
        return;
    }
    @finally {
        NSLog(@"Conversion finished");
    }
}

8. Top-left button action: play / pause

- (IBAction)playerClick:(id)sender {
    NSString *urlStr = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
    NSString *path = [urlStr stringByAppendingPathComponent:cafFilePathName];
    NSFileManager *manager = [NSFileManager defaultManager];
    if ([manager fileExistsAtPath:path]) {
        float msize = [[manager attributesOfItemAtPath:path error:nil] fileSize] / (1024.0 * 1024.0);
        NSLog(@"File size: %lf MB", msize);
    } else {
        _player = nil;
    }
    // If currently playing, pause and show the play icon
    if ([self.player isPlaying]) {
        [_player pause];
        [self.playBtn setImage:[UIImage imageNamed:@"voice_play"] forState:UIControlStateNormal];
    } else {
        // Otherwise start (or resume) playback and show the pause icon
        [_player play];
        [self.playBtn setImage:[UIImage imageNamed:@"voice_pause"] forState:UIControlStateNormal];
    }
}

9. Top-right button action: remove the recording files

- (IBAction)removeClick:(id)sender {
    self.playBtn.hidden = YES;
    self.removeBtn.hidden = YES;
    self.curveLineImage.hidden = YES;
    NSString *urlStr = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
    NSString *cafFilePath = [urlStr stringByAppendingPathComponent:cafFilePathName];  // path to the CAF file
    NSString *mp3FilePath = [urlStr stringByAppendingPathComponent:mp3FilePathName];  // path to the MP3 file
    NSFileManager *fileManager = [NSFileManager defaultManager];
    if ([fileManager removeItemAtPath:mp3FilePath error:nil]) {
        NSLog(@"Deleted the MP3");
    }
    if ([fileManager removeItemAtPath:cafFilePath error:nil]) {
        NSLog(@"Deleted the CAF");
    }
}

10. Upload to the server

The main idea is to read the MP3 file into an NSData object, Base64-encode it into a string, and send that string to the backend.

NSString *url = [[HJInterfaceManager sharedInstance] addRichInfo];
NSMutableDictionary *mdic = [[NSMutableDictionary alloc] initWithCapacity:0];
NSString *urlStr = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
NSString *mp3FilePath = [urlStr stringByAppendingPathComponent:mp3FilePathName];
NSData *filedata = [NSData dataWithContentsOfFile:mp3FilePath];
SelfDataModel *selfModel = [SelfDataModel returnModelBySelectFMDB];
// Request parameters: familyId, userId, content, richContent, type, sendTime
NSDate *date = [NSDate date];
NSDateFormatter *form = [[NSDateFormatter alloc] init];
[form setDateFormat:@"yyyy-MM-dd HH:mm:ss"];
NSString *datestr = [form stringFromDate:date];
if (!self.isMyself) {
    [mdic setObject:self.family.idNum forKey:@"familyId"];
}
[mdic setObject:selfModel.idNum forKey:@"userId"];
[mdic setObject:@"" forKey:@"content"];
[mdic setObject:[NSString stringWithFormat:@"%d", self.duration] forKey:@"duration"];
[mdic setObject:[filedata base64EncodedStringWithOptions:0] forKey:@"richContents"];
[mdic setObject:@"SOUND" forKey:@"type"];
[mdic setObject:datestr forKey:@"sendTime"];
[HJHttpManager PostRequestWithUrl:url param:mdic finish:^(NSData *data) {
    NSDictionary *dic = (NSDictionary *)data;
    if ([dic[@"status"] isEqualToString:@"S"]) {
        [MBProgressHUD showSuccess:@"Sent successfully"];
        dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(2 * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
            [self.navigationController popViewControllerAnimated:YES];
        });
    } else {
        [MBProgressHUD showError:dic[@"message"]];
    }
} failed:^(NSError *error) {
    NSLog(@"Request failed");
}];

Tip:

The method below monitors the recording level in decibels.

#pragma mark --- Monitor the recording level in decibels
/* The value does change with the ambient volume, but I have not verified how accurate the decibel figure actually is. */
- (void)recordTimerChange {
    //    [self.recorder updateMeters]; // refresh the metering data
    //    double lowPassResults = pow(10, (0.05 * [self.recorder peakPowerForChannel:0]));
    //    NSLog(@"Level: %lf", lowPassResults);
    self.recordview.timeLab.text = [NSString stringWithFormat:@"%d s", self.index];
    self.index++;
    [self.recorder updateMeters];
    float level;                 // the linear 0.0 .. 1.0 value we need
    float minDecibels = -80.0f;  // or use -60 dB, which I measured in a silent room
    float decibels = [self.recorder averagePowerForChannel:0];
    if (decibels < minDecibels) {
        level = 0.0f;
    } else if (decibels >= 0.0f) {
        level = 1.0f;
    } else {
        float root = 2.0f;
        float minAmp = powf(10.0f, 0.05f * minDecibels);
        float inverseAmpRange = 1.0f / (1.0f - minAmp);
        float amp = powf(10.0f, 0.05f * decibels);
        float adjAmp = (amp - minAmp) * inverseAmpRange;
        level = powf(adjAmp, 1.0f / root);
    }
    float ben = level * 120;
    if (ben < 20) {
        [self.recordview.imageview setImage:[UIImage imageNamed:@"order_voice_1"]];
    } else if (ben < 35) {
        [self.recordview.imageview setImage:[UIImage imageNamed:@"order_voice_2"]];
    } else {
        [self.recordview.imageview setImage:[UIImage imageNamed:@"order_voice_3"]];
    }
    NSLog(@"Level: %lf", ben);
}