这一节是实践,学习android原生SimplePlayer并自己码一遍,果然学到很多。
MyPlayer.h
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AString.h>
#include <utils/KeyedVector.h>
namespace android {
class IGraphicBufferProducer;
struct MediaCodec;
class MediaCodecBuffer;
struct NuMediaExtractor;
class Surface;
struct ABuffer;
class AudioTrack;
struct ALooper;
// MyPlayer: a minimal media player modeled on Android's SimplePlayer.
// Public API calls are marshalled as messages onto the AHandler's looper
// thread; the actual work happens in the private on*() handlers.
class MyPlayer : public AHandler {
public:
MyPlayer();
// Records the path of the media file to play. Must be called while UNINITIALIZED.
status_t setDataSource(const char* path);
// Supplies the producer side of the surface that video frames render into.
// Should be called after setDataSource() (state must be UNPREPARED).
status_t setSurface(const sp<IGraphicBufferProducer> &bufferProducer);
// Sets up extractor + one MediaCodec per selected track; state -> STOPPED.
status_t prepare();
// Starts the periodic decode/render pump; implicitly prepares if needed.
status_t start();
// Stops the pump; state -> STOPPED.
status_t stop();
protected:
virtual ~MyPlayer();
// Dispatches the kWhat* messages posted by the public API above.
virtual void onMessageReceived(const sp<AMessage> &msg);
private:
// Message codes handled in onMessageReceived().
enum {
kWhatSetDataSource = 0,
kWhatSetSurface,
kWhatPrepare,
kWhatStart,
kWhatStop,
kWhatDoMoreStuff,
};
// Player lifecycle states.
enum State{
UNINITIALIZED,
UNPREPARED,
STOPPED,
STARTED,
};
// Metadata for one dequeued output buffer (mirrors dequeueOutputBuffer's outputs).
struct BufferInfo{
size_t mIndex;
size_t mOffset;
size_t mSize;
int64_t mPresentationTimeUs;
uint32_t mFlags;
};
// Per-track decoding context.
struct CodecState {
sp<MediaCodec> mCodec;
// [0] = input buffers, [1] = output buffers.
Vector<sp<MediaCodecBuffer>> mBuffers[2];
// Codec-specific data ("csd-N") buffers fed to the decoder before samples.
Vector<sp<ABuffer>> mCSD;
List<size_t> mAvailInputBufferIndices;
List<BufferInfo> mAvailOutputBufferInfos;
// Non-NULL only for audio tracks, created on INFO_FORMAT_CHANGED.
sp<AudioTrack> mAudioTrack;
// Frames written to mAudioTrack so far (for free-space accounting).
uint32_t mNumFramesWritten;
};
status_t onPrepare();
status_t onStart();
status_t onStop();
// One pass of the decode/render pump, re-posted while playing.
status_t onDoMoreStuff();
status_t onOutputFormatChanged(CodecState *);
// Writes (part of) a decoded audio buffer into the AudioTrack.
status_t renderAudio(CodecState* state, BufferInfo* info, const sp<MediaCodecBuffer> &buffer);
AString mPath;
State mState;
sp<Surface> mSurface;
sp<NuMediaExtractor> mExtractor;
// Dedicated looper shared by all MediaCodec instances.
sp<ALooper> mCodecLooper;
// Maps extractor track index -> its CodecState.
KeyedVector<size_t, CodecState> mStateByTrackIndex;
// Incremented to invalidate in-flight kWhatDoMoreStuff messages.
int32_t mDoMoreStuffGeneration;
// Wall-clock anchor for A/V sync; -1 until the first pump pass.
int64_t mStartTimeRealUs;
};
}
MyPlayer.cpp
//#define LOG_NDEBUG 0
#define LOG_TAG "MyPlayer"
#include "MyPlayer.h"
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ADebug.h>
#include <gui/Surface.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/NuMediaExtractor.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/MediaCodecBuffer.h>
#include <media/IMediaHTTPService.h>
#include <mediadrm/ICrypto.h>
#include <media/AudioTrack.h>
namespace android {
// Synchronously delivers msg to its handler and extracts the status from the
// reply. A reply without an "err" field is treated as success.
status_t PostAndAwaitResponse(const sp<AMessage> &msg, sp<AMessage> *response)
{
    const status_t postErr = msg->postAndAwaitResponse(response);
    if (postErr != OK) {
        return postErr;
    }
    status_t replyErr;
    if (!(*response)->findInt32("err", &replyErr)) {
        replyErr = OK;
    }
    return replyErr;
}
// Starts life UNINITIALIZED; mStartTimeRealUs stays negative until the first
// onDoMoreStuff() pass anchors the playback clock.
MyPlayer::MyPlayer()
    : mState(UNINITIALIZED),
      mDoMoreStuffGeneration(0),
      mStartTimeRealUs(-1ll) {
}
// Nothing to tear down explicitly; sp<> members release themselves.
MyPlayer::~MyPlayer() {
}
// Hands the path to the looper thread via kWhatSetDataSource and blocks for
// the handler's status reply.
status_t MyPlayer::setDataSource(const char* path)
{
    sp<AMessage> response;
    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
    msg->setString("path", path);
    return PostAndAwaitResponse(msg, &response);
}
// Wraps the producer in a Surface (a NULL producer yields a NULL surface) and
// posts it to the handler thread, blocking until the handler replies.
status_t MyPlayer::setSurface(const sp<IGraphicBufferProducer> &BufferProducer)
{
    sp<Surface> surface;
    if (BufferProducer != NULL) {
        surface = new Surface(BufferProducer);
    }
    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);
    msg->setObject("surface", surface);
    sp<AMessage> response;
    return PostAndAwaitResponse(msg, &response);
}
// Synchronous proxy for onPrepare() on the looper thread.
status_t MyPlayer::prepare()
{
    sp<AMessage> response;
    sp<AMessage> msg = new AMessage(kWhatPrepare, this);
    return PostAndAwaitResponse(msg, &response);
}
// Synchronous proxy for onStart() on the looper thread (prepares first if needed).
status_t MyPlayer::start()
{
    sp<AMessage> response;
    sp<AMessage> msg = new AMessage(kWhatStart, this);
    return PostAndAwaitResponse(msg, &response);
}
// Synchronous proxy for onStop() on the looper thread.
status_t MyPlayer::stop()
{
    sp<AMessage> response;
    sp<AMessage> msg = new AMessage(kWhatStop, this);
    return PostAndAwaitResponse(msg, &response);
}
// Dispatches the messages posted by the public API. Each request message is
// answered with a reply carrying an "err" status.
// FIX: `status_t err;` was read uninitialized on the success paths of
// kWhatSetDataSource, kWhatSetSurface and kWhatStop (UB); it now starts at OK.
void MyPlayer::onMessageReceived(const sp<AMessage> &msg)
{
    switch (msg->what()) {
        case kWhatSetDataSource:
        {
            // Must initialize to OK: the success branch does not assign err.
            status_t err = OK;
            if (mState != UNINITIALIZED) {
                err = INVALID_OPERATION;
            } else {
                CHECK(msg->findString("path", &mPath));
                mState = UNPREPARED;
            }
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatSetSurface:
        {
            // Must initialize to OK: the success branch does not assign err.
            status_t err = OK;
            if (mState != UNPREPARED) {
                err = INVALID_OPERATION;
            } else {
                sp<RefBase> obj;
                CHECK(msg->findObject("surface", &obj));
                mSurface = static_cast<Surface*>(obj.get());
            }
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatPrepare:
        {
            status_t err = OK;
            if (mState != UNPREPARED) {
                err = INVALID_OPERATION;
            } else {
                err = onPrepare();
                if (err == OK) {
                    mState = STOPPED;
                }
            }
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatStart:
        {
            status_t err = OK;
            // start() implicitly prepares when the caller skipped prepare().
            if (mState == UNPREPARED) {
                err = onPrepare();
                if (err == OK) {
                    mState = STOPPED;
                }
            }
            if (err == OK) {
                if (mState != STOPPED) {
                    err = INVALID_OPERATION;
                } else {
                    err = onStart();
                    if (err == OK) {
                        mState = STARTED;
                    }
                }
            }
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatStop:
        {
            // Must initialize to OK... actually err is assigned on both branches,
            // but a definite initial value keeps the pattern uniform and safe.
            status_t err = OK;
            if (mState != STARTED) {
                err = INVALID_OPERATION;
            } else {
                err = onStop();
                if (err == OK) {
                    mState = STOPPED;
                }
            }
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatDoMoreStuff:
        {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));
            // A stale generation means stop()/restart happened; drop the message.
            if (generation != mDoMoreStuffGeneration) {
                break;
            }
            status_t err = onDoMoreStuff();
            // Keep the pump alive: re-post ourselves every 10ms while healthy.
            if (err == OK) {
                msg->post(10000ll);
            }
            break;
        }
        default:
            break;
    }
}
// Creates the extractor, selects at most one audio and one video track,
// creates/configures/starts a MediaCodec per track, and pushes any
// codec-specific data (CSD) buffers to the decoders.
// FIX: printf-style format/argument mismatches (`%d` with size_t) were
// undefined behavior through varargs on LP64; now %zu / explicit casts.
status_t MyPlayer::onPrepare()
{
    CHECK_EQ(mState, UNPREPARED);
    // Create the extractor and point it at the recorded path.
    mExtractor = new NuMediaExtractor();
    status_t err = mExtractor->setDataSource(NULL /* httpService */, mPath.c_str());
    if (err != OK) {
        mExtractor.clear();
        return err;
    }
    // All MediaCodec instances share one dedicated looper thread.
    if (mCodecLooper == NULL) {
        mCodecLooper = new ALooper;
        mCodecLooper->start();
    }
    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < mExtractor->countTracks(); i++) {
        sp<AMessage> format;
        status_t trackErr = mExtractor->getTrackFormat(i, &format);
        CHECK_EQ(trackErr, (status_t)OK);
        AString mime;
        CHECK(format->findString("mime", &mime));
        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
        // Pick the first audio track and the first video track; skip the rest.
        if (!haveAudio && !strncasecmp(mime.c_str(), "audio/", 6)) {
            haveAudio = true;
        } else if (!haveVideo && isVideo) {
            haveVideo = true;
        } else {
            continue;
        }
        trackErr = mExtractor->selectTrack(i);
        CHECK_EQ(trackErr, (status_t)OK);
        // One CodecState per selected track. KeyedVector::add returns the
        // insertion index, which editValueAt turns back into a reference.
        CodecState* state = &mStateByTrackIndex.editValueAt(mStateByTrackIndex.add(i, CodecState()));
        state->mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
        state->mNumFramesWritten = 0; // audio-only bookkeeping
        CHECK(state->mCodec != NULL);
        // Video decodes straight into mSurface; audio output is handled later
        // through an AudioTrack.
        trackErr = state->mCodec->configure(format, isVideo ? mSurface : NULL, NULL /* crypto */, 0 /* flags */);
        CHECK_EQ(trackErr, (status_t)OK);
        // Collect any codec-specific-data buffers ("csd-0", "csd-1", ...).
        // j is cast to int to match the "%d" conversion.
        size_t j = 0;
        sp<ABuffer> buffer;
        while (format->findBuffer(AStringPrintf("csd-%d", (int)j).c_str(), &buffer)) {
            state->mCSD.push_back(buffer);
            j++;
        }
    }
    for (size_t i = 0; i < mStateByTrackIndex.size(); i++) {
        ALOGD("mStateByTrackIndex[%zu].key = %zu", i, mStateByTrackIndex.keyAt(i));
    }
    for (size_t i = 0; i < mStateByTrackIndex.size(); i++) {
        CodecState* state = &mStateByTrackIndex.editValueAt(i);
        // start() must come first: the codec allocates its buffers during start.
        status_t codecErr = state->mCodec->start();
        CHECK_EQ(codecErr, (status_t)OK);
        // Cache the input ([0]) and output ([1]) buffer arrays.
        codecErr = state->mCodec->getInputBuffers(&state->mBuffers[0]);
        CHECK_EQ(codecErr, (status_t)OK);
        codecErr = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
        CHECK_EQ(codecErr, (status_t)OK);
        // Feed CSD to the decoder before any real samples.
        for (size_t j = 0; j < state->mCSD.size(); ++j) {
            const sp<ABuffer> &srcBuffer = state->mCSD.itemAt(j);
            size_t index;
            // Block (-1 = infinite timeout) until an input buffer is available.
            codecErr = state->mCodec->dequeueInputBuffer(&index, -1ll);
            CHECK_EQ(codecErr, (status_t)OK);
            const sp<MediaCodecBuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
            // The destination must be able to hold the whole CSD blob.
            CHECK_LE(srcBuffer->size(), dstBuffer->capacity());
            dstBuffer->setRange(0, srcBuffer->size());
            memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
            // CSD buffers must carry BUFFER_FLAG_CODECCONFIG.
            codecErr = state->mCodec->queueInputBuffer(index, 0, dstBuffer->size(), 0ll, MediaCodec::BUFFER_FLAG_CODECCONFIG);
            CHECK_EQ(codecErr, (status_t)OK);
        }
    }
    return OK;
}
// Kicks off the periodic kWhatDoMoreStuff pump.
// FIX: re-anchor the playback clock. Without resetting mStartTimeRealUs, a
// start() after stop() compares fresh PTS values against the previous run's
// anchor and drops every frame as "late". First-start behavior is unchanged
// (the constructor already sets -1).
status_t MyPlayer::onStart()
{
    CHECK_EQ(mState, STOPPED);
    mStartTimeRealUs = -1ll;
    // The generation counter lets onStop() invalidate in-flight messages.
    sp<AMessage> msg = new AMessage(kWhatDoMoreStuff, this);
    msg->setInt32("generation", ++mDoMoreStuffGeneration);
    msg->post();
    return OK;
}
// Stops the decode/render pump.
// FIX: previously this did nothing, so the kWhatDoMoreStuff message kept
// re-posting itself forever after stop(). Bumping the generation makes the
// handler discard any pending/reposting pump message (see kWhatDoMoreStuff).
status_t MyPlayer::onStop()
{
    ++mDoMoreStuffGeneration;
    return OK;
}
// One pass of the decode/render pump: (1) harvest free input buffers and
// decoded output buffers from every codec, (2) feed file samples to the
// decoders in file order, (3) render/drop output against the wall clock.
// FIX: ALOGD format mismatches (`%u` with size_t, `%lld` with raw int64_t)
// were varargs UB on LP64; now %zu and explicit (long long) casts.
status_t MyPlayer::onDoMoreStuff()
{
    // 1) For each track, poll (non-blocking) for input and output buffers.
    for (size_t i = 0; i < mStateByTrackIndex.size(); i++) {
        CodecState *state = &mStateByTrackIndex.editValueAt(i);
        status_t err;
        // Collect every input buffer index the codec currently has free.
        do {
            size_t index;
            err = state->mCodec->dequeueInputBuffer(&index);
            if (err == OK) {
                state->mAvailInputBufferIndices.push_back(index);
            }
        } while (err == OK);
        // dequeueOutputBuffer returns index/offset/size/PTS/flags on success,
        // but its return value can also signal out-of-band codec events.
        do {
            BufferInfo info;
            err = state->mCodec->dequeueOutputBuffer(&info.mIndex, &info.mOffset, &info.mSize, &info.mPresentationTimeUs, &info.mFlags);
            if (err == OK) {
                // Real decoded data: queue it for step 3.
                state->mAvailOutputBufferInfos.push_back(info);
            } else if (err == INFO_FORMAT_CHANGED) {
                // The output format is now known; for audio this is where the
                // AudioTrack gets created.
                err = onOutputFormatChanged(state);
                CHECK_EQ(err, (status_t)OK);
            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
                // Output buffer array was reallocated; refresh our cached copy.
                err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
                CHECK_EQ(err, (status_t)OK);
            }
        } while (err == OK || err == INFO_FORMAT_CHANGED || err == INFO_OUTPUT_BUFFERS_CHANGED);
    }
    // 2) Feed samples in file order. Stop when the owning track has no free
    // input buffer, or at end of stream.
    for (;;) {
        size_t trackIndex;
        // A non-OK result here means the extractor reached EOS.
        status_t err = mExtractor->getSampleTrackIndex(&trackIndex);
        if (err != OK) {
            ALOGD("get input EOS");
            break;
        } else {
            CodecState *state = &mStateByTrackIndex.editValueFor(trackIndex);
            // No free input buffer for this track: try again next pass.
            if (state->mAvailInputBufferIndices.empty()) {
                break;
            }
            // Pop the oldest free input buffer index.
            size_t index = *state->mAvailInputBufferIndices.begin();
            state->mAvailInputBufferIndices.erase(state->mAvailInputBufferIndices.begin());
            const sp<MediaCodecBuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
            // Wrap the codec buffer in an ABuffer so the extractor can fill it.
            sp<ABuffer> abuffer = new ABuffer(dstBuffer->base(), dstBuffer->capacity());
            err = mExtractor->readSampleData(abuffer);
            CHECK_EQ(err, (status_t)OK);
            // Mirror the extractor's write window onto the codec buffer.
            dstBuffer->setRange(abuffer->offset(), abuffer->size());
            int64_t timeUs;
            CHECK_EQ(mExtractor->getSampleTime(&timeUs), (status_t)OK);
            // Queue with the sample's PTS; no flags for regular samples.
            err = state->mCodec->queueInputBuffer(index, dstBuffer->offset(), dstBuffer->size(), timeUs, 0);
            CHECK_EQ(err, (status_t)OK);
            ALOGD("enqueue input data on track %zu", trackIndex);
            // Advance the extractor to the next sample.
            err = mExtractor->advance();
            CHECK_EQ(err, (status_t)OK);
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    // First pass: anchor playback 1s in the future so early frames aren't late.
    if (mStartTimeRealUs < 0ll) {
        mStartTimeRealUs = nowUs + 1000000ll;
    }
    // 3) Render output buffers with simple A/V sync against the wall clock.
    for (size_t i = 0; i < mStateByTrackIndex.size(); i++) {
        CodecState *state = &mStateByTrackIndex.editValueAt(i);
        while (!state->mAvailOutputBufferInfos.empty()) {
            BufferInfo *info = &*state->mAvailOutputBufferInfos.begin();
            // Map the buffer's PTS onto the wall clock.
            int64_t whenRealUs = info->mPresentationTimeUs + mStartTimeRealUs;
            int64_t lateByUs = nowUs - whenRealUs;
            // Render anything due within the next 10ms.
            if (lateByUs > -10000ll) {
                bool release = true;
                // More than 30ms late: drop the frame instead of rendering.
                if (lateByUs > 30000ll) {
                    ALOGD("track %zu, buffer late by %lld us, dropping", mStateByTrackIndex.keyAt(i), (long long)lateByUs);
                    state->mCodec->releaseOutputBuffer(info->mIndex);
                } else {
                    // Audio path: push PCM into the AudioTrack.
                    if (state->mAudioTrack != NULL) {
                        const sp<MediaCodecBuffer> &srcBuffer = state->mBuffers[1].itemAt(info->mIndex);
                        // May not consume the whole buffer; mSize tracks the rest.
                        renderAudio(state, info, srcBuffer);
                        if (info->mSize > 0) {
                            release = false;
                        }
                    }
                    // Render at release time, so no explicit timestamp is needed.
                    if (release) {
                        state->mCodec->renderOutputBufferAndRelease(info->mIndex);
                    }
                }
                if (release) {
                    state->mAvailOutputBufferInfos.erase(state->mAvailOutputBufferInfos.begin());
                    info = NULL;
                } else {
                    // Partial audio write: retry the same buffer next pass.
                    break;
                }
            } else {
                // More than 10ms early: leave it queued for a later pass.
                ALOGD("track %zu buffer early by %lld us", mStateByTrackIndex.keyAt(i), (long long)lateByUs);
                break;
            }
        }
    }
    return OK;
}
// Handles INFO_FORMAT_CHANGED from a codec. For audio tracks this creates the
// AudioTrack sized from the decoder's reported channel count and sample rate;
// video tracks need no action here.
status_t MyPlayer::onOutputFormatChanged(CodecState *state)
{
    sp<AMessage> format;
    status_t err = state->mCodec->getOutputFormat(&format);
    if (err != OK) {
        return err;
    }
    AString mime;
    CHECK(format->findString("mime", &mime));
    if (strncasecmp(mime.c_str(), "audio/", 6) != 0) {
        // Nothing to do for non-audio tracks.
        return OK;
    }
    int32_t channelCount;
    int32_t sampleRate = 0;
    CHECK(format->findInt32("channel-count", &channelCount));
    CHECK(format->findInt32("sample-rate", &sampleRate));
    // 16-bit PCM music stream; frameCount 0 lets AudioTrack pick a default.
    state->mAudioTrack = new AudioTrack(AUDIO_STREAM_MUSIC, sampleRate, AUDIO_FORMAT_PCM_16_BIT, audio_channel_out_mask_from_count(channelCount), 0);
    return OK;
}
// Writes as much of a decoded audio buffer into the AudioTrack as currently
// fits. Updates info->mOffset/mSize so the caller can detect a partial write
// and retry the remainder on a later pass.
// FIX: ALOGD used %lld with a raw int64_t argument (varargs mismatch where
// int64_t is `long`); now cast to (long long).
status_t MyPlayer::renderAudio(CodecState* state, BufferInfo* info, const sp<MediaCodecBuffer> &buffer)
{
    CHECK(state->mAudioTrack != NULL);
    // Lazily (re)start the track the first time data arrives.
    if (state->mAudioTrack->stopped()) {
        state->mAudioTrack->start();
    }
    uint32_t numFramesPlayed;
    // Frames the hardware has consumed so far.
    CHECK_EQ(state->mAudioTrack->getPosition(&numFramesPlayed), (status_t)OK);
    // Free space = track capacity minus frames written but not yet played.
    uint32_t numFramesAvailableToWrite = state->mAudioTrack->frameCount() - (state->mNumFramesWritten - numFramesPlayed);
    size_t numBytesAvailableToWrite = numFramesAvailableToWrite * state->mAudioTrack->frameSize();
    // Clamp the write to what the track can currently accept.
    size_t copy = info->mSize;
    if (copy > numBytesAvailableToWrite) {
        copy = numBytesAvailableToWrite;
    }
    if (copy == 0) {
        return OK;
    }
    int64_t startTimeUs = ALooper::GetNowUs();
    // mOffset accounts for a previous partial write of this same buffer.
    ssize_t nbytes = state->mAudioTrack->write(buffer->base() + info->mOffset, copy);
    CHECK_EQ(nbytes, (ssize_t)copy);
    int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
    uint32_t numFramesWritten = nbytes / state->mAudioTrack->frameSize();
    if (delayUs > 2000ll) {
        ALOGD("AudioTrack write took %lld us", (long long)delayUs);
    }
    // Record progress so the caller can tell whether the buffer is drained.
    info->mOffset += nbytes;
    info->mSize -= nbytes;
    state->mNumFramesWritten += numFramesWritten;
    return OK;
}
}
TestPlayer.cpp
//#define LOG_NDEBUG 0
#define LOG_TAG "TestPlayer"
#include "MyPlayer.h"

#include <climits>
#include <cstdio>
#include <unistd.h>

#include <binder/IBinder.h>
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
#include <gui/SurfaceControl.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <ui/DisplayConfig.h>
// Test driver: sets up a looper for MyPlayer, creates a half-screen surface
// via SurfaceFlinger, plays the file given on the command line for 60s, then
// tears everything down.
// FIX: argv[1] was dereferenced without checking argc (crash when run with no
// arguments); display dimensions were logged with %d but are ssize_t; the
// player is now stopped before the surface is disposed.
int main(int argc, char** argv)
{
    using namespace android;
    // Refuse to run without a media path.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <path-to-media-file>\n", argv[0]);
        return 1;
    }
    sp<MyPlayer> player = new MyPlayer();
    sp<android::ALooper> looper = new android::ALooper();
    // The looper dispatches MyPlayer's messages on its own thread.
    looper->registerHandler(player);
    looper->start();
    // Build a surface to render video into.
    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
    CHECK_EQ(composerClient->initCheck(), (status_t)OK);
    const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
    CHECK(display != NULL);
    DisplayConfig config;
    CHECK_EQ(SurfaceComposerClient::getActiveDisplayConfig(display, &config), NO_ERROR);
    const ui::Size &resolution = config.resolution;
    const ssize_t displayWidth = resolution.getWidth();
    const ssize_t displayHeight = resolution.getHeight();
    ALOGD("display is %zd x %zd\n", displayWidth, displayHeight);
    sp<SurfaceControl> control = composerClient->createSurface(String8("A Surface"), displayWidth/2, displayHeight/2, PIXEL_FORMAT_RGB_565, 0);
    CHECK(control != NULL);
    CHECK(control->isValid());
    // Put the surface on top and make it visible.
    SurfaceComposerClient::Transaction{}.setLayer(control, INT_MAX).show(control).apply();
    sp<Surface> surface = control->getSurface();
    CHECK(surface != NULL);
    // Start playback of the requested file.
    player->setDataSource(argv[1]);
    player->setSurface(surface->getIGraphicBufferProducer());
    player->start();
    // Let it play for 60 seconds.
    sleep(60);
    // Stop the pump before tearing down the surface and looper.
    player->stop();
    composerClient->dispose();
    looper->stop();
    return 0;
}
Android.bp
cc_binary {
name: "MyPlayer",
srcs: [
"TestPlayer.cpp",
"MyPlayer.cpp",
],
local_include_dirs: [
"include"
],
shared_libs: [
"libstagefright",
"libmedia",
"libstagefright_foundation",
"libgui",
"libaudioclient",
"liblog",
"libutils",
"libcutils",
"libmedia_omx",
"libui",
],
/*export_include_dirs: [
"include"
],*/
export_shared_lib_headers:[
"libstagefright",
"libmedia",
],
header_libs: [
"libmediametrics_headers",
"libmediadrm_headers",
"libaudioclient_headers"
],
}
记录一下编译过程中发现的问题:
1、local_include_dirs 和 export_include_dirs都是列表,所以要用 [] 将引用的内容框起来
2、cc_binary中不允许出现export_include_dirs,因为不需要导出头文件
3、main函数不能被包在 namespace android当中,不然会找不到main函数,可以在main函数中加上using namespace android,ALooper仍然需要命名空间。
4、还有以class形式替代头文件可能会出现的错误