Android8.0 vsync訊號
VSync是Android4.1黃油計劃引入的三大核心元素之一,主要為解決使用者互動體驗差的問題。Android通過VSync機制來提高顯示效果,通常這個訊號是由顯示驅動產生,這樣才能達到最佳效果。但是Android為了能執行在不支援VSync機制的裝置上,也提供了軟體模擬產生VSync訊號的手段。vsync訊號主要由閘刀控制執行緒EventControlThread,事件執行緒EventThread,控制執行緒VsyncThread,BitTube , MessageQueue來實現。
1. VSYNC訊號的傳遞
frameworks/native/services/surfaceflinger/SurfaceFlinger.cpp 在SurfaceFlinger的init()函式
// Excerpt from SurfaceFlinger::init(): build the two vsync sources and the
// event threads that fan vsync out to apps and to SurfaceFlinger itself.
sp<VSyncSource> vsyncSrc = new DispSyncSource(&mPrimaryDispSync,
vsyncPhaseOffsetNs, true, "app"); // vsync source for application clients ("app")
mEventThread = new EventThread(vsyncSrc, *this, false);// event thread delivering "app" vsync
sp<VSyncSource> sfVsyncSrc = new DispSyncSource(&mPrimaryDispSync,
sfVsyncPhaseOffsetNs, true, "sf"); // vsync source for SurfaceFlinger's own compositing ("sf")
mSFEventThread = new EventThread(sfVsyncSrc, *this, true); // event thread for "sf" vsync
mEventQueue.setEventThread(mSFEventThread); // bind the sf event thread to the message queue
frameworks/native/services/surfaceflinger/MessageQueue.cpp
// Wires an EventThread into this MessageQueue: create the event connection,
// take over the receive end of its BitTube, then register that fd with the
// Looper so incoming events wake the message loop.
void MessageQueue::setEventThread(const sp<EventThread>& eventThread)
{
mEventThread = eventThread;
mEvents = eventThread->createEventConnection(); // create the event connection
mEvents->stealReceiveChannel(&mEventTube);// move the tube's receive fd into mEventTube (a BitTube)
mLooper->addFd(mEventTube.getFd(), 0, Looper::EVENT_INPUT, // register the fd with the message loop
MessageQueue::cb_eventReceiver, this); // cb_eventReceiver handles events arriving on the BitTube
}
frameworks/native/services/surfaceflinger/EventThread.cpp 建立Connection與BitTube通訊
// Creates a fresh Connection bound to this EventThread. The const_cast is
// required because Connection stores a mutable pointer to the thread even
// though this factory is declared const.
sp<EventThread::Connection> EventThread::createEventConnection() const {
    EventThread* const mutableSelf = const_cast<EventThread*>(this);
    sp<Connection> connection = new Connection(mutableSelf);
    return connection;
}
frameworks/native/services/surfaceflinger/EventThread.cpp 初始化
// A Connection starts disabled (count == -1; see the count legend in
// EventThread.h) and owns a fresh BitTube over which events are pushed
// to the client side.
EventThread::Connection::Connection(
const sp<EventThread>& eventThread)
: count(-1), mEventThread(eventThread), mChannel(gui::BitTube::DefaultSize)
{
}
frameworks/native/services/surfaceflinger/EventThread.cpp //設定檔案描述符
// Hands the receive end of this connection's BitTube over to the caller;
// the fd is moved out, so afterwards the caller (not the Connection)
// reads events from the tube.
status_t EventThread::Connection::stealReceiveChannel(gui::BitTube* outChannel) {
outChannel->setReceiveFd(mChannel.moveReceiveFd());
return NO_ERROR;
}
frameworks/native/libs/gui/BitTube.cpp
// Takes ownership of the given receive-side file descriptor.
void BitTube::setReceiveFd(base::unique_fd&& receiveFd) {
mReceiveFd = std::move(receiveFd);
}
frameworks/native/services/surfaceflinger/EventThread.h Connection是EventThread的內部類
class EventThread : public Thread, private VSyncSource::Callback {
class Connection : public BnDisplayEventConnection { // Binder server side of an event connection
public:
explicit Connection(const sp<EventThread>& eventThread);
status_t postEvent(const DisplayEventReceiver::Event& event);
// count >= 1 : continuous event. count is the vsync rate
// count == 0 : one-shot event that has not fired
// count ==-1 : one-shot event that fired this round / disabled
int32_t count;
private:
virtual ~Connection();
virtual void onFirstRef();
status_t stealReceiveChannel(gui::BitTube* outChannel) override;
status_t setVsyncRate(uint32_t count) override;
void requestNextVsync() override; // asynchronous
sp<EventThread> const mEventThread;
gui::BitTube mChannel; // socket-pair channel used to push events to the receiver
};
......
}
system/core/libutils/Looper.cpp 這部分屬於訊息迴圈,藉助epoll的多路IO複用,阻塞機制;詳細以後再講,最終會執行Looper_callbackFunc callback回撥函式也就是上文所描述的MessageQueue::cb_eventReceiver函式
// Convenience overload: wraps a plain C callback function pointer in a
// SimpleLooperCallback and forwards to the main addFd().
int Looper::addFd(int fd, int ident, int events, Looper_callbackFunc callback, void* data) {
return addFd(fd, ident, events, callback ? new SimpleLooperCallback(callback) : NULL, data);
}
int Looper::addFd(int fd, int ident, int events, const sp<LooperCallback>& callback, void* data) {
.......
if (!callback.get()) {
if (! mAllowNonCallbacks) {
ALOGE("Invalid attempt to set NULL callback but not allowed for this looper.");
return -1;
}
if (ident < 0) {
ALOGE("Invalid attempt to set NULL callback with ident < 0.");
return -1;
}
} else {
ident = POLL_CALLBACK;
}
{ // acquire lock
AutoMutex _l(mLock);
Request request;
request.fd = fd;
request.ident = ident;
request.events = events;
request.seq = mNextRequestSeq++;
request.callback = callback;
request.data = data;
if (mNextRequestSeq == -1) mNextRequestSeq = 0; // reserve sequence number -1
struct epoll_event eventItem; //linux系統多路IO複用epoll
request.initEventItem(&eventItem);
ssize_t requestIndex = mRequests.indexOfKey(fd); //查詢requestIndex
if (requestIndex < 0) { //未找到則新建
int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, fd, & eventItem);
if (epollResult < 0) {
ALOGE("Error adding epoll events for fd %d: %s", fd, strerror(errno));
return -1;
}
mRequests.add(fd, request); //加入請求佇列
} else { //找到則更新修改
int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_MOD, fd, & eventItem);
if (epollResult < 0) { /如果/修改失敗,新建
if (errno == ENOENT) {
// Tolerate ENOENT because it means that an older file descriptor was
// closed before its callback was unregistered and meanwhile a new
// file descriptor with the same number has been created and is now
// being registered for the first time. This error may occur naturally
// when a callback has the side-effect of closing the file descriptor
// before returning and unregistering itself. Callback sequence number
// checks further ensure that the race is benign.
//
// Unfortunately due to kernel limitations we need to rebuild the epoll
// set from scratch because it may contain an old file handle that we are
// now unable to remove since its file descriptor is no longer valid.
// No such problem would have occurred if we were using the poll system
// call instead, but that approach carries others disadvantages.
#if DEBUG_CALLBACKS
ALOGD("%p ~ addFd - EPOLL_CTL_MOD failed due to file descriptor "
"being recycled, falling back on EPOLL_CTL_ADD: %s",
this, strerror(errno));
#endif
epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, fd, & eventItem); //新建
if (epollResult < 0) {
ALOGE("Error modifying or adding epoll events for fd %d: %s",
fd, strerror(errno));
return -1;
}
scheduleEpollRebuildLocked();
} else { //修改成功
ALOGE("Error modifying epoll events for fd %d: %s", fd, strerror(errno));
return -1;
}
}
mRequests.replaceValueAt(requestIndex, request); //替換request
}
} // release lock
return 1;
}
frameworks/native/services/surfaceflinger/SurfaceFlinger.cpp SurfaceFlinger啟動時會對訊息佇列初始化
// Runs when the first strong reference to SurfaceFlinger is taken;
// initializes the message queue before anything else uses it.
void SurfaceFlinger::onFirstRef()
{
mEventQueue.init(this);
}
frameworks/native/services/surfaceflinger/MessageQueue.cpp
// Binds the queue to its owning SurfaceFlinger and creates the Looper
// plus the Handler that will dispatch this queue's messages.
void MessageQueue::init(const sp<SurfaceFlinger>& flinger)
{
mFlinger = flinger;
mLooper = new Looper(true);
mHandler = new Handler(*this);
}
frameworks/native/services/surfaceflinger/EventThread.cpp EventThread開始處理佇列事件
// One iteration of the event thread: block in waitForEvent() until there is
// an event and at least one interested connection, then post the event to
// each of those connections, dropping or tearing down on pipe errors.
bool EventThread::threadLoop() {
DisplayEventReceiver::Event event;
Vector< sp<EventThread::Connection> > signalConnections; // connections to signal this round
signalConnections = waitForEvent(&event); // blocks until an event is ready
// dispatch events to listeners...
const size_t count = signalConnections.size(); // number of connections to notify
for (size_t i=0 ; i<count ; i++) {
const sp<Connection>& conn(signalConnections[i]); // current connection
// now see if we still need to report this event
status_t err = conn->postEvent(event); // push the event into the connection's BitTube
if (err == -EAGAIN || err == -EWOULDBLOCK) {
// The destination doesn't accept events anymore, it's probably
// full. For now, we just drop the events on the floor.
// FIXME: Note that some events cannot be dropped and would have
// to be re-sent later.
// Right-now we don't have the ability to do this.
ALOGW("EventThread: dropping event (%08x) for connection %p",
event.header.type, conn.get());
} else if (err < 0) {
// handle any other error on the pipe as fatal. the only
// reasonable thing to do is to clean-up this connection.
// The most common error we'll get here is -EPIPE.
removeDisplayEventConnection(signalConnections[i]);
}
}
return true;
}
frameworks/native/services/surfaceflinger/EventThread.cpp 向BitTube傳送事件
// Pushes a single event into this connection's BitTube. Returns NO_ERROR on
// success, or the negative errno propagated by sendEvents() on failure.
status_t EventThread::Connection::postEvent(
const DisplayEventReceiver::Event& event) {
    const ssize_t sent = DisplayEventReceiver::sendEvents(&mChannel, &event, 1);
    if (sent < 0) {
        return status_t(sent); // negative errno from the tube
    }
    return status_t(NO_ERROR);
}
frameworks/native/libs/gui/DisplayEventReceiver.cpp
// Thin wrapper: serializes `count` events into the given BitTube. Returns
// the number of events sent, or a negative errno on failure.
ssize_t DisplayEventReceiver::sendEvents(gui::BitTube* dataChannel,
Event const* events, size_t count)
{
return gui::BitTube::sendObjects(dataChannel, events, count);
}
frameworks/native/libs/gui/BitTube.cpp
// Writes `count` objects of objSize bytes into the tube as one message.
// Returns the number of whole objects sent, or a negative errno.
ssize_t BitTube::sendObjects(BitTube* tube, void const* events, size_t count, size_t objSize) {
const char* vaddr = reinterpret_cast<const char*>(events);
ssize_t size = tube->write(vaddr, count * objSize); // write the raw bytes
// should never happen because of SOCK_SEQPACKET
LOG_ALWAYS_FATAL_IF((size >= 0) && (size % static_cast<ssize_t>(objSize)),
"BitTube::sendObjects(count=%zu, size=%zu), res=%zd (partial events were "
"sent!)",
count, objSize, size);
// ALOGE_IF(size<0, "error %d sending %d events", size, count);
return size < 0 ? size : size / static_cast<ssize_t>(objSize);
}
frameworks/native/libs/gui/BitTube.cpp
// Sends `size` bytes over the socket pair, retrying on EINTR. Non-blocking
// (MSG_DONTWAIT): returns the byte count on success or -errno on failure.
ssize_t BitTube::write(void const* vaddr, size_t size) {
ssize_t err, len;
do {
len = ::send(mSendFd, vaddr, size, MSG_DONTWAIT | MSG_NOSIGNAL); // push the payload into the socket
// cannot return less than size, since we're using SOCK_SEQPACKET
err = len < 0 ? errno : 0;
} while (err == EINTR);
return err == 0 ? len : -err;
}
frameworks/native/libs/gui/BitTube.cpp 從BitTube的初始化函式可以看出使用了套接字來通訊
// Creates the underlying non-blocking AF_UNIX SOCK_SEQPACKET socket pair:
// sockets[0] becomes the receive end, sockets[1] the send end. The unused
// direction of each socket gets only a small buffer.
void BitTube::init(size_t rcvbuf, size_t sndbuf) {
int sockets[2];
if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sockets) == 0) {
size_t size = DEFAULT_SOCKET_BUFFER_SIZE;
setsockopt(sockets[0], SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
setsockopt(sockets[1], SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
// since we don't use the "return channel", we keep it small...
setsockopt(sockets[0], SOL_SOCKET, SO_SNDBUF, &size, sizeof(size));
setsockopt(sockets[1], SOL_SOCKET, SO_RCVBUF, &size, sizeof(size));
fcntl(sockets[0], F_SETFL, O_NONBLOCK);
fcntl(sockets[1], F_SETFL, O_NONBLOCK);
mReceiveFd.reset(sockets[0]);
mSendFd.reset(sockets[1]);
} else {
// on failure leave the tube empty so getFd() and friends fail cleanly
mReceiveFd.reset();
ALOGE("BitTube: pipe creation failed (%s)", strerror(errno));
}
}
frameworks/native/services/surfaceflinger/MessageQueue.cpp 訊息迴圈的回撥函式cb_eventReceiver主動讀取BitTube端的資料
// Looper callback trampoline: `data` is the MessageQueue pointer that was
// registered via addFd(); forward the call to the member function.
// static_cast (not reinterpret_cast) is the correct named cast for
// converting a void* back to the pointer type it originated from.
int MessageQueue::cb_eventReceiver(int fd, int events, void* data) {
    MessageQueue* queue = static_cast<MessageQueue*>(data);
    return queue->eventReceiver(fd, events);
}
// Drains all pending events from the BitTube, eight at a time; if any batch
// contains a VSYNC event, dispatches an invalidate via the handler. Always
// returns 1 so the Looper keeps the fd registered.
int MessageQueue::eventReceiver(int /*fd*/, int /*events*/) {
ssize_t n;
DisplayEventReceiver::Event buffer[8];
while ((n = DisplayEventReceiver::getEvents(&mEventTube, buffer, 8)) > 0) {
for (int i=0 ; i<n ; i++) {
if (buffer[i].header.type == DisplayEventReceiver::DISPLAY_EVENT_VSYNC) {
mHandler->dispatchInvalidate(); // dispatch the vsync as an invalidate message
break;
}
}
}
return 1;
}
那麼為什麼訊息迴圈會知道Event何時被傳送到BitTube,然後從BitTube的socket接收端讀取呢,我們不得不回到setEventThread函式,設計得相當精妙
void MessageQueue::setEventThread(const sp<EventThread>& eventThread)
{
// mEventTube is a gui::BitTube, so mEventTube.getFd() is the file
// descriptor shared between the message queue and the BitTube
mLooper->addFd(mEventTube.getFd(), 0, Looper::EVENT_INPUT,
MessageQueue::cb_eventReceiver, this);
}
frameworks/native/libs/gui/DisplayEventReceiver.cpp
// Thin wrapper: reads up to `count` events out of the given BitTube.
// Returns the number of events received, or a negative errno on failure.
ssize_t DisplayEventReceiver::getEvents(gui::BitTube* dataChannel,
Event* events, size_t count)
{
return gui::BitTube::recvObjects(dataChannel, events, count);
}
frameworks/native/libs/gui/BitTube.cpp
// Reads up to `count` objects of objSize bytes out of the tube. Returns the
// number of whole objects received, or a negative errno.
ssize_t BitTube::recvObjects(BitTube* tube, void* events, size_t count, size_t objSize) {
char* vaddr = reinterpret_cast<char*>(events);
ssize_t size = tube->read(vaddr, count * objSize);
// should never happen because of SOCK_SEQPACKET
LOG_ALWAYS_FATAL_IF((size >= 0) && (size % static_cast<ssize_t>(objSize)),
"BitTube::recvObjects(count=%zu, size=%zu), res=%zd (partial events were "
"received!)",
count, objSize, size);
// ALOGE_IF(size<0, "error %d receiving %d events", size, count);
return size < 0 ? size : size / static_cast<ssize_t>(objSize);
}
frameworks/native/libs/gui/BitTube.cpp
ssize_t BitTube::read(void* vaddr, size_t size) {
ssize_t err, len;
do {
len = ::recv(mReceiveFd, vaddr, size, M