/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrDirectContext.h>
#include <GrTypes.h>
#include <android/sync.h>
#include <ui/FatVector.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

#include <cstring>

#include <gui/TraceUtils.h>
#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

namespace android {
namespace uirenderer {
namespace renderthread {

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

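// Entry-point resolver handed to Skia. Device-level functions are resolved with
// vkGetDeviceProcAddr, except vkQueueSubmit and vkQueueWaitIdle, which are routed
// through VulkanManager's interceptors so access to the graphics queue can be
// serialized (cf. mGraphicsQueueMutex).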
GrVkGetProc VulkanManager::sSkiaGetProp = [](const char* proc_name, VkInstance instance,
                                             VkDevice device) {
    if (device != VK_NULL_HANDLE) {
        if (strcmp("vkQueueSubmit", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueSubmit;
        } else if (strcmp("vkQueueWaitIdle", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueWaitIdle;
        }
        return vkGetDeviceProcAddr(device, proc_name);
    }
    return vkGetInstanceProcAddr(instance, proc_name);
};

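// Helpers that bind Vulkan entry points to the corresponding m<Name> members:
// GET_PROC resolves global functions, GET_INST_PROC instance-level functions,
// and GET_DEV_PROC device-level functions.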
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

sp<VulkanManager> VulkanManager::getInstance() {
    // cache a weak pointer to the context so that a second thread can share the same Vulkan state
    static wp<VulkanManager> sWeakInstance = nullptr;
    static std::mutex sLock;

    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}

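// Tear down in reverse order of creation: wait for the device to go idle before
// destroying it, then destroy the instance, and finally free the heap-allocated
// feature chain.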
VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

149
150 const VkInstanceCreateInfo instance_create = {
151 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
152 nullptr, // pNext
153 0, // flags
154 &app_info, // pApplicationInfo
155 0, // enabledLayerNameCount
156 nullptr, // ppEnabledLayerNames
157 (uint32_t)mInstanceExtensions.size(), // enabledExtensionNameCount
158 mInstanceExtensions.data(), // ppEnabledExtensionNames
159 };
160
161 GET_PROC(CreateInstance);
162 err = mCreateInstance(&instance_create, nullptr, &mInstance);
163 LOG_ALWAYS_FATAL_IF(err < 0);
164
165 GET_INST_PROC(CreateDevice);
166 GET_INST_PROC(DestroyInstance);
167 GET_INST_PROC(EnumerateDeviceExtensionProperties);
168 GET_INST_PROC(EnumeratePhysicalDevices);
169 GET_INST_PROC(GetPhysicalDeviceFeatures2);
170 GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
171 GET_INST_PROC(GetPhysicalDeviceProperties);
172 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
173
    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

226
227 grExtensions.init(sSkiaGetProp, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
228 mInstanceExtensions.data(), mDeviceExtensions.size(),
229 mDeviceExtensions.data());
230
231 LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));
232
233 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
234 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
235 features.pNext = nullptr;
236
237 // Setup all extension feature structs we may want to use.
238 void** tailPNext = &features.pNext;
239
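    // Each struct appended to the chain below is heap-allocated so that the whole
    // chain can later be walked and released generically by
    // free_features_extensions_structs() in the destructor.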
    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // robustBufferAccess looks like it would slow things down, and we can't depend on it
    // being available on all platforms, so disable it.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

293
294 const VkDeviceQueueCreateInfo queueInfo = {
295 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
296 queueNextPtr, // pNext
297 0, // VkDeviceQueueCreateFlags
298 mGraphicsQueueIndex, // queueFamilyIndex
299 1, // queueCount
300 queuePriorities, // pQueuePriorities
301 };
302
303 const VkDeviceCreateInfo deviceInfo = {
304 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
305 &features, // pNext
306 0, // VkDeviceCreateFlags
307 1, // queueCreateInfoCount
308 &queueInfo, // pQueueCreateInfos
309 0, // layerCount
310 nullptr, // ppEnabledLayerNames
311 (uint32_t)mDeviceExtensions.size(), // extensionCount
312 mDeviceExtensions.data(), // ppEnabledExtensionNames
313 nullptr, // ppEnabledFeatures
314 };
315
316 LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::lock_guard _lock{mInitializeLock};

    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

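    // HWUI's Vulkan pipeline requires at least Vulkan 1.1; the same bar is applied
    // to the physical device in setupDevice().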
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

sk_sp<GrDirectContext> VulkanManager::createContext(const GrContextOptions& options,
                                                    ContextType contextType) {
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = sSkiaGetProp;

    return GrDirectContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

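    // If the buffer's dequeue fence has not yet signaled, import it as a temporary
    // VkSemaphore and have the GPU wait on it instead of blocking the CPU. If any
    // step fails, fall back to a blocking sync_wait on the fence.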
    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore backendSemaphore;
                        backendSemaphore.initVulkan(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &backendSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why backendSemaphore is not working.
                        bufferInfo->skSurface->flushAndSubmit();
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

struct DestroySemaphoreInfo {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
    // (including by the GPU) and inside the VulkanManager. So we always start with two refs, one
    // owned by Skia and one owned by the VulkanManager. The refs are decremented each time
    // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
    // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
    // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
    int mRefs = 2;

    DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                         VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
};

static void destroy_semaphore(void* context) {
    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
    --info->mRefs;
    if (!info->mRefs) {
        info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
        delete info;
    }
}

void VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");
    ALOGE_IF(mSwapSemaphore != VK_NULL_HANDLE || mDestroySemaphoreContext != nullptr,
             "finishFrame already has an outstanding semaphore");

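    // Create an exportable semaphore that Skia signals when the frame's GPU work
    // completes; swapBuffers() later exports it as a sync fd and hands it to the
    // surface along with the presented buffer.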
    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::finishFrame(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    GrFlushInfo flushInfo;
    if (err == VK_SUCCESS) {
        mDestroySemaphoreContext = new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
        flushInfo.fNumSemaphores = 1;
        flushInfo.fSignalSemaphores = &backendSemaphore;
        flushInfo.fFinishedProc = destroy_semaphore;
        flushInfo.fFinishedContext = mDestroySemaphoreContext;
    } else {
        semaphore = VK_NULL_HANDLE;
    }
    GrSemaphoresSubmitted submitted =
            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by gpu");
    context->submit();
    if (semaphore != VK_NULL_HANDLE) {
        if (submitted == GrSemaphoresSubmitted::kYes) {
            mSwapSemaphore = semaphore;
            if (mFrameBoundaryANDROID) {
                // retrieve VkImage used as render target
                VkImage image = VK_NULL_HANDLE;
                GrBackendRenderTarget backendRenderTarget =
                        surface->getBackendRenderTarget(SkSurface::kFlushRead_BackendHandleAccess);
                if (backendRenderTarget.isValid()) {
                    GrVkImageInfo info;
                    if (backendRenderTarget.getVkImageInfo(&info)) {
                        image = info.fImage;
                    } else {
                        ALOGE("Frame boundary: backend is not vulkan");
                    }
                } else {
                    ALOGE("Frame boundary: invalid backend render target");
                }
                // frameBoundaryANDROID needs to know about mSwapSemaphore, but
                // it won't wait on it.
                mFrameBoundaryANDROID(mDevice, mSwapSemaphore, image);
            }
        } else {
            destroy_semaphore(mDestroySemaphoreContext);
            mDestroySemaphoreContext = nullptr;
        }
    }
    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);
}

void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    int fenceFd = -1;
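    // Export the swap semaphore created in finishFrame() as a sync fd, so the
    // GPU-completion fence can travel with the buffer to the consumer. If the
    // semaphore was never submitted, fall back to draining the graphics queue.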
    if (mSwapSemaphore != VK_NULL_HANDLE) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = mSwapSemaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");

        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    if (mDestroySemaphoreContext) {
        destroy_semaphore(mDestroySemaphoreContext);
    }

    surface->presentCurrentBuffer(dirtyRect, fenceFd);
    mSwapSemaphore = VK_NULL_HANDLE;
    mDestroySemaphoreContext = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    mDeviceWaitIdle(mDevice);

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
                                            ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
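    // Import the fence fd with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT: the imported
    // payload only applies to the next semaphore wait, which is all we need here.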
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore;
    beSemaphore.initVulkan(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

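    // Flow: create an exportable semaphore, have Skia signal it when the flushed
    // GPU work completes, then export it as a sync fd that is returned to the
    // caller as the release fence.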
    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    DestroySemaphoreInfo* destroyInfo =
            new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore
    // callback, which will remove its ref to the semaphore. The VulkanManager must still release
    // its ref when it is done with the semaphore.
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = &backendSemaphore;
    flushInfo.fFinishedProc = destroy_semaphore;
    flushInfo.fFinishedContext = destroyInfo;
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        destroy_semaphore(destroyInfo);
        return INVALID_OPERATION;
    }

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    destroy_semaphore(destroyInfo);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */