Merge pull request #93739 from AThousandShips/lock_unlock
[Core] Allow locking/unlocking of `MutexLock`
Commit ce44c91223
4 changed files with 59 additions and 26 deletions
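In short, `MutexLock` gains `temp_unlock()` and `temp_relock()`, so a scoped lock can be released around a blocking call and re-acquired afterwards instead of juggling raw `lock()`/`unlock()` pairs. A minimal usage sketch (the `Mutex`/`MutexLock` API is the one touched by this PR; `shared_value` and `do_blocking_work()` are made-up names):

	Mutex shared_mutex;
	int shared_value = 0;

	void example() {
		MutexLock lock(shared_mutex); // Acquired on construction, released on destruction.
		int snapshot = shared_value; // Read shared state while holding the lock.

		lock.temp_unlock(); // Drop the mutex around the slow call...
		do_blocking_work(snapshot);
		lock.temp_relock(); // ...then take it back before touching shared state again.

		// Another thread may have changed shared_value while we were unlocked,
		// so callers typically re-validate state here (as the hunks below do).
		shared_value = snapshot + 1;
	}
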
@@ -81,17 +81,17 @@ struct _IP_ResolverPrivate {
 				continue;
 			}
 
-			mutex.lock();
+			MutexLock lock(mutex);
 			List<IPAddress> response;
 			String hostname = queue[i].hostname;
 			IP::Type type = queue[i].type;
-			mutex.unlock();
+			lock.temp_unlock();
 
 			// We should not lock while resolving the hostname,
 			// only when modifying the queue.
 			IP::get_singleton()->_resolve_hostname(response, hostname, type);
 
-			MutexLock lock(mutex);
+			lock.temp_relock();
 			// Could have been completed by another function, or deleted.
 			if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) {
 				continue;
@@ -131,21 +131,22 @@ PackedStringArray IP::resolve_hostname_addresses(const String &p_hostname, Type
 	List<IPAddress> res;
 	String key = _IP_ResolverPrivate::get_cache_key(p_hostname, p_type);
 
-	resolver->mutex.lock();
-	if (resolver->cache.has(key)) {
-		res = resolver->cache[key];
-	} else {
-		// This should be run unlocked so the resolver thread can keep resolving
-		// other requests.
-		resolver->mutex.unlock();
-		_resolve_hostname(res, p_hostname, p_type);
-		resolver->mutex.lock();
-		// We might be overriding another result, but we don't care as long as the result is valid.
-		if (res.size()) {
-			resolver->cache[key] = res;
+	{
+		MutexLock lock(resolver->mutex);
+		if (resolver->cache.has(key)) {
+			res = resolver->cache[key];
+		} else {
+			// This should be run unlocked so the resolver thread can keep resolving
+			// other requests.
+			lock.temp_unlock();
+			_resolve_hostname(res, p_hostname, p_type);
+			lock.temp_relock();
+			// We might be overriding another result, but we don't care as long as the result is valid.
+			if (res.size()) {
+				resolver->cache[key] = res;
+			}
 		}
 	}
-	resolver->mutex.unlock();
 
 	PackedStringArray result;
 	for (const IPAddress &E : res) {
@@ -690,10 +690,10 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
 	if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
 		const ThreadLoadTask &load_task = thread_load_tasks[load_token->local_path];
 		while (load_task.status == THREAD_LOAD_IN_PROGRESS) {
-			thread_load_lock.~MutexLock();
+			thread_load_lock.temp_unlock();
 			bool exit = !_ensure_load_progress();
 			OS::get_singleton()->delay_usec(1000);
-			new (&thread_load_lock) MutexLock(thread_load_mutex);
+			thread_load_lock.temp_relock();
 			if (exit) {
 				break;
 			}
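For comparison, a condensed sketch of the two idioms this hunk swaps (the surrounding `while` loop is elided; the names are taken from the hunk above): the old code destroyed the scoped lock in place and rebuilt it with placement new, while the new code releases and re-acquires the same, still-alive guard.

	// Old idiom: end the guard's lifetime, then reconstruct it in place.
	thread_load_lock.~MutexLock(); // Unlocks, but leaves a dead object behind.
	/* ... wait without holding the mutex ... */
	new (&thread_load_lock) MutexLock(thread_load_mutex); // Must run before the scope exits.

	// New idiom: the guard stays alive the whole time.
	thread_load_lock.temp_unlock();
	/* ... wait without holding the mutex ... */
	thread_load_lock.temp_relock();
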
@@ -754,7 +754,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 		bool loader_is_wtp = load_task.task_id != 0;
 		if (loader_is_wtp) {
 			// Loading thread is in the worker pool.
-			thread_load_mutex.unlock();
+			p_thread_load_lock.temp_unlock();
 
 			PREPARE_FOR_WTP_WAIT
 			Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
@@ -770,7 +770,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 				_run_load_task(&load_task);
 			}
 
-			thread_load_mutex.lock();
+			p_thread_load_lock.temp_relock();
 			load_task.awaited = true;
 
 			DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
@@ -1208,7 +1208,7 @@ void ResourceLoader::clear_translation_remaps() {
 void ResourceLoader::clear_thread_load_tasks() {
 	// Bring the thing down as quickly as possible without causing deadlocks or leaks.
 
-	thread_load_mutex.lock();
+	MutexLock thread_load_lock(thread_load_mutex);
 	cleaning_tasks = true;
 
 	while (true) {
@@ -1227,9 +1227,9 @@ void ResourceLoader::clear_thread_load_tasks() {
 		if (none_running) {
 			break;
 		}
-		thread_load_mutex.unlock();
+		thread_load_lock.temp_unlock();
 		OS::get_singleton()->delay_usec(1000);
-		thread_load_mutex.lock();
+		thread_load_lock.temp_relock();
 	}
 
 	while (user_load_tokens.begin()) {
@@ -1244,7 +1244,6 @@ void ResourceLoader::clear_thread_load_tasks() {
 	thread_load_tasks.clear();
 
 	cleaning_tasks = false;
-	thread_load_mutex.unlock();
 }
 
 void ResourceLoader::load_path_remaps() {
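Taken together, the three `clear_thread_load_tasks()` hunks above replace a manual `lock()`/.../`unlock()` span with an RAII guard that is only dropped while waiting. A condensed sketch of the resulting shape (most of the cleanup and the loop condition are elided, so this is not the literal function body):

	void ResourceLoader::clear_thread_load_tasks() {
		MutexLock thread_load_lock(thread_load_mutex); // Held for the rest of the function.
		cleaning_tasks = true;

		while (/* some tasks still running */ false) {
			thread_load_lock.temp_unlock(); // Let worker threads make progress...
			OS::get_singleton()->delay_usec(1000);
			thread_load_lock.temp_relock(); // ...then re-check state under the lock.
		}

		cleaning_tasks = false;
	} // No trailing unlock needed: the guard's destructor releases the mutex.
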
@@ -72,7 +72,7 @@ public:
 
 template <typename MutexT>
 class MutexLock {
-	THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
+	mutable THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
 
 public:
 	explicit MutexLock(const MutexT &p_mutex) :
@@ -82,8 +82,18 @@ public:
 	template <typename T = MutexT>
 	_ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock(
 			typename std::enable_if<std::is_same<T, THREADING_NAMESPACE::mutex>::value> * = nullptr) const {
-		return const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(lock);
+		return lock;
 	}
+
+	_ALWAYS_INLINE_ void temp_relock() const {
+		lock.lock();
+	}
+
+	_ALWAYS_INLINE_ void temp_unlock() const {
+		lock.unlock();
+	}
+
+	// TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };
 
 using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use
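The implementation above simply forwards to the underlying `unique_lock`, which is why the member becomes `mutable` (the new methods are `const`). A self-contained sketch of the same idea using plain standard-library types, outside Godot's macros and the THREADING_NAMESPACE indirection (`ScopedLock` is a hypothetical stand-in, not the engine class):

	#include <mutex>

	class ScopedLock {
		mutable std::unique_lock<std::mutex> lock; // mutable: const methods may release/re-acquire it.

	public:
		explicit ScopedLock(std::mutex &p_mutex) :
				lock(p_mutex) {} // Locks on construction; the destructor unlocks if still held.

		void temp_unlock() const { lock.unlock(); } // Mirrors MutexLock::temp_unlock().
		void temp_relock() const { lock.lock(); } // Mirrors MutexLock::temp_relock().
	};
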
@@ -109,6 +119,9 @@ template <typename MutexT>
 class MutexLock {
 public:
 	MutexLock(const MutexT &p_mutex) {}
+
+	void temp_relock() const {}
+	void temp_unlock() const {}
 };
 
 using Mutex = MutexImpl;
@@ -108,6 +108,16 @@ public:
 	~MutexLock() {
 		mutex.unlock();
 	}
+
+	_ALWAYS_INLINE_ void temp_relock() const {
+		mutex.lock();
+	}
+
+	_ALWAYS_INLINE_ void temp_unlock() const {
+		mutex.unlock();
+	}
+
+	// TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };
 
 #ifdef __clang__
@@ -128,6 +138,16 @@ public:
 	void unlock() const {}
 };
 
+template <int Tag>
+class MutexLock<SafeBinaryMutex<Tag>> {
+public:
+	MutexLock(const SafeBinaryMutex<Tag> &p_mutex) {}
+	~MutexLock() {}
+
+	void temp_relock() const {}
+	void temp_unlock() const {}
+};
+
 #endif // THREADS_ENABLED
 
 #endif // SAFE_BINARY_MUTEX_H