LV2: Move nearly all notifications out of all mutex scopes including IDM

Authored by Eladash on 2022-08-04 13:13:51 +03:00, committed by Ivan
parent b55a052a22
commit 34bae90820
19 changed files with 181 additions and 151 deletions

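The whole change revolves around one pattern, sketched below as a stand-alone model (notify_guard, wake and signal_object are illustrative names, not RPCS3's): a thread-local flag raised by an RAII guard makes wake-ups requested while mutexes are held get queued instead of delivered, and the guard's destructor fires them only after every lock has been released. In the commit itself this role is played by lv2_obj::notify_all_t together with the new thread-local g_postpone_notify_barrier.

#include <condition_variable>
#include <mutex>
#include <vector>

// Minimal model of the pattern: while a guard is alive, wake-ups are queued
// instead of delivered; the guard's destructor fires them outside of any lock.
thread_local bool g_postpone_notify = false;
thread_local std::vector<std::condition_variable*> g_pending_wakeups;

struct notify_guard
{
    notify_guard() noexcept { g_postpone_notify = true; }

    ~notify_guard()
    {
        // Deliver the queued wake-ups only now, with no mutex held
        for (auto* cv : g_pending_wakeups)
            cv->notify_one();

        g_pending_wakeups.clear();
        g_postpone_notify = false;
    }
};

void wake(std::condition_variable& cv)
{
    if (g_postpone_notify)
        g_pending_wakeups.push_back(&cv); // recorded, fired by ~notify_guard()
    else
        cv.notify_one();                  // immediate fallback
}

void signal_object(std::mutex& m, std::condition_variable& cv, bool& ready)
{
    notify_guard guard; // constructed before the lock

    {
        std::lock_guard lock(m);
        ready = true;
        wake(cv); // nothing is woken yet
    }

    // ~notify_guard runs here: waiters are notified after m is unlocked,
    // so they cannot wake up only to immediately block on a mutex we still hold.
}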
View file

@@ -606,6 +606,7 @@ void cell_audio_thread::advance(u64 timestamp)
 	for (u32 i = 0; i < queue_count; i++)
 	{
+		lv2_obj::notify_all_t notify;
 		queues[i]->send(event_sources[i], 0, 0, 0);
 	}
 }

View file

@@ -1362,6 +1362,7 @@ void spu_thread::cpu_return()
 	if (ensure(group->running)-- == 1)
 	{
 		{
+			lv2_obj::notify_all_t notify;
 			std::lock_guard lock(group->mutex);

 			group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
@@ -4269,6 +4270,8 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
 	state += cpu_flag::wait;

+	lv2_obj::notify_all_t notify;
+
 	const u32 code = value >> 24;
 	{
 		if (code < 64)

View file

@@ -1192,6 +1192,7 @@ DECLARE(lv2_obj::g_ppu){};
 DECLARE(lv2_obj::g_pending){};
 thread_local DECLARE(lv2_obj::g_to_notify){};
+thread_local DECLARE(lv2_obj::g_postpone_notify_barrier){};
 thread_local DECLARE(lv2_obj::g_to_awake);

 // Scheduler queue for timeouts (wait until -> thread)
@@ -1205,31 +1206,57 @@ namespace cpu_counter
 	void remove(cpu_thread*) noexcept;
 }

-void lv2_obj::sleep(cpu_thread& cpu, const u64 timeout, bool notify_later)
+void lv2_obj::sleep(cpu_thread& cpu, const u64 timeout)
 {
 	// Should already be performed when using this flag
-	if (!notify_later)
+	if (!g_postpone_notify_barrier)
 	{
-		vm::temporary_unlock(cpu);
-		cpu_counter::remove(&cpu);
+		prepare_for_sleep(cpu);
 	}

 	{
 		std::lock_guard lock{g_mutex};
-		sleep_unlocked(cpu, timeout, notify_later);
+		sleep_unlocked(cpu, timeout);
+
+		if (!g_to_awake.empty())
+		{
+			// Schedule pending entries
+			awake_unlocked({});
+		}
+
+		schedule_all();
 	}

+	if (!g_postpone_notify_barrier)
+	{
+		notify_all();
+	}
+
 	g_to_awake.clear();
 }

-bool lv2_obj::awake(cpu_thread* const thread, bool notify_later, s32 prio)
+bool lv2_obj::awake(cpu_thread* thread, s32 prio)
 {
-	// Too risky to postpone it in case the notified thread may wait for this thread to free its passive lock
-	if (!notify_later)
+	bool result = false;
 	{
-		vm::temporary_unlock();
+		std::lock_guard lock(g_mutex);
+		result = awake_unlocked(thread, prio);
+		schedule_all();
 	}

-	std::lock_guard lock(g_mutex);
-	return awake_unlocked(thread, notify_later, prio);
+	if (result)
+	{
+		if (auto cpu = cpu_thread::get_current(); cpu && cpu->is_paused())
+		{
+			vm::temporary_unlock();
+		}
+	}
+
+	if (!g_postpone_notify_barrier)
+	{
+		notify_all();
+	}
+
+	return result;
 }

 bool lv2_obj::yield(cpu_thread& thread)
@@ -1241,10 +1268,10 @@ bool lv2_obj::yield(cpu_thread& thread)
 		ppu->raddr = 0; // Clear reservation
 	}

-	return awake(&thread, false, yield_cmd);
+	return awake(&thread, yield_cmd);
 }

-void lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout, bool notify_later)
+void lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout)
 {
 	const u64 start_time = get_guest_system_time();
@@ -1341,19 +1368,9 @@ void lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout, bool notify_later)
 			}
 		}
 	}

-	if (!g_to_awake.empty())
-	{
-		// Schedule pending entries
-		awake_unlocked({}, notify_later);
-	}
-	else
-	{
-		schedule_all(notify_later);
-	}
 }

-bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
+bool lv2_obj::awake_unlocked(cpu_thread* cpu, s32 prio)
 {
 	// Check thread type
 	AUDIT(!cpu || cpu->id_type() == 1);
@@ -1498,9 +1515,9 @@ bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
 	// Emplace current thread
 	if (!emplace_thread(cpu))
 	{
-		if (notify_later)
+		if (g_postpone_notify_barrier)
 		{
-			// notify_later includes common optimizations regarding syscalls
+			// This flag includes common optimizations regarding syscalls
 			// one of which is to allow a lock-free version of syscalls with awake behave as semaphore post: always notifies the thread, even if it hasn't slept yet
 			cpu->state += cpu_flag::signal;
 		}
@@ -1515,7 +1532,7 @@ bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
 	// Emplace threads from list
 	if (!emplace_thread(_cpu))
 	{
-		if (notify_later)
+		if (g_postpone_notify_barrier)
 		{
 			_cpu->state += cpu_flag::signal;
 		}
@@ -1557,7 +1574,6 @@ bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
 		}
 	}

-	schedule_all(notify_later);
 	return changed_queue;
 }
@@ -1569,11 +1585,11 @@ void lv2_obj::cleanup()
 	g_pending = 0;
 }

-void lv2_obj::schedule_all(bool notify_later)
+void lv2_obj::schedule_all()
 {
 	if (!g_pending && g_to_sleep.empty())
 	{
-		usz notify_later_idx = notify_later ? 0 : std::size(g_to_notify) - 1;
+		usz notify_later_idx = 0;

 		auto target = +g_ppu;
@@ -1592,7 +1608,7 @@ void lv2_obj::schedule_all(bool notify_later)
 		}
 		else
 		{
-			g_to_notify[notify_later_idx++] = target;
+			g_to_notify[notify_later_idx++] = &target->state;
 		}
 	}
 }

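schedule_all() above now always records pending wake-ups from slot 0 of g_to_notify and stores pointers to the target threads' state words rather than to the thread objects themselves. A rough stand-alone model of such a bounded pending-notification buffer is sketched below, assuming C++20 atomic wait/notify; RPCS3 instead calls atomic_wait_engine::notify_one on cpu_thread::state, and its overflow/fallback handling is not visible in this hunk.

#include <atomic>
#include <cstddef>

// Illustrative model of the g_to_notify buffer: while the scheduler mutex is
// held, wake-ups are only recorded; the delivery pass walks the array after
// the mutex has been dropped.
constexpr std::size_t max_pending = 4;
thread_local std::atomic<unsigned>* g_pending[max_pending]{};

inline void record_wakeup(std::atomic<unsigned>& state_word, std::size_t& idx)
{
    if (idx < max_pending)
        g_pending[idx++] = &state_word; // queued under the lock
}

inline void flush_wakeups()
{
    for (std::size_t i = 0; i < max_pending && g_pending[i]; i++)
    {
        g_pending[i]->notify_one(); // C++20 atomic wake, done outside the lock
        g_pending[i] = nullptr;
    }
}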
View file

@@ -142,12 +142,10 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
 	sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);

-	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
+	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
 	{
 		if (atomic_storage<ppu_thread*>::load(cond.sq))
 		{
-			lv2_obj::notify_all_t notify;
-
 			std::lock_guard lock(cond.mutex->mutex);

 			if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
@@ -162,7 +160,7 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
 			if (cond.mutex->try_own(*cpu))
 			{
-				cond.awake(cpu, true);
+				cond.awake(cpu);
 			}
 		}
 	}
@@ -182,12 +180,10 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
 	sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);

-	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
+	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
 	{
 		if (atomic_storage<ppu_thread*>::load(cond.sq))
 		{
-			lv2_obj::notify_all_t notify;
-
 			std::lock_guard lock(cond.mutex->mutex);

 			for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
@@ -213,7 +209,7 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
 			if (result)
 			{
-				cond.awake(result, true);
+				cond.awake(result);
 			}
 		}
 	});
@@ -232,7 +228,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
 	sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id, thread_id);

-	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> int
+	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond) -> int
 	{
 		if (!idm::check_unlocked<named_thread<ppu_thread>>(thread_id))
 		{
@@ -241,8 +237,6 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
 		if (atomic_storage<ppu_thread*>::load(cond.sq))
 		{
-			lv2_obj::notify_all_t notify;
-
 			std::lock_guard lock(cond.mutex->mutex);

 			for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
@@ -259,7 +253,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
 			if (cond.mutex->try_own(*cpu))
 			{
-				cond.awake(cpu, true);
+				cond.awake(cpu);
 			}

 			return 1;
@@ -294,14 +288,14 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 	auto& sstate = *ppu.optional_savestate_state;

-	const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> s64
+	const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond) -> s64
 	{
 		if (!ppu.loaded_from_savestate && atomic_storage<u32>::load(cond.mutex->control.raw().owner) != ppu.id)
 		{
 			return -1;
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(cond.mutex->mutex);
@@ -320,7 +314,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 			lv2_obj::emplace(cond.sq, &ppu);
 		}

-		cond.sleep(ppu, timeout, true);
+		cond.sleep(ppu, timeout);
 		return static_cast<u32>(syscall_state >> 32);
 	}
@@ -342,7 +336,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 		lv2_obj::emplace(cond.sq, &ppu);

 		// Sleep current thread and schedule mutex waiter
-		cond.sleep(ppu, timeout, true);
+		cond.sleep(ppu, timeout);

 		// Save the recursive value
 		return count;

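Each syscall above now builds the guard as a lambda init-capture: [&, notify = lv2_obj::notify_all_t()]. Because the closure is a temporary of the full call expression, the guard is constructed before idm::check/idm::get take the ID-manager lock and destroyed only after the call has returned, i.e. after both the IDM scope and any inner std::lock_guard are gone. A small stand-alone demonstration of that ordering follows; wake_guard and with_idm_lock are hypothetical stand-ins, not RPCS3 API.

#include <cstdio>
#include <mutex>

struct wake_guard
{
    wake_guard() noexcept { std::puts("guard constructed (postpone wake-ups)"); }
    ~wake_guard() { std::puts("guard destroyed (deliver wake-ups, no locks held)"); }
};

template <typename F>
auto with_idm_lock(F&& f) // stand-in for idm::check<...>(id, lambda)
{
    static std::mutex idm_mutex;
    std::lock_guard lock(idm_mutex);
    std::puts("IDM lock held, running callback");
    return f();
}

int main()
{
    // The guard lives inside the closure object, which is a temporary of the
    // full expression below: created before with_idm_lock() takes the lock,
    // destroyed only after the call has returned and the lock is released.
    with_idm_lock([&, notify = wake_guard()]()
    {
        std::puts("inside callback: object mutex scope, wake-ups only queued");
    });

    std::puts("after the call: queued wake-ups have already been delivered");
}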
View file

@@ -109,10 +109,8 @@ std::shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
 extern void resume_spu_thread_group_from_waiting(spu_thread& spu);

-CellError lv2_event_queue::send(lv2_event event, bool notify_later)
+CellError lv2_event_queue::send(lv2_event event)
 {
-	lv2_obj::notify_all_t notify;
-
 	std::lock_guard lock(mutex);

 	if (!exists)
@@ -153,7 +151,7 @@ CellError lv2_event_queue::send(lv2_event event, bool notify_later)
 		std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = event;

-		awake(&ppu, notify_later);
+		awake(&ppu);
 	}
 	else
 	{
@@ -414,14 +412,14 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
 	ppu.gpr[3] = CELL_OK;

-	const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id, [&](lv2_event_queue& queue) -> CellError
+	const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id, [&, notify = lv2_obj::notify_all_t()](lv2_event_queue& queue) -> CellError
 	{
 		if (queue.type != SYS_PPU_QUEUE)
 		{
 			return CELL_EINVAL;
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(queue.mutex);
@@ -435,7 +433,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
 		if (queue.events.empty())
 		{
-			queue.sleep(ppu, timeout, true);
+			queue.sleep(ppu, timeout);
 			lv2_obj::emplace(queue.pq, &ppu);
 			return CELL_EBUSY;
 		}
@@ -700,13 +698,13 @@ error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
 	sys_event.trace("sys_event_port_send(eport_id=0x%x, data1=0x%llx, data2=0x%llx, data3=0x%llx)", eport_id, data1, data2, data3);

-	const auto port = idm::check<lv2_obj, lv2_event_port>(eport_id, [&](lv2_event_port& port) -> CellError
+	const auto port = idm::check<lv2_obj, lv2_event_port>(eport_id, [&, notify = lv2_obj::notify_all_t()](lv2_event_port& port) -> CellError
 	{
 		if (lv2_obj::check(port.queue))
 		{
 			const u64 source = port.name ? port.name : (s64{process_getpid()} << 32) | u64{eport_id};

-			return port.queue->send(source, data1, data2, data3, true);
+			return port.queue->send(source, data1, data2, data3);
 		}

 		return CELL_ENOTCONN;

View file

@@ -103,11 +103,11 @@ struct lv2_event_queue final : public lv2_obj
 	static void save_ptr(utils::serial&, lv2_event_queue*);
 	static std::shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue);

-	CellError send(lv2_event event, bool notify_later = false);
+	CellError send(lv2_event event);

-	CellError send(u64 source, u64 d1, u64 d2, u64 d3, bool notify_later = false)
+	CellError send(u64 source, u64 d1, u64 d2, u64 d3)
 	{
-		return send(std::make_tuple(source, d1, d2, d3), notify_later);
+		return send(std::make_tuple(source, d1, d2, d3));
 	}

 	// Get event queue by its global key

View file

@@ -141,7 +141,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
 		return CELL_EINVAL;
 	}

-	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& flag) -> CellError
+	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id, [&, notify = lv2_obj::notify_all_t()](lv2_event_flag& flag) -> CellError
 	{
 		if (flag.pattern.fetch_op([&](u64& pat)
 		{
@@ -152,7 +152,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
 			return {};
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(flag.mutex);
@@ -169,7 +169,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
 			return CELL_EPERM;
 		}

-		flag.sleep(ppu, timeout, true);
+		flag.sleep(ppu, timeout);
 		lv2_obj::emplace(flag.sq, &ppu);
 		return CELL_EBUSY;
 	});
@@ -314,7 +314,7 @@ error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
 		return CELL_OK;
 	}

-	if (true)
+	if (lv2_obj::notify_all_t notify; true)
 	{
 		std::lock_guard lock(flag->mutex);
@@ -454,6 +454,8 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
 	u32 value = 0;
 	{
+		lv2_obj::notify_all_t notify;
+
 		std::lock_guard lock(flag->mutex);

 		for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu)

View file

@@ -101,7 +101,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
 		fmt::throw_exception("Unknown mode (%d)", mode);
 	}

-	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> int
+	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond) -> int
 	{
 		ppu_thread* cpu = nullptr;
@@ -129,8 +129,6 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
 		if (atomic_storage<ppu_thread*>::load(cond.sq))
 		{
-			lv2_obj::notify_all_t notify;
-
 			std::lock_guard lock(cond.mutex);

 			if (cpu)
@@ -189,7 +187,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
 			if (result)
 			{
-				cond.awake(result, true);
+				cond.awake(result);
 			}

 			return 1;
@@ -238,7 +236,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		fmt::throw_exception("Unknown mode (%d)", mode);
 	}

-	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> s32
+	const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond) -> s32
 	{
 		lv2_lwmutex* mutex{};
@@ -254,8 +252,6 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		if (atomic_storage<ppu_thread*>::load(cond.sq))
 		{
-			lv2_obj::notify_all_t notify;
-
 			std::lock_guard lock(cond.mutex);

 			u32 result = 0;
@@ -293,7 +289,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		if (result && mode == 2)
 		{
-			lv2_obj::awake_all(true);
+			lv2_obj::awake_all();
 		}

 		return result;
@@ -328,7 +324,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 	auto& sstate = *ppu.optional_savestate_state;

-	const auto cond = idm::get<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond)
+	const auto cond = idm::get<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond)
 	{
 		mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
@@ -340,7 +336,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		// Increment lwmutex's lwcond's waiters count
 		mutex->lwcond_waiters++;

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(cond.mutex);
@@ -377,7 +373,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		}

 		// Sleep current thread and schedule lwmutex waiter
-		cond.sleep(ppu, timeout, true);
+		cond.sleep(ppu, timeout);
 	});

 	if (!cond || !mutex)

View file

@@ -139,7 +139,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 	ppu.gpr[3] = CELL_OK;

-	const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
+	const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
 	{
 		if (s32 signal = mutex.lv2_control.fetch_op([](auto& data)
 		{
@@ -160,7 +160,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 			return true;
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		if (s32 signal = mutex.try_own(&ppu))
 		{
@@ -172,7 +172,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 			return true;
 		}

-		mutex.sleep(ppu, timeout, true);
+		mutex.sleep(ppu, timeout);
 		return false;
 	});
@@ -290,15 +290,13 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
 	sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);

-	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
+	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
 	{
 		if (mutex.try_unlock(false))
 		{
 			return;
 		}

-		lv2_obj::notify_all_t notify;
-
 		std::lock_guard lock(mutex.mutex);

 		if (const auto cpu = mutex.reown<ppu_thread>())
@@ -309,7 +307,7 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
 				return;
 			}

-			mutex.awake(cpu, true);
+			mutex.awake(cpu);
 			return;
 		}
 	});
@@ -328,15 +326,13 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
 	sys_lwmutex.warning("_sys_lwmutex_unlock2(lwmutex_id=0x%x)", lwmutex_id);

-	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
+	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
 	{
 		if (mutex.try_unlock(true))
 		{
 			return;
 		}

-		lv2_obj::notify_all_t notify;
-
 		std::lock_guard lock(mutex.mutex);

 		if (const auto cpu = mutex.reown<ppu_thread>(true))
@@ -348,7 +344,7 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
 		}

 		static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;

-			mutex.awake(cpu, true);
+			mutex.awake(cpu);
 			return;
 		}
 	});

View file

@@ -139,7 +139,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 	sys_mutex.trace("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id, timeout);

-	const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
+	const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_mutex& mutex)
 	{
 		CellError result = mutex.try_lock(ppu);
@@ -160,7 +160,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 		if (result == CELL_EBUSY)
 		{
-			lv2_obj::notify_all_t notify(ppu);
+			lv2_obj::prepare_for_sleep(ppu);

 			if (mutex.try_own(ppu))
 			{
@@ -168,7 +168,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 			}
 			else
 			{
-				mutex.sleep(ppu, timeout, true);
+				mutex.sleep(ppu, timeout);
 			}
 		}
@@ -292,14 +292,12 @@ error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
 	sys_mutex.trace("sys_mutex_unlock(mutex_id=0x%x)", mutex_id);

-	const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex) -> CellError
+	const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_mutex& mutex) -> CellError
 	{
 		auto result = mutex.try_unlock(ppu);

 		if (result == CELL_EBUSY)
 		{
-			lv2_obj::notify_all_t notify;
-
 			std::lock_guard lock(mutex.mutex);

 			if (auto cpu = mutex.reown<ppu_thread>())
@@ -310,7 +308,7 @@ error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
 				return {};
 			}

-			mutex.awake(cpu, true);
+			mutex.awake(cpu);
 		}

 		result = {};

View file

@@ -358,7 +358,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>
 	sys_net_sockaddr sn_addr{};
 	std::shared_ptr<lv2_socket> new_socket{};

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
 	{
 		const auto [success, res, res_socket, res_addr] = sock.accept();
@@ -391,6 +391,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>
 				return false;
 			});

+			lv2_obj::prepare_for_sleep(ppu);
 			lv2_obj::sleep(ppu);
 			return false;
 		});
@@ -482,7 +483,7 @@ error_code sys_net_bnet_bind(ppu_thread& ppu, s32 s, vm::cptr<sys_net_sockaddr>
 		return -SYS_NET_EAFNOSUPPORT;
 	}

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		return sock.bind(sn_addr);
 	});
@@ -519,7 +520,7 @@ error_code sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr
 	s32 result = 0;
 	sys_net_sockaddr sn_addr = *addr;

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
 	{
 		const auto success = sock.connect(sn_addr);
@@ -544,8 +545,8 @@ error_code sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr
 				return false;
 			});

+			lv2_obj::prepare_for_sleep(ppu);
 			lv2_obj::sleep(ppu);
 			return false;
 		});
@@ -612,7 +613,7 @@ error_code sys_net_bnet_getpeername(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sock
 		return -SYS_NET_EINVAL;
 	}

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		auto [res, sn_addr] = sock.getpeername();
@@ -650,7 +651,7 @@ error_code sys_net_bnet_getsockname(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sock
 		return -SYS_NET_EINVAL;
 	}

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		auto [res, sn_addr] = sock.getsockname();
@@ -708,7 +709,7 @@ error_code sys_net_bnet_getsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optnam
 		return -SYS_NET_EINVAL;
 	}

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		if (len < sizeof(s32))
 		{
@@ -750,7 +751,7 @@ error_code sys_net_bnet_listen(ppu_thread& ppu, s32 s, s32 backlog)
 		return -SYS_NET_EINVAL;
 	}

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		return sock.listen(backlog);
 	});
@@ -788,7 +789,7 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
 	s32 result = 0;
 	sys_net_sockaddr sn_addr{};

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
 	{
 		const auto success = sock.recvfrom(flags, len);
@@ -832,6 +833,7 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
 				return false;
 			});

+			lv2_obj::prepare_for_sleep(ppu);
 			lv2_obj::sleep(ppu);
 			return false;
 		});
@@ -929,7 +931,7 @@ error_code sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 l
 	const std::vector<u8> buf_copy(vm::_ptr<const char>(buf.addr()), vm::_ptr<const char>(buf.addr()) + len);
 	s32 result{};

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
 	{
 		auto success = sock.sendto(flags, buf_copy, sn_addr);
@@ -958,6 +960,7 @@ error_code sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 l
 				return false;
 			});

+			lv2_obj::prepare_for_sleep(ppu);
 			lv2_obj::sleep(ppu);
 			return false;
 		});
@@ -1036,7 +1039,7 @@ error_code sys_net_bnet_setsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optnam
 	std::vector<u8> optval_copy(vm::_ptr<u8>(optval.addr()), vm::_ptr<u8>(optval.addr() + optlen));

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		return sock.setsockopt(level, optname, optval_copy);
 	});
@@ -1065,7 +1068,7 @@ error_code sys_net_bnet_shutdown(ppu_thread& ppu, s32 s, s32 how)
 		return -SYS_NET_EINVAL;
 	}

-	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
+	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
 	{
 		return sock.shutdown(how);
 	});
@@ -1181,6 +1184,8 @@ error_code sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 n
 	{
 		fds_buf.assign(fds.get_ptr(), fds.get_ptr() + nfds);

+		lv2_obj::prepare_for_sleep(ppu);
+
 		std::unique_lock nw_lock(g_fxo->get<network_context>().s_nw_mutex);

 		std::shared_lock lock(id_manager::g_mutex);

View file

@@ -76,6 +76,9 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
 	// Avoid cases where cleaning causes the destructor to be called inside IDM lock scope (for performance)
 	std::shared_ptr<void> old_ppu;

+	lv2_obj::notify_all_t notify;
+	lv2_obj::prepare_for_sleep(ppu);
+
 	std::lock_guard lock(id_manager::g_mutex);

 	// Get joiner ID
@@ -105,6 +108,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
 	// Unqueue
 	lv2_obj::sleep(ppu);
+	notify.cleanup();

 	// Remove suspend state (TODO)
 	ppu.state -= cpu_flag::suspend;
@@ -138,11 +142,11 @@ s32 sys_ppu_thread_yield(ppu_thread& ppu)
 error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr)
 {
-	ppu.state += cpu_flag::wait;
+	lv2_obj::prepare_for_sleep(ppu);

 	sys_ppu_thread.trace("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)", thread_id, vptr);

-	auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
+	auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread& thread) -> CellError
 	{
 		if (&ppu == &thread)
 		{
@@ -173,6 +177,7 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
 		if (!result)
 		{
+			lv2_obj::prepare_for_sleep(ppu);
 			lv2_obj::sleep(ppu);
 		}
 		else if (result == CELL_EAGAIN)
@@ -180,6 +185,7 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
 			thread.joiner.notify_one();
 		}

+		notify.cleanup();
 		return result;
 	});
@@ -471,7 +477,7 @@ error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
 	sys_ppu_thread.trace("sys_ppu_thread_start(thread_id=0x%x)", thread_id);

-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
+	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread& thread) -> CellError
 	{
 		if (!thread.state.test_and_reset(cpu_flag::stop))
 		{
@@ -479,7 +485,7 @@ error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
 			return CELL_EBUSY;
 		}

-		lv2_obj::awake(&thread);
+		ensure(lv2_obj::awake(&thread));

 		thread.cmd_list
 		({

View file

@@ -102,7 +102,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 	sys_rwlock.trace("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

-	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock)
+	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&, notify = lv2_obj::notify_all_t()](lv2_rwlock& rwlock)
 	{
 		const s64 val = rwlock.owner;
@@ -114,7 +114,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 			}
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(rwlock.mutex);
@@ -132,7 +132,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 		if (_old > 0 || _old & 1)
 		{
-			rwlock.sleep(ppu, timeout, true);
+			rwlock.sleep(ppu, timeout);
 			lv2_obj::emplace(rwlock.rq, &ppu);
 			return false;
 		}
@@ -276,6 +276,8 @@ error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
 		return CELL_ESRCH;
 	}

+	lv2_obj::notify_all_t notify;
+
 	if (rwlock.ret)
 	{
 		return CELL_OK;
@@ -330,7 +332,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 	sys_rwlock.trace("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

-	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock) -> s64
+	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&, notify = lv2_obj::notify_all_t()](lv2_rwlock& rwlock) -> s64
 	{
 		const s64 val = rwlock.owner;
@@ -346,7 +348,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 			return val;
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(rwlock.mutex);
@@ -364,7 +366,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 		if (_old != 0)
 		{
-			rwlock.sleep(ppu, timeout, true);
+			rwlock.sleep(ppu, timeout);
 			lv2_obj::emplace(rwlock.wq, &ppu);
 		}
@@ -532,7 +534,7 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
 		return CELL_EPERM;
 	}

-	if (rwlock.ret & 1)
+	if (lv2_obj::notify_all_t notify; rwlock.ret & 1)
 	{
 		std::lock_guard lock(rwlock->mutex);

View file

@@ -111,7 +111,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
 	sys_semaphore.trace("sys_semaphore_wait(sem_id=0x%x, timeout=0x%llx)", sem_id, timeout);

-	const auto sem = idm::get<lv2_obj, lv2_sema>(sem_id, [&](lv2_sema& sema)
+	const auto sem = idm::get<lv2_obj, lv2_sema>(sem_id, [&, notify = lv2_obj::notify_all_t()](lv2_sema& sema)
 	{
 		const s32 val = sema.val;
@@ -123,13 +123,13 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
 			}
 		}

-		lv2_obj::notify_all_t notify(ppu);
+		lv2_obj::prepare_for_sleep(ppu);

 		std::lock_guard lock(sema.mutex);

 		if (sema.val-- <= 0)
 		{
-			sema.sleep(ppu, timeout, true);
+			sema.sleep(ppu, timeout);
 			lv2_obj::emplace(sema.sq, &ppu);
 			return false;
 		}
@@ -275,6 +275,8 @@ error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count)
 		return CELL_EINVAL;
 	}

+	lv2_obj::notify_all_t notify;
+
 	if (sem.ret)
 	{
 		return CELL_OK;

View file

@@ -1399,6 +1399,8 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
 	do
 	{
+		lv2_obj::prepare_for_sleep(ppu);
+
 		std::unique_lock lock(group->mutex);

 		const auto state = +group->run_state;
@@ -1433,8 +1435,11 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
 			group->waiter = &ppu;
 		}

-		lv2_obj::sleep(ppu);
-		lock.unlock();
+		{
+			lv2_obj::notify_all_t notify;
+			lv2_obj::sleep(ppu);
+			lock.unlock();
+		}

 		while (auto state = +ppu.state)
 		{

View file

@@ -225,17 +225,17 @@ public:
private:
 	// Remove the current thread from the scheduling queue, register timeout
-	static void sleep_unlocked(cpu_thread&, u64 timeout, bool notify_later);
+	static void sleep_unlocked(cpu_thread&, u64 timeout);

 	// Schedule the thread
-	static bool awake_unlocked(cpu_thread*, bool notify_later = false, s32 prio = enqueue_cmd);
+	static bool awake_unlocked(cpu_thread*, s32 prio = enqueue_cmd);

public:
 	static constexpr u64 max_timeout = u64{umax} / 1000;

-	static void sleep(cpu_thread& cpu, const u64 timeout = 0, bool notify_later = false);
+	static void sleep(cpu_thread& cpu, const u64 timeout = 0);

-	static bool awake(cpu_thread* const thread, bool notify_later = false, s32 prio = enqueue_cmd);
+	static bool awake(cpu_thread* thread, s32 prio = enqueue_cmd);

 	// Returns true on successful context switch, false otherwise
 	static bool yield(cpu_thread& thread);
@@ -243,12 +243,12 @@ public:
 	static void set_priority(cpu_thread& thread, s32 prio)
 	{
 		ensure(prio + 512u < 3712);
-		awake(&thread, false, prio);
+		awake(&thread, prio);
 	}

-	static inline void awake_all(bool notify_later = false)
+	static inline void awake_all()
 	{
-		awake({}, notify_later);
+		awake({});
 		g_to_awake.clear();
 	}
@@ -503,37 +503,32 @@ public:
 		if (!cpu)
 		{
 			g_to_notify[0] = nullptr;
+			g_postpone_notify_barrier = false;
 			return;
 		}

-		if (cpu->state & cpu_flag::signal)
-		{
-			cpu->state.notify_one(cpu_flag::suspend + cpu_flag::signal);
-		}
+		// Note: by the time of notification the thread could have been deallocated which is why the direct function is used
+		// TODO: Pass a narrower mask
+		atomic_wait_engine::notify_one(cpu, 4, atomic_wait::default_mask<atomic_bs_t<cpu_flag>>);
 		}
 	}

-	template <typename T = int>
+	// Can be called before the actual sleep call in order to move it out of mutex scope
+	static inline void prepare_for_sleep(cpu_thread& cpu)
+	{
+		vm::temporary_unlock(cpu);
+		cpu_counter::remove(&cpu);
+	}
+
 	struct notify_all_t
 	{
-		notify_all_t() noexcept = default;
-
-		notify_all_t(T& cpu) noexcept
+		notify_all_t() noexcept
 		{
-			vm::temporary_unlock(cpu);
-			cpu_counter::remove(&cpu);
+			g_postpone_notify_barrier = true;
 		}

 		~notify_all_t() noexcept
 		{
-			if constexpr (!std::is_base_of_v<cpu_thread, T>)
-			{
-				if (auto cpu = cpu_thread::get_current(); cpu && cpu->is_paused())
-				{
-					vm::temporary_unlock(*cpu);
-				}
-			}
-
 			lv2_obj::notify_all();
 		}
 	};
@@ -551,8 +546,11 @@ private:
 	// Waiting for the response from
 	static u32 g_pending;

-	// Pending list of threads to notify
-	static thread_local std::add_pointer_t<class cpu_thread> g_to_notify[4];
+	// Pending list of threads to notify (cpu_thread::state ptr)
+	static thread_local std::add_pointer_t<const void> g_to_notify[4];

-	static void schedule_all(bool notify_later);
+	// If a notify_all_t object exists locally, postpone notifications to the destructor of it (not recursive, notifies on the first destructor for safety)
+	static thread_local bool g_postpone_notify_barrier;
+
+	static void schedule_all();
};

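The header changes above define the whole mechanism: prepare_for_sleep() does the pre-lock cleanup the old notify_all_t(T& cpu) constructor used to perform, the default constructor only raises g_postpone_notify_barrier, and the destructor drains g_to_notify through the wait engine. Below is a stand-alone model of the waiter/signaller split this enables (illustrative types, assuming C++20 std::atomic wait/notify, not RPCS3 code): the waiter parks itself under the object mutex but blocks on its own state word outside of it, so the signaller can deliver the notification after dropping the mutex.

#include <atomic>
#include <deque>
#include <mutex>

struct waiter
{
    std::atomic<unsigned> state{0}; // bit 0 = "sleeping"
};

struct object
{
    std::mutex mtx;
    std::deque<waiter*> queue;
    bool ready = false;
};

void wait_on(object& obj, waiter& self)
{
    {
        std::lock_guard lock(obj.mtx);

        if (obj.ready)
        {
            obj.ready = false; // fast path, nothing to sleep on
            return;
        }

        self.state.store(1);        // mark as sleeping
        obj.queue.push_back(&self); // park on the object's wait queue
    }

    // Block outside of the object mutex, on our own state word.
    while (self.state.load() & 1)
    {
        self.state.wait(1); // C++20: returns when the value differs from 1
    }
}

void signal_one(object& obj)
{
    waiter* w = nullptr;
    {
        std::lock_guard lock(obj.mtx);

        if (obj.queue.empty())
        {
            obj.ready = true;
            return;
        }

        w = obj.queue.front();
        obj.queue.pop_front();
        w->state.store(0); // clear the sleeping bit under the lock
    }

    w->state.notify_one(); // wake only after the mutex has been released
}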
View file

@@ -60,6 +60,8 @@ u64 lv2_timer::check()
 		// If aborting, perform the last accurate check for event
 		if (_now >= next)
 		{
+			lv2_obj::notify_all_t notify;
+
 			std::lock_guard lock(mutex);

 			if (next = expire; _now < next)

View file

@@ -382,11 +382,16 @@ namespace vm
 	void temporary_unlock(cpu_thread& cpu) noexcept
 	{
-		if (!(cpu.state & cpu_flag::wait)) cpu.state += cpu_flag::wait;
+		bs_t<cpu_flag> add_state = cpu_flag::wait;

 		if (g_tls_locked && g_tls_locked->compare_and_swap_test(&cpu, nullptr))
 		{
-			cpu.state += cpu_flag::memory;
+			add_state += cpu_flag::memory;
+		}
+
+		if (add_state - cpu.state)
+		{
+			cpu.state += add_state;
 		}
 	}

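vm::temporary_unlock() now accumulates the flags it wants to raise in a local bs_t and issues a single atomic update, and only when at least one of those flags is not already set (add_state - cpu.state is bs_t's set difference). A minimal stand-alone rendering of the same idea with a plain std::atomic bit mask:

#include <atomic>
#include <cstdint>

enum : std::uint32_t { flag_wait = 1u << 0, flag_memory = 1u << 1 };

void set_flags_once(std::atomic<std::uint32_t>& state, bool also_memory)
{
    std::uint32_t add_state = flag_wait;

    if (also_memory)
        add_state |= flag_memory;

    // Only touch the shared word if at least one requested flag is missing;
    // this is the "add_state - cpu.state" test expressed with plain bit ops.
    if (add_state & ~state.load(std::memory_order_relaxed))
        state.fetch_or(add_state, std::memory_order_acq_rel); // one RMW instead of up to two
}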
View file

@@ -315,10 +315,11 @@ private:
 	friend class atomic_wait::list;

 	static void wait(const void* data, u32 size, u128 old_value, u64 timeout, u128 mask, atomic_wait::info* ext = nullptr);

+public:
 	static void notify_one(const void* data, u32 size, u128 mask128);
 	static void notify_all(const void* data, u32 size, u128 mask128);

-public:
+
 	static void set_wait_callback(bool(*cb)(const void* data, u64 attempts, u64 stamp0));
 	static void set_notify_callback(void(*cb)(const void* data, u64 progress));