Initial Commit

This commit is contained in:
Sajid 2024-09-30 12:06:17 +06:00
commit db51236165
42 changed files with 4843 additions and 0 deletions

10
.editorconfig Normal file
View file

@ -0,0 +1,10 @@
# editorconfig.org
root = true
[*]
charset = utf-8
indent_style = space
indent_size = 4
insert_final_newline = true
end_of_line = lf

399
.gitignore vendored Normal file
View file

@ -0,0 +1,399 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
[Ll]ogs/
[Oo]ut/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.tlog
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio 6 auto-generated project file (contains which files were open etc.)
*.vbp
# Visual Studio 6 workspace and project file (working project files containing files to include in project)
*.dsw
*.dsp
# Visual Studio 6 technical files
*.ncb
*.aps
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# Visual Studio History (VSHistory) files
.vshistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd
# VS Code files for those working on multiple tools
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
*.code-workspace
# Local History for Visual Studio Code
.history/
# Windows Installer files from build outputs
*.cab
*.msi
*.msix
*.msm
*.msp
# JetBrains Rider
*.sln.iml

3
.gitmodules vendored Normal file
View file

@ -0,0 +1,3 @@
[submodule "thirdparty/PowerRecomp"]
path = thirdparty/PowerRecomp
url = https://github.com/hedge-dev/PowerRecomp

19
CMakeLists.txt Normal file
View file

@ -0,0 +1,19 @@
cmake_minimum_required (VERSION 3.20)
set(SWA_THIRDPARTY_ROOT ${CMAKE_SOURCE_DIR}/thirdparty)
set(CMAKE_CXX_STANDARD 23)
set(BUILD_SHARED_LIBS OFF)
# Enable Hot Reload for MSVC compilers if supported.
if (POLICY CMP0141)
cmake_policy(SET CMP0141 NEW)
set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT "$<IF:$<AND:$<C_COMPILER_ID:MSVC>,$<CXX_COMPILER_ID:MSVC>>,$<$<CONFIG:Debug,RelWithDebInfo>:EditAndContinue>,$<$<CONFIG:Debug,RelWithDebInfo>:ProgramDatabase>>")
endif()
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
add_subdirectory(${SWA_THIRDPARTY_ROOT})
project("UnleashedRecomp-ALL")
# Include sub-projects.
add_subdirectory("UnleashedRecomp")

28
CMakeSettings.json Normal file
View file

@ -0,0 +1,28 @@
{
"configurations": [
{
"name": "x64-Clang-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "clang_cl_x64_x64" ],
"variables": []
},
{
"name": "x64-Clang-Release",
"generator": "Ninja",
"configurationType": "RelWithDebInfo",
"buildRoot": "${projectDir}\\out\\build\\${name}",
"installRoot": "${projectDir}\\out\\install\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "clang_cl_x64_x64" ],
"variables": []
}
]
}

View file

@ -0,0 +1,61 @@
project("UnleashedRecomp")
set(TARGET_NAME "SWA")
file(GLOB SWA_ROOT_CXX_SOURCES "*.cpp") # glob of top-level sources; the result is not referenced below
add_compile_definitions(SWA_IMPL)
add_compile_options(
"/D_HAS_EXCEPTIONS=0"
"/fp:strict"
"/GS-"
"/EHa-"
"-march=haswell"
"-fno-strict-aliasing")
file(GLOB SWA_RECOMPILED_SOURCES "ppc/*.cpp")
set(SWA_KERNEL_CXX_SOURCES
"kernel/imports.cpp"
"kernel/xdm.cpp"
"kernel/heap.cpp"
"kernel/memory.cpp"
"kernel/xam.cpp"
"kernel/io/file_system.cpp"
)
set(SWA_CPU_CXX_SOURCES
"cpu/guest_thread.cpp"
"cpu/code_cache.cpp"
)
set(SWA_GPU_CXX_SOURCES
"gpu/window.cpp"
)
set(SWA_HID_CXX_SOURCES
"hid/hid.cpp"
)
set(SWA_CXX_SOURCES
"main.cpp"
${SWA_KERNEL_CXX_SOURCES}
${SWA_CPU_CXX_SOURCES}
${SWA_GPU_CXX_SOURCES}
${SWA_HID_CXX_SOURCES}
)
add_executable(UnleashedRecomp ${SWA_RECOMPILED_SOURCES} ${SWA_CXX_SOURCES})
set_target_properties(UnleashedRecomp PROPERTIES OUTPUT_NAME ${TARGET_NAME})
target_link_libraries(UnleashedRecomp PUBLIC
PowerUtils
o1heap
xxHash::xxhash
unordered_dense::unordered_dense
winmm
ntdll
comctl32
)
target_include_directories(UnleashedRecomp PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_precompile_headers(UnleashedRecomp PUBLIC "ppc/ppc_recomp_shared.h")

75
UnleashedRecomp/Config.h Normal file
View file

@ -0,0 +1,75 @@
#pragma once
#define INI_FILE "SWA.ini"
#define INI_BEGIN_SECTION(section) { std::string CurrentSection = section;
#define INI_END_SECTION() }
#define INI_READ_STRING(var) var = BasicIni::Get(ini, CurrentSection, #var, var)
#define INI_READ_BOOLEAN(var) var = BasicIni::GetBoolean(ini, CurrentSection, #var, var)
#define INI_READ_FLOAT(var) var = BasicIni::GetFloat(ini, CurrentSection, #var, var)
#define INI_READ_INTEGER(var) var = BasicIni::GetInteger(ini, CurrentSection, #var, var)
#define INI_READ_DOUBLE(var) var = BasicIni::GetDouble(ini, CurrentSection, #var, var)
#define INI_READ_ENUM(type, var) var = (type)BasicIni::GetInteger(ini, CurrentSection, #var, var)
enum ELanguage : uint32_t
{
ELanguage_English = 1,
ELanguage_Japanese,
ELanguage_German,
ELanguage_French,
ELanguage_Spanish,
ELanguage_Italian
};
enum EScoreBehaviour : uint32_t
{
EScoreBehaviour_CheckpointReset,
EScoreBehaviour_CheckpointRetain
};
enum EMovieScaleMode : uint32_t
{
EMovieScaleMode_Stretch,
EMovieScaleMode_Fit,
EMovieScaleMode_Fill
};
enum EUIScaleMode : uint32_t
{
EUIScaleMode_Stretch,
EUIScaleMode_Edge,
EUIScaleMode_Centre
};
class Config
{
public:
// System
inline static ELanguage Language = ELanguage_English;
inline static EScoreBehaviour ScoreBehaviour = EScoreBehaviour_CheckpointReset;
inline static bool Hints = true;
inline static bool WerehogHubTransformVideo = true;
inline static bool BootToTitle = false;
// Controls
inline static bool XButtonHoming = true;
inline static bool UnleashCancel = false;
// Audio
inline static bool WerehogBattleMusic = true;
// Video
inline static uint32_t Width = 1280;
inline static uint32_t Height = 720;
inline static int32_t ShadowResolution = 4096;
inline static size_t MSAA = 4;
inline static EMovieScaleMode MovieScaleMode = EMovieScaleMode_Fit;
inline static EUIScaleMode UIScaleMode = EUIScaleMode_Centre;
inline static bool AlphaToCoverage = false;
inline static bool Fullscreen = false;
inline static bool VSync = false;
inline static uint32_t BufferCount = 3;
static void Read();
};
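The INI_* macros above expand to BasicIni lookups scoped to the section named by INI_BEGIN_SECTION. A hedged sketch of how Config::Read() could combine them; the BasicIni::Parse call is an assumption, it is not declared in this header:
void Config::Read()
{
    // Assumption: BasicIni::Parse loads INI_FILE into whatever container BasicIni::Get expects.
    const auto ini = BasicIni::Parse(INI_FILE);
    INI_BEGIN_SECTION("System")
        INI_READ_ENUM(ELanguage, Language);
        INI_READ_ENUM(EScoreBehaviour, ScoreBehaviour);
        INI_READ_BOOLEAN(Hints);
        INI_READ_BOOLEAN(BootToTitle);
    INI_END_SECTION()
    INI_BEGIN_SECTION("Video")
        INI_READ_INTEGER(Width);
        INI_READ_INTEGER(Height);
        INI_READ_BOOLEAN(VSync);
        INI_READ_ENUM(EUIScaleMode, UIScaleMode);
    INI_END_SECTION()
}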

23
UnleashedRecomp/Mutex.h Normal file
View file

@ -0,0 +1,23 @@
#pragma once
struct Mutex : CRITICAL_SECTION
{
Mutex()
{
InitializeCriticalSection(this);
}
~Mutex()
{
DeleteCriticalSection(this);
}
void lock()
{
EnterCriticalSection(this);
}
void unlock()
{
LeaveCriticalSection(this);
}
};
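Because Mutex exposes lock()/unlock(), it satisfies BasicLockable and works with the standard scope guards; the heap and kernel-object code below rely on exactly this. A minimal illustrative use (the instance here is not part of this commit):
Mutex gExampleLock; // illustrative only
void Example()
{
    std::lock_guard lock(gExampleLock); // EnterCriticalSection now, LeaveCriticalSection on scope exit
}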

View file

@ -0,0 +1,37 @@
#include <stdafx.h>
#include "code_cache.h"
#include "ppc_context.h"
CodeCache::CodeCache()
{
bucket = (char*)VirtualAlloc(nullptr, 0x200000000, MEM_RESERVE, PAGE_READWRITE);
assert(bucket);
}
CodeCache::~CodeCache()
{
VirtualFree(bucket, 0, MEM_RELEASE);
}
void CodeCache::Init()
{
for (size_t i = 0; PPCFuncMappings[i].guest != 0; i++)
{
if (PPCFuncMappings[i].host != nullptr)
{
VirtualAlloc(bucket + PPCFuncMappings[i].guest * 2, sizeof(void*), MEM_COMMIT, PAGE_READWRITE);
*(void**)(bucket + PPCFuncMappings[i].guest * 2) = PPCFuncMappings[i].host;
}
}
}
void CodeCache::Insert(uint32_t guest, const void* host)
{
VirtualAlloc(bucket + static_cast<uint64_t>(guest) * 2, sizeof(void*), MEM_COMMIT, PAGE_READWRITE);
*reinterpret_cast<const void**>(bucket + static_cast<uint64_t>(guest) * 2) = host;
}
void* CodeCache::Find(uint32_t guest) const
{
return *reinterpret_cast<void**>(bucket + static_cast<uint64_t>(guest) * 2);
}
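The bucket is a large reservation indexed at guest address * 2: each 4-byte-aligned guest code address owns an 8-byte slot holding the host pointer, so a lookup is a single load with no hashing. A hedged sketch of registering and resolving one mapping; the guest address and host function below are illustrative, not part of this commit:
static void SomeHostFunc(PPCContext& ctx, uint8_t* base); // hypothetical recompiled function
void RegisterExample()
{
    gCodeCache.Insert(0x82AB0000, reinterpret_cast<const void*>(&SomeHostFunc)); // commits and fills bucket + 0x82AB0000 * 2
    void* host = gCodeCache.Find(0x82AB0000); // one load; only valid for guest addresses that were inserted or mapped by Init()
    assert(host == reinterpret_cast<void*>(&SomeHostFunc));
}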

View file

@ -0,0 +1,16 @@
#pragma once
struct CodeCache
{
char* bucket{};
CodeCache();
~CodeCache();
void Init();
void Insert(uint32_t guest, const void* host);
void* Find(uint32_t guest) const;
};
extern CodeCache gCodeCache;

View file

@ -0,0 +1,23 @@
#pragma once
#include "ppc_context.h"
#include "memory.h"
struct GuestCode
{
inline static void Run(void* hostAddress, PPCContext* ctx, void* baseAddress, void* callStack)
{
ctx->fpscr.loadFromHost();
reinterpret_cast<PPCFunc*>(hostAddress)(*ctx, reinterpret_cast<uint8_t*>(baseAddress));
}
inline static void Run(void* hostAddress, PPCContext* ctx)
{
ctx->fpscr.loadFromHost();
reinterpret_cast<PPCFunc*>(hostAddress)(*ctx, reinterpret_cast<uint8_t*>(gMemory.base));
}
inline static void Run(void* hostAddress)
{
Run(hostAddress, GetPPCContext());
}
};

View file

@ -0,0 +1,143 @@
#include <stdafx.h>
#include "guest_thread.h"
#include <kernel/memory.h>
#include <kernel/heap.h>
#include <kernel/function.h>
#include "code_cache.h"
#include "guest_code.h"
#include "ppc_context.h"
constexpr size_t PCR_SIZE = 0xAB0;
constexpr size_t TLS_SIZE = 0x100;
constexpr size_t TEB_SIZE = 0x2E0;
constexpr size_t STACK_SIZE = 0x40000;
constexpr size_t CALL_STACK_SIZE = 0x8000;
constexpr size_t TOTAL_SIZE = PCR_SIZE + TLS_SIZE + TEB_SIZE + STACK_SIZE + CALL_STACK_SIZE;
constexpr size_t TEB_OFFSET = PCR_SIZE + TLS_SIZE;
DWORD GuestThread::Start(uint32_t function)
{
const GuestThreadParameter parameter{ function };
return Start(parameter);
}
DWORD GuestThread::Start(const GuestThreadParameter& parameter)
{
auto* thread = (uint8_t*)gUserHeap.Alloc(TOTAL_SIZE);
const auto procMask = (uint8_t)(parameter.flags >> 24);
const auto cpuNumber = procMask == 0 ? 0 : 7 - std::countl_zero(procMask);
memset(thread, 0, TOTAL_SIZE);
*(uint32_t*)thread = std::byteswap(gMemory.MapVirtual(thread + PCR_SIZE)); // tls pointer
*(uint32_t*)(thread + 0x100) = std::byteswap(gMemory.MapVirtual(thread + PCR_SIZE + TLS_SIZE)); // teb pointer
*(thread + 0x10C) = cpuNumber;
*(uint32_t*)(thread + PCR_SIZE + 0x10) = 0xFFFFFFFF; // TLS entry the guest expects to find initialized to -1
*(uint32_t*)(thread + PCR_SIZE + TLS_SIZE + 0x14C) = std::byteswap(GetCurrentThreadId()); // thread id
PPCContext ppcContext{};
ppcContext.fn = (uint8_t*)gCodeCache.bucket;
ppcContext.r1.u64 = gMemory.MapVirtual(thread + PCR_SIZE + TLS_SIZE + TEB_SIZE + STACK_SIZE); // stack pointer
ppcContext.r3.u64 = parameter.value;
ppcContext.r13.u64 = gMemory.MapVirtual(thread);
SetPPCContext(ppcContext);
GuestCode::Run(gCodeCache.Find(parameter.function), &ppcContext, gMemory.Translate(0), gMemory.Translate(ppcContext.r1.u32));
gUserHeap.Free(thread);
return (DWORD)ppcContext.r3.u64;
}
DWORD HostThreadStart(void* pParameter)
{
auto* parameter = static_cast<GuestThreadParameter*>(pParameter);
const auto result = GuestThread::Start(*parameter);
delete parameter;
return result;
}
HANDLE GuestThread::Start(uint32_t function, uint32_t parameter, uint32_t flags, LPDWORD threadId)
{
const auto hostCreationFlags = (flags & 1) != 0 ? CREATE_SUSPENDED : 0;
//return CreateThread(nullptr, 0, Start, (void*)((uint64_t(parameter) << 32) | function), suspended ? CREATE_SUSPENDED : 0, threadId);
return CreateThread(nullptr, 0, HostThreadStart, new GuestThreadParameter{ function, parameter, flags }, hostCreationFlags, threadId);
}
void GuestThread::SetThreadName(uint32_t id, const char* name)
{
#pragma pack(push,8)
const DWORD MS_VC_EXCEPTION = 0x406D1388;
typedef struct tagTHREADNAME_INFO
{
DWORD dwType; // Must be 0x1000.
LPCSTR szName; // Pointer to name (in user addr space).
DWORD dwThreadID; // Thread ID (-1=caller thread).
DWORD dwFlags; // Reserved for future use, must be zero.
} THREADNAME_INFO;
#pragma pack(pop)
THREADNAME_INFO info;
info.dwType = 0x1000;
info.szName = name;
info.dwThreadID = id;
info.dwFlags = 0;
__try
{
RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
}
__except (EXCEPTION_EXECUTE_HANDLER)
{
}
}
void GuestThread::SetLastError(DWORD error)
{
auto* thread = (char*)gMemory.Translate(GetPPCContext()->r13.u32);
if (*(DWORD*)(thread + 0x150))
{
// Program doesn't want errors
return;
}
// TEB + 0x160 : Win32LastError
*(DWORD*)(thread + TEB_OFFSET + 0x160) = std::byteswap(error);
}
PPCContext* GuestThread::Invoke(uint32_t address)
{
auto* ctx = GetPPCContext();
GuestCode::Run(gCodeCache.Find(address), ctx);
return ctx;
}
void SetThreadNameImpl(uint32_t a1, uint32_t threadId, uint32_t* name)
{
GuestThread::SetThreadName(threadId, (const char*)gMemory.Translate(std::byteswap(*name)));
}
int GetThreadPriorityImpl(uint32_t hThread)
{
return GetThreadPriority((HANDLE)hThread);
}
DWORD SetThreadIdealProcessorImpl(uint32_t hThread, DWORD dwIdealProcessor)
{
return SetThreadIdealProcessor((HANDLE)hThread, dwIdealProcessor);
}
GUEST_FUNCTION_HOOK(sub_82DFA2E8, SetThreadNameImpl);
GUEST_FUNCTION_HOOK(sub_82BD57A8, GetThreadPriorityImpl);
GUEST_FUNCTION_HOOK(sub_82BD5910, SetThreadIdealProcessorImpl);
void GuestThread::InitHooks()
{
}
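For reference, the per-thread block that Start() carves out of the user heap is laid out as follows (offsets follow the constants and stores above):
// offset 0x0                              : PCR (0xAB0 bytes)      - r13 points here; +0x0 TLS pointer, +0x100 TEB pointer, +0x10C CPU number
// offset PCR_SIZE                         : TLS (0x100 bytes)      - +0x10 initialized to 0xFFFFFFFF
// offset PCR_SIZE + TLS_SIZE              : TEB (0x2E0 bytes)      - +0x14C thread id, +0x160 Win32LastError (see SetLastError)
// offset PCR_SIZE + TLS_SIZE + TEB_SIZE   : stack (0x40000 bytes)  - r1 is set to the end of this region
// offset previous + STACK_SIZE            : call stack (0x8000 bytes)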

View file

@ -0,0 +1,21 @@
#pragma once
struct PPCContext;
struct GuestThreadParameter
{
uint32_t function;
uint32_t value;
uint32_t flags;
};
struct GuestThread
{
static DWORD Start(uint32_t function);
static DWORD Start(const GuestThreadParameter& parameter);
static HANDLE Start(uint32_t function, uint32_t parameter, uint32_t flags, LPDWORD threadId);
static void SetThreadName(uint32_t id, const char* name);
static void SetLastError(DWORD error);
static PPCContext* Invoke(uint32_t address);
static void InitHooks();
};

View file

@ -0,0 +1,15 @@
#pragma once
#include "ppc/ppc_context.h"
#include "ppc/ppc_recomp_shared.h"
inline thread_local PPCContext* gPPCContext;
inline PPCContext* GetPPCContext()
{
return gPPCContext;
}
inline void SetPPCContext(PPCContext& ctx)
{
gPPCContext = &ctx;
}

View file

@ -0,0 +1,69 @@
#pragma once
#ifdef _MSVC
#define SWA_DLLEXPORT __declspec(dllexport)
#define SWA_DLLIMPORT __declspec(dllimport)
#else
#define SWA_DLLEXPORT __attribute__((dllexport))
#define SWA_DLLIMPORT __attribute__((dllimport))
#endif
#ifdef SWA_IMPL
#define SWA_API extern "C" SWA_DLLEXPORT
#else
#define SWA_API extern "C" SWA_DLLIMPORT
#endif
template<typename T>
void ByteSwap(T& value)
{
value = std::byteswap(value);
}
template<typename T>
T RoundUp(const T& in_rValue, uint32_t in_round)
{
return (in_rValue + in_round - 1) & ~(in_round - 1);
}
template<typename T>
T RoundDown(const T& in_rValue, uint32_t in_round)
{
return in_rValue & ~(in_round - 1);
}
inline bool FileExists(const char* path)
{
const auto attributes = GetFileAttributesA(path);
return attributes != INVALID_FILE_ATTRIBUTES && !(attributes & FILE_ATTRIBUTE_DIRECTORY);
}
inline bool DirectoryExists(const char* path)
{
const auto attributes = GetFileAttributesA(path);
return attributes != INVALID_FILE_ATTRIBUTES && !!(attributes & FILE_ATTRIBUTE_DIRECTORY);
}
inline size_t StringHash(const std::string_view& str)
{
return XXH3_64bits(str.data(), str.size());
}
template<typename TValue>
constexpr size_t FirstBitLow(TValue value)
{
constexpr size_t nbits = sizeof(TValue) * 8;
constexpr auto zero = TValue{};
constexpr auto one = static_cast<TValue>(1);
for (size_t i = 0; i < nbits; i++)
{
if ((value & (one << i)) != zero)
{
return i;
}
}
return 0;
}
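RoundUp and RoundDown assume a power-of-two rounding value, and FirstBitLow returns the index of the lowest set bit (0 for a zero input). Illustrative values:
void ExampleHelpers()
{
    assert(RoundUp(0x1234u, 0x1000) == 0x2000u);
    assert(RoundDown(0x1234u, 0x1000) == 0x1000u);
    static_assert(FirstBitLow(0x80u) == 7);
}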

View file

@ -0,0 +1 @@
#include "Window.h"

View file

@ -0,0 +1 @@
#pragma once

View file

@ -0,0 +1,22 @@
#include <stdafx.h>
#include "hid.h"
DWORD hid::GetState(DWORD dwUserIndex, XAMINPUT_STATE* pState)
{
return 1;
}
DWORD hid::SetState(DWORD dwUserIndex, XAMINPUT_VIBRATION* pVibration)
{
return 1;
}
DWORD hid::GetCapabilities(DWORD dwUserIndex, XAMINPUT_CAPABILITIES* pCaps)
{
return 1;
}
int hid::OnSDLEvent(void*, SDL_Event* event)
{
return 0;
}

13
UnleashedRecomp/hid/hid.h Normal file
View file

@ -0,0 +1,13 @@
#pragma once
union SDL_Event;
namespace hid
{
void Init();
DWORD GetState(DWORD dwUserIndex, XAMINPUT_STATE* pState);
DWORD SetState(DWORD dwUserIndex, XAMINPUT_VIBRATION* pVibration);
DWORD GetCapabilities(DWORD dwUserIndex, XAMINPUT_CAPABILITIES* pCaps);
int OnSDLEvent(void*, SDL_Event* event);
}

View file

@ -0,0 +1,40 @@
#pragma once
template<typename T>
struct FreeList
{
std::vector<T> items;
std::vector<size_t> freed{};
void Free(T& item)
{
std::destroy_at(&item);
freed.push_back(&item - items.data());
}
void Free(size_t index)
{
std::destroy_at(&items[index]);
freed.push_back(index);
}
size_t Alloc()
{
if (freed.size())
{
auto idx = freed[freed.size() - 1];
freed.pop_back();
std::construct_at(&items[idx]);
return idx;
}
items.emplace_back();
return items.size() - 1;
}
T& operator[](size_t idx)
{
return items[idx];
}
};
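Alloc() hands out stable indices rather than pointers, and freed slots are recycled before the vector grows, which keeps handle values dense. A small illustrative sequence:
void ExampleFreeList()
{
    FreeList<uint32_t> pool;
    const size_t a = pool.Alloc(); // index 0, grows the vector
    pool[a] = 42;
    pool.Free(a);                  // slot 0 is pushed onto the freed stack
    const size_t b = pool.Alloc(); // reuses slot 0 instead of growing
    assert(a == b);
}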

View file

@ -0,0 +1,216 @@
#pragma once
#include <cpu/ppc_context.h>
#include <array>
#include "xbox.h"
template <typename R, typename... T>
constexpr std::tuple<T...> function_args(R(*)(T...)) noexcept
{
return std::tuple<T...>();
}
template<auto V>
static constexpr decltype(V) constant_v = V;
template<typename T>
static constexpr bool is_precise_v = std::is_same_v<T, float> || std::is_same_v<T, double>;
template<auto Func>
struct arg_count_t
{
static constexpr size_t value = std::tuple_size_v<decltype(function_args(Func))>;
};
template<typename TCallable, int I = 0, typename ...TArgs>
std::enable_if_t<(I >= sizeof...(TArgs)), void> _tuple_for(std::tuple<TArgs...>&, const TCallable& callable) noexcept
{
}
template<typename TCallable, int I = 0, typename ...TArgs>
std::enable_if_t<(I < sizeof...(TArgs)), void> _tuple_for(std::tuple<TArgs...>& tpl, const TCallable& callable) noexcept
{
callable(std::get<I>(tpl), I);
_tuple_for<TCallable, I + 1>(tpl, callable);
}
struct ArgTranslator
{
FORCEINLINE constexpr static uint64_t GetIntegerArgumentValue(const PPCContext& ctx, uint8_t* base, size_t arg) noexcept
{
if (arg <= 7)
{
switch (arg)
{
case 0: return ctx.r3.u32;
case 1: return ctx.r4.u32;
case 2: return ctx.r5.u32;
case 3: return ctx.r6.u32;
case 4: return ctx.r7.u32;
case 5: return ctx.r8.u32;
case 6: return ctx.r9.u32;
case 7: return ctx.r10.u32;
default: break;
}
}
return *reinterpret_cast<XLPDWORD>(base + ctx.r1.u32 + 0x54 + ((arg - 8) * 8));
}
FORCEINLINE static double GetPrecisionArgumentValue(const PPCContext& ctx, uint8_t* base, size_t arg) noexcept
{
switch (arg)
{
case 0: return ctx.f1.f64;
case 1: return ctx.f2.f64;
case 2: return ctx.f3.f64;
case 3: return ctx.f4.f64;
case 4: return ctx.f5.f64;
case 5: return ctx.f6.f64;
case 6: return ctx.f7.f64;
case 7: return ctx.f8.f64;
case 8: return ctx.f9.f64;
case 9: return ctx.f10.f64;
case 10: return ctx.f11.f64;
case 11: return ctx.f12.f64;
case 12: return ctx.f13.f64;
[[unlikely]] default: break;
}
// should be unreachable: only f1-f13 carry floating-point arguments
return 0;
}
template<typename T>
FORCEINLINE constexpr static std::enable_if_t<!std::is_pointer_v<T>, T> GetValue(PPCContext& ctx, uint8_t* base, size_t idx) noexcept
{
if constexpr (is_precise_v<T>)
{
return static_cast<T>(GetPrecisionArgumentValue(ctx, base, idx));
}
else
{
return static_cast<T>(GetIntegerArgumentValue(ctx, base, idx));
}
}
template<typename T>
FORCEINLINE constexpr static std::enable_if_t<std::is_pointer_v<T>, T> GetValue(PPCContext& ctx, uint8_t* base, size_t idx) noexcept
{
const auto v = GetIntegerArgumentValue(ctx, base, idx);
if (!v)
{
return nullptr;
}
return reinterpret_cast<T>(base + static_cast<uint32_t>(v));
}
};
struct Argument
{
int type{};
int ordinal{};
};
template<auto Func>
constexpr std::array<Argument, arg_count_t<Func>::value> GatherFunctionArguments()
{
std::array<Argument, arg_count_t<Func>::value> args{};
int intOrdinal{};
int floatOrdinal{};
size_t i{};
if constexpr (!args.empty())
{
std::apply([&](const auto& first, const auto&... rest)
{
auto append = [&]<typename T>(const T & v)
{
if constexpr (is_precise_v<T>)
{
args[i] = { 1, floatOrdinal++ };
}
else
{
intOrdinal++;
args[i] = { 0, static_cast<int>(i) }; // integer ordinal follows the overall parameter index, so a preceding float argument still consumes a GPR slot
}
i++;
};
append(first);
(append(rest), ...);
}, function_args(Func));
}
return args;
}
template<auto Func, size_t I>
struct arg_ordinal_t
{
static constexpr size_t value = GatherFunctionArguments<Func>()[I].ordinal;
};
template<auto Func, int I = 0, typename ...TArgs>
void _translate_args(PPCContext& ctx, uint8_t* base, std::tuple<TArgs...>&) noexcept
requires (I >= sizeof...(TArgs))
{
}
template <auto Func, int I = 0, typename ...TArgs>
std::enable_if_t<(I < sizeof...(TArgs)), void> _translate_args(PPCContext& ctx, uint8_t* base, std::tuple<TArgs...>& tpl) noexcept
{
using T = std::tuple_element_t<I, std::remove_reference_t<decltype(tpl)>>;
std::get<I>(tpl) = ArgTranslator::GetValue<T>(ctx, base, arg_ordinal_t<Func, I>::value);
_translate_args<Func, I + 1>(ctx, base, tpl);
}
template<auto Func>
PPC_FUNC(GuestFunction)
{
using ret_t = decltype(std::apply(Func, function_args(Func)));
auto args = function_args(Func);
_translate_args<Func>(ctx, base, args);
if constexpr (std::is_same_v<ret_t, void>)
{
std::apply(Func, args);
}
else
{
auto v = std::apply(Func, args);
if constexpr (std::is_pointer<ret_t>())
{
if (v != nullptr)
{
ctx.r3.u64 = static_cast<uint32_t>(reinterpret_cast<size_t>(v) - reinterpret_cast<size_t>(base));
}
else
{
ctx.r3.u64 = NULL;
}
}
else if constexpr (is_precise_v<ret_t>)
{
ctx.f1.f64 = v;
}
else
{
ctx.r3.u64 = (uint64_t)v;
}
}
}
#define GUEST_FUNCTION_HOOK(subroutine, function) \
PPC_FUNC(subroutine) { GuestFunction<function>(ctx, base); }
#define GUEST_FUNCTION_STUB(subroutine) \
PPC_FUNC(subroutine) { }
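GuestFunction<> reads each host parameter out of the PPC context according to its position and type: integers and pointers come from r3-r10 (or the guest stack), floats and doubles from f1-f13, and pointer arguments are rebased against the guest memory base. A hedged example of hooking a host function with mixed arguments; both the host function and the guest symbol are illustrative, not part of this commit:
uint32_t AddAndStore(uint32_t a, double b, uint32_t* outGuestPtr)
{
    // 'a' is taken from r3 and 'b' from f1; because integer ordinals follow the overall
    // parameter index, 'outGuestPtr' is read from r5 (r4 is skipped) and rebased from a
    // guest address into a host pointer.
    if (outGuestPtr != nullptr)
        *outGuestPtr = std::byteswap(a + static_cast<uint32_t>(b));
    return a; // non-void, non-pointer results are written back to r3 by GuestFunction<>
}
GUEST_FUNCTION_HOOK(sub_DEADBEEF, AddAndStore); // sub_DEADBEEF is a placeholder guest symbol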

View file

@ -0,0 +1,135 @@
#include <stdafx.h>
#include "heap.h"
#include "memory.h"
#include "function.h"
constexpr size_t RESERVED_BEGIN = 0x7FEA0000;
constexpr size_t RESERVED_END = 0xA0000000;
void Heap::Init()
{
gMemory.Alloc(0x20000, RESERVED_BEGIN - 0x20000, MEM_COMMIT);
heap = o1heapInit(gMemory.Translate(0x20000), RESERVED_BEGIN - 0x20000);
gMemory.Alloc(RESERVED_END, 0x100000000 - RESERVED_END, MEM_COMMIT);
physicalHeap = o1heapInit(gMemory.Translate(RESERVED_END), 0x100000000 - RESERVED_END);
}
void* Heap::Alloc(size_t size)
{
std::lock_guard lock(mutex);
return o1heapAllocate(heap, std::max<size_t>(1, size));
}
void* Heap::AllocPhysical(size_t size, size_t alignment)
{
size = std::max<size_t>(1, size);
alignment = alignment == 0 ? 0x1000 : std::max<size_t>(16, alignment);
std::lock_guard lock(physicalMutex);
void* ptr = o1heapAllocate(physicalHeap, size + alignment);
size_t aligned = ((size_t)ptr + alignment) & ~(alignment - 1);
*((void**)aligned - 1) = ptr;
*((size_t*)aligned - 2) = size + O1HEAP_ALIGNMENT;
return (void*)aligned;
}
void Heap::Free(void* ptr)
{
if (ptr >= physicalHeap)
{
std::lock_guard lock(physicalMutex);
o1heapFree(physicalHeap, *((void**)ptr - 1));
}
else
{
std::lock_guard lock(mutex);
o1heapFree(heap, ptr);
}
}
size_t Heap::Size(void* ptr)
{
if (ptr)
return *((size_t*)ptr - 2) - O1HEAP_ALIGNMENT; // relies on fragment header in o1heap.c
return 0;
}
uint32_t RtlAllocateHeap(uint32_t heapHandle, uint32_t flags, uint32_t size)
{
void* ptr = gUserHeap.Alloc(size);
if ((flags & 0x8) != 0)
memset(ptr, 0, size);
assert(ptr);
return gMemory.MapVirtual(ptr);
}
uint32_t RtlReAllocateHeap(uint32_t heapHandle, uint32_t flags, uint32_t memoryPointer, uint32_t size)
{
void* ptr = gUserHeap.Alloc(size);
if ((flags & 0x8) != 0)
memset(ptr, 0, size);
if (memoryPointer != 0)
{
void* oldPtr = gMemory.Translate(memoryPointer);
memcpy(ptr, oldPtr, std::min<size_t>(size, gUserHeap.Size(oldPtr)));
gUserHeap.Free(oldPtr);
}
assert(ptr);
return gMemory.MapVirtual(ptr);
}
uint32_t RtlFreeHeap(uint32_t heapHandle, uint32_t flags, uint32_t memoryPointer)
{
if (memoryPointer != NULL)
gUserHeap.Free(gMemory.Translate(memoryPointer));
return true;
}
uint32_t RtlSizeHeap(uint32_t heapHandle, uint32_t flags, uint32_t memoryPointer)
{
if (memoryPointer != NULL)
return (uint32_t)gUserHeap.Size(gMemory.Translate(memoryPointer));
return 0;
}
uint32_t XAlloc(uint32_t size, uint32_t flags)
{
void* ptr = (flags & 0x80000000) != 0 ?
gUserHeap.AllocPhysical(size, (1ull << ((flags >> 24) & 0xF))) :
gUserHeap.Alloc(size);
if ((flags & 0x40000000) != 0)
memset(ptr, 0, size);
assert(ptr);
return gMemory.MapVirtual(ptr);
}
void XFree(uint32_t baseAddress, uint32_t flags)
{
if (baseAddress != NULL)
gUserHeap.Free(gMemory.Translate(baseAddress));
}
GUEST_FUNCTION_STUB(sub_82BD7788); // HeapCreate
GUEST_FUNCTION_STUB(sub_82BD9250); // HeapDestroy
GUEST_FUNCTION_HOOK(sub_82BD7D30, RtlAllocateHeap);
GUEST_FUNCTION_HOOK(sub_82BD8600, RtlFreeHeap);
GUEST_FUNCTION_HOOK(sub_82BD88F0, RtlReAllocateHeap);
GUEST_FUNCTION_HOOK(sub_82BD6FD0, RtlSizeHeap);
// Seems like these handle allocation of virtual and physical pages
GUEST_FUNCTION_HOOK(sub_831CC9C8, XAlloc);
GUEST_FUNCTION_HOOK(sub_831CCA60, XFree);

View file

@ -0,0 +1,38 @@
#pragma once
#include "Mutex.h"
#include <o1heap.h>
struct Heap
{
Mutex mutex;
O1HeapInstance* heap;
Mutex physicalMutex;
O1HeapInstance* physicalHeap;
void Init();
void* Alloc(size_t size);
void* AllocPhysical(size_t size, size_t alignment);
void Free(void* ptr);
size_t Size(void* ptr);
template<typename T, typename... Args>
T* Alloc(Args... args)
{
T* obj = (T*)Alloc(sizeof(T));
new (obj) T(std::forward<Args>(args)...);
return obj;
}
template<typename T, typename... Args>
T* AllocPhysical(Args... args)
{
T* obj = (T*)AllocPhysical(sizeof(T), alignof(T));
new (obj) T(std::forward<Args>(args)...);
return obj;
}
};
extern Heap gUserHeap;
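The templated overloads placement-construct objects directly inside the emulated address space, which is what the kernel and XAM code rely on when they need to hand the guest a pointer. A hedged call-site sketch; XamListener and gMemory come from elsewhere in this commit, the call site itself is illustrative:
void ExampleAllocate()
{
    auto* listener = gUserHeap.Alloc<XamListener>();  // allocate from the user heap and placement-construct
    uint32_t guestPtr = gMemory.MapVirtual(listener); // 32-bit address the guest can store
    (void)guestPtr;
    gUserHeap.Free(listener);                         // note: Free() releases the memory but does not run the destructor
}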

File diff suppressed because it is too large

View file

@ -0,0 +1,244 @@
#include <stdafx.h>
#include "file_system.h"
#include <kernel/xam.h>
#include <kernel/xdm.h>
#include <kernel/function.h>
#include <cpu/guest_thread.h>
bool FindHandleCloser(void* handle)
{
FindClose(handle);
return false;
}
static uint32_t CreateFileImpl(
LPCSTR lpFileName,
DWORD dwDesiredAccess,
DWORD dwShareMode,
LPSECURITY_ATTRIBUTES lpSecurityAttributes,
DWORD dwCreationDisposition,
DWORD dwFlagsAndAttributes)
{
const auto handle = (uint32_t)CreateFileA(
FileSystem::TransformPath(lpFileName),
dwDesiredAccess,
dwShareMode,
nullptr,
dwCreationDisposition,
dwFlagsAndAttributes & ~(FILE_FLAG_NO_BUFFERING | FILE_FLAG_OVERLAPPED),
nullptr);
GuestThread::SetLastError(GetLastError());
printf("CreateFileA(%s, %x, %x, %x, %x, %x): %x\n", lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, handle);
return handle;
}
static DWORD GetFileSizeImpl(
uint32_t hFile,
LPDWORD lpFileSizeHigh)
{
DWORD fileSize = GetFileSize((HANDLE)hFile, lpFileSizeHigh);
if (lpFileSizeHigh != nullptr)
*lpFileSizeHigh = std::byteswap(*lpFileSizeHigh);
return fileSize;
}
BOOL GetFileSizeExImpl(
uint32_t hFile,
PLARGE_INTEGER lpFileSize)
{
BOOL result = GetFileSizeEx((HANDLE)hFile, lpFileSize);
if (result)
lpFileSize->QuadPart = std::byteswap(lpFileSize->QuadPart);
return result;
}
BOOL ReadFileImpl(
uint32_t hFile,
LPVOID lpBuffer,
DWORD nNumberOfBytesToRead,
XLPDWORD lpNumberOfBytesRead,
XOVERLAPPED* lpOverlapped)
{
if (lpOverlapped != nullptr)
{
LONG distanceToMoveHigh = lpOverlapped->OffsetHigh;
if (SetFilePointer((HANDLE)hFile, lpOverlapped->Offset, &distanceToMoveHigh, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
return FALSE;
}
DWORD numberOfBytesRead;
BOOL result = ReadFile((HANDLE)hFile, lpBuffer, nNumberOfBytesToRead, &numberOfBytesRead, nullptr);
if (result)
{
if (lpOverlapped != nullptr)
{
lpOverlapped->Internal = 0;
lpOverlapped->InternalHigh = numberOfBytesRead;
if (lpOverlapped->hEvent != NULL)
SetEvent((HANDLE)lpOverlapped->hEvent.get());
}
else if (lpNumberOfBytesRead != nullptr)
{
*lpNumberOfBytesRead = numberOfBytesRead;
}
}
//printf("ReadFile(): %x %x %x %x %x %x\n", hFile, lpBuffer, nNumberOfBytesToRead, lpNumberOfBytesRead, lpOverlapped, result);
return result;
}
DWORD SetFilePointerImpl(
uint32_t hFile,
LONG lDistanceToMove,
PLONG lpDistanceToMoveHigh,
DWORD dwMoveMethod)
{
LONG distanceToMoveHigh = lpDistanceToMoveHigh ? std::byteswap(*lpDistanceToMoveHigh) : 0;
DWORD result = SetFilePointer((HANDLE)hFile, lDistanceToMove, lpDistanceToMoveHigh ? &distanceToMoveHigh : nullptr, dwMoveMethod);
if (lpDistanceToMoveHigh != nullptr)
*lpDistanceToMoveHigh = std::byteswap(distanceToMoveHigh);
return result;
}
BOOL SetFilePointerExImpl(
uint32_t hFile,
LONG lDistanceToMove,
PLARGE_INTEGER lpNewFilePointer,
DWORD dwMoveMethod)
{
LARGE_INTEGER distanceToMove;
distanceToMove.QuadPart = lDistanceToMove;
DWORD result = SetFilePointerEx((HANDLE)hFile, distanceToMove, lpNewFilePointer, dwMoveMethod);
if (lpNewFilePointer != nullptr)
lpNewFilePointer->QuadPart = std::byteswap(lpNewFilePointer->QuadPart);
return result;
}
uint32_t FindFirstFileImpl(
LPCSTR lpFileName,
LPWIN32_FIND_DATAA lpFindFileData)
{
auto& data = *lpFindFileData;
const auto handle = FindFirstFileA(FileSystem::TransformPath(lpFileName), &data);
GuestThread::SetLastError(GetLastError());
if (handle == INVALID_HANDLE_VALUE)
{
return 0xFFFFFFFF;
}
ByteSwap(data.dwFileAttributes);
ByteSwap(*(uint64_t*)&data.ftCreationTime);
ByteSwap(*(uint64_t*)&data.ftLastAccessTime);
ByteSwap(*(uint64_t*)&data.ftLastWriteTime);
ByteSwap(*(uint64_t*)&data.nFileSizeHigh);
return GUEST_HANDLE(ObInsertObject(handle, FindHandleCloser));
}
uint32_t FindNextFileImpl(uint32_t Handle, LPWIN32_FIND_DATAA lpFindFileData)
{
auto* handle = ObQueryObject(HOST_HANDLE(Handle));
auto& data = *lpFindFileData;
const auto result = FindNextFileA(handle, &data);
ByteSwap(data.dwFileAttributes);
ByteSwap(*(uint64_t*)&data.ftCreationTime);
ByteSwap(*(uint64_t*)&data.ftLastAccessTime);
ByteSwap(*(uint64_t*)&data.ftLastWriteTime);
ByteSwap(*(uint64_t*)&data.nFileSizeHigh);
return result;
}
BOOL ReadFileExImpl(
uint32_t hFile,
LPVOID lpBuffer,
DWORD nNumberOfBytesToRead,
XOVERLAPPED* lpOverlapped,
uint32_t lpCompletionRoutine)
{
LONG distanceToMoveHigh = lpOverlapped->OffsetHigh;
if (SetFilePointer((HANDLE)hFile, lpOverlapped->Offset, &distanceToMoveHigh, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
return FALSE;
DWORD numberOfBytesRead;
BOOL result = ReadFile((HANDLE)hFile, lpBuffer, nNumberOfBytesToRead, &numberOfBytesRead, nullptr);
if (result)
{
lpOverlapped->Internal = 0;
lpOverlapped->InternalHigh = numberOfBytesRead;
if (lpOverlapped->hEvent != NULL)
SetEvent((HANDLE)lpOverlapped->hEvent.get());
}
//printf("ReadFileEx(): %x %x %x %x %x %x\n", hFile, lpBuffer, nNumberOfBytesToRead, lpOverlapped, lpCompletionRoutine, result);
return result;
}
DWORD GetFileAttributesAImpl(LPCSTR lpFileName)
{
return GetFileAttributesA(FileSystem::TransformPath(lpFileName));
}
BOOL WriteFileImpl(
uint32_t hFile,
LPCVOID lpBuffer,
DWORD nNumberOfBytesToWrite,
LPDWORD lpNumberOfBytesWritten,
LPOVERLAPPED lpOverlapped)
{
assert(lpOverlapped == nullptr);
BOOL result = WriteFile((HANDLE)hFile, lpBuffer, nNumberOfBytesToWrite, lpNumberOfBytesWritten, nullptr);
if (result && lpNumberOfBytesWritten != nullptr)
ByteSwap(*lpNumberOfBytesWritten);
return result;
}
const char* FileSystem::TransformPath(const char* path)
{
thread_local char builtPath[2048]{};
const char* relativePath = strstr(path, ":\\");
if (relativePath != nullptr)
{
// rooted path; redirect its drive root through the registered mounts
const std::string_view root = std::string_view{ path, path + (relativePath - path) };
const auto newRoot = XamGetRootPath(root);
if (!newRoot.empty())
{
strncpy(builtPath, newRoot.data(), newRoot.size());
builtPath[newRoot.size()] = '\\';
strcpy(builtPath + newRoot.size() + 1, relativePath + 2);
return builtPath;
}
}
return relativePath != nullptr ? relativePath + 2 : path;
}
GUEST_FUNCTION_HOOK(sub_82BD4668, CreateFileImpl);
GUEST_FUNCTION_HOOK(sub_82BD4600, GetFileSizeImpl);
GUEST_FUNCTION_HOOK(sub_82BD5608, GetFileSizeExImpl);
GUEST_FUNCTION_HOOK(sub_82BD4478, ReadFileImpl);
GUEST_FUNCTION_HOOK(sub_831CD3E8, SetFilePointerImpl);
GUEST_FUNCTION_HOOK(sub_831CE888, SetFilePointerExImpl);
GUEST_FUNCTION_HOOK(sub_831CDC58, FindFirstFileImpl);
GUEST_FUNCTION_HOOK(sub_831CDC00, FindNextFileImpl);
GUEST_FUNCTION_HOOK(sub_831CDF40, ReadFileExImpl);
GUEST_FUNCTION_HOOK(sub_831CD6E8, GetFileAttributesAImpl);
GUEST_FUNCTION_HOOK(sub_831CE3F8, CreateFileImpl);
GUEST_FUNCTION_HOOK(sub_82BD4860, WriteFileImpl);

View file

@ -0,0 +1,6 @@
#pragma once
struct FileSystem
{
static const char* TransformPath(const char* path);
};

View file

@ -0,0 +1,37 @@
#include <stdafx.h>
#include "memory.h"
Memory::Memory(void* address, size_t size) : size(size)
{
base = (char*)VirtualAlloc(address, size, MEM_RESERVE, PAGE_READWRITE);
}
void* Memory::Alloc(size_t offset, size_t size, uint32_t type)
{
return VirtualAlloc(base + offset, size, type, PAGE_READWRITE);
}
void* Memory::Commit(size_t offset, size_t size)
{
return Alloc(offset, size, MEM_COMMIT);
}
void* Memory::Reserve(size_t offset, size_t size)
{
return Alloc(offset, size, MEM_RESERVE);
}
void* Memory::Translate(size_t offset) const noexcept
{
return base + offset;
}
uint32_t Memory::MapVirtual(void* host) const noexcept
{
return static_cast<uint32_t>(static_cast<char*>(host) - base);
}
extern "C" void* MmGetHostAddress(uint32_t ptr)
{
return gMemory.Translate(ptr);
}

View file

@ -0,0 +1,21 @@
#pragma once
class Memory
{
public:
char* base{};
size_t size{};
size_t guestBase{};
Memory(void* address, size_t size);
void* Alloc(size_t offset, size_t size, uint32_t type);
void* Commit(size_t offset, size_t size);
void* Reserve(size_t offset, size_t size);
void* Translate(size_t offset) const noexcept;
uint32_t MapVirtual(void* host) const noexcept;
};
extern Memory gMemory;
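Translate and MapVirtual are inverse mappings between the guest's flat 32-bit space and host pointers relative to the 4 GiB reservation made in main.cpp. A short illustrative round trip (the guest address is arbitrary):
void ExampleTranslate()
{
    const uint32_t guestAddress = 0x82000000;        // illustrative guest address
    void* host = gMemory.Translate(guestAddress);    // base + 0x82000000
    assert(gMemory.MapVirtual(host) == guestAddress); // maps straight back
}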

View file

@ -0,0 +1,403 @@
#include <stdafx.h>
#include "xam.h"
#include "xdm.h"
#include <hid/hid.h>
#include <gpu/window.h>
#include <cpu/guest_thread.h>
#include <ranges>
#include <unordered_set>
#include <CommCtrl.h>
#include "xxHashMap.h"
// Needed for commctrl
#pragma comment(linker, "/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='amd64' publicKeyToken='6595b64144ccf1df' language='*'\"")
std::array<xxHashMap<XHOSTCONTENT_DATA>, 3> gContentRegistry{};
std::unordered_set<XamListener*> gListeners{};
xxHashMap<std::string> gRootMap;
std::string_view XamGetRootPath(const std::string_view& root)
{
const auto result = gRootMap.find(StringHash(root));
if (result == gRootMap.end())
{
return "";
}
return result->second;
}
void XamRootCreate(const std::string_view& root, const std::string_view& path)
{
gRootMap.emplace(StringHash(root), path);
}
XamListener::XamListener()
{
gListeners.insert(this);
}
XamListener::~XamListener()
{
gListeners.erase(this);
}
XCONTENT_DATA XamMakeContent(DWORD type, const std::string_view& name)
{
XCONTENT_DATA data{ 1, type };
strncpy(data.szFileName, name.data(), sizeof(data.szFileName));
return data;
}
void XamRegisterContent(const XCONTENT_DATA& data, const std::string_view& root)
{
const auto idx = data.dwContentType - 1;
gContentRegistry[idx].emplace(StringHash(data.szFileName), XHOSTCONTENT_DATA{ data }).first->second.szRoot = root;
}
void XamRegisterContent(DWORD type, const std::string_view name, const std::string_view& root)
{
XCONTENT_DATA data{ 1, type, {}, "" };
strncpy(data.szFileName, name.data(), sizeof(data.szFileName));
XamRegisterContent(data, root);
}
SWA_API DWORD XamNotifyCreateListener(uint64_t qwAreas)
{
int handle;
auto* listener = ObCreateObject<XamListener>(handle);
listener->areas = qwAreas;
return GUEST_HANDLE(handle);
}
SWA_API void XamNotifyEnqueueEvent(DWORD dwId, DWORD dwParam)
{
for (const auto& listener : gListeners)
{
if (((1 << MSG_AREA(dwId)) & listener->areas) == 0)
{
continue;
}
listener->notifications.emplace_back(dwId, dwParam);
}
}
SWA_API bool XNotifyGetNext(DWORD hNotification, DWORD dwMsgFilter, XDWORD* pdwId, XDWORD* pParam)
{
auto& listener = *ObTryQueryObject<XamListener>(HOST_HANDLE(hNotification));
if (dwMsgFilter)
{
for (size_t i = 0; i < listener.notifications.size(); i++)
{
if (std::get<0>(listener.notifications[i]) == dwMsgFilter)
{
if (pdwId)
{
*pdwId = std::get<0>(listener.notifications[i]);
}
if (pParam)
{
*pParam = std::get<1>(listener.notifications[i]);
}
listener.notifications.erase(listener.notifications.begin() + i);
return true;
}
}
}
else
{
if (listener.notifications.empty())
{
return false;
}
if (pdwId)
{
*pdwId = std::get<0>(listener.notifications[0]);
}
if (pParam)
{
*pParam = std::get<1>(listener.notifications[0]);
}
listener.notifications.erase(listener.notifications.begin());
return true;
}
return false;
}
SWA_API uint32_t XamShowMessageBoxUI(DWORD dwUserIndex, XWORD* wszTitle, XWORD* wszText, DWORD cButtons,
xpointer<XWORD>* pwszButtons, DWORD dwFocusButton, DWORD dwFlags, XLPDWORD pResult, XXOVERLAPPED* pOverlapped)
{
// printf("!!! STUB !!! XamShowMessageBoxUI\n");
std::vector<std::wstring> texts{};
std::vector<TASKDIALOG_BUTTON> buttons{};
texts.emplace_back(reinterpret_cast<wchar_t*>(wszTitle));
texts.emplace_back(reinterpret_cast<wchar_t*>(wszText));
for (size_t i = 0; i < cButtons; i++)
{
texts.emplace_back(reinterpret_cast<wchar_t*>(pwszButtons[i].get()));
}
for (auto& text : texts)
{
for (size_t i = 0; i < text.size(); i++)
{
ByteSwap(text[i]);
}
}
for (size_t i = 0; i < cButtons; i++)
{
buttons.emplace_back(i, texts[2 + i].c_str());
}
XamNotifyEnqueueEvent(9, 1);
TASKDIALOGCONFIG config{};
config.cbSize = sizeof(config);
// config.hwndParent = Window::s_hWnd;
config.pszWindowTitle = texts[0].c_str();
config.pszContent = texts[1].c_str();
config.cButtons = cButtons;
config.pButtons = buttons.data();
int button{};
TaskDialogIndirect(&config, &button, nullptr, nullptr);
*pResult = button;
if (pOverlapped)
{
pOverlapped->dwCompletionContext = GetCurrentThreadId();
pOverlapped->Error = 0;
pOverlapped->Length = -1;
}
XamNotifyEnqueueEvent(9, 0);
return 0;
}
SWA_API uint32_t XamContentCreateEnumerator(DWORD dwUserIndex, DWORD DeviceID, DWORD dwContentType,
DWORD dwContentFlags, DWORD cItem, XLPDWORD pcbBuffer, XLPDWORD phEnum)
{
if (dwUserIndex != 0)
{
GuestThread::SetLastError(ERROR_NO_SUCH_USER);
return 0xFFFFFFFF;
}
const auto& registry = gContentRegistry[dwContentType - 1];
const auto& values = registry | std::views::values;
const int handle = ObInsertObject(new XamEnumerator(cItem, sizeof(_XCONTENT_DATA), values.begin(), values.end()));
if (pcbBuffer)
{
*pcbBuffer = sizeof(_XCONTENT_DATA) * cItem;
}
*phEnum = GUEST_HANDLE(handle);
return 0;
}
SWA_API uint32_t XamEnumerate(uint32_t hEnum, DWORD dwFlags, PVOID pvBuffer, DWORD cbBuffer, XLPDWORD pcItemsReturned, XXOVERLAPPED* pOverlapped)
{
auto* enumerator = ObTryQueryObject<XamEnumeratorBase>(HOST_HANDLE(hEnum));
const auto count = enumerator->Next(pvBuffer);
if (count == -1)
{
return ERROR_NO_MORE_FILES;
}
if (pcItemsReturned)
{
*pcItemsReturned = count;
}
return ERROR_SUCCESS;
}
SWA_API uint32_t XamContentCreateEx(DWORD dwUserIndex, LPCSTR szRootName, const XCONTENT_DATA* pContentData,
DWORD dwContentFlags, XLPDWORD pdwDisposition, XLPDWORD pdwLicenseMask,
DWORD dwFileCacheSize, uint64_t uliContentSize, PXXOVERLAPPED pOverlapped)
{
// printf("!!! STUB !!! XamContentCreateEx\n");
const auto& registry = gContentRegistry[pContentData->dwContentType - 1];
const auto exists = registry.contains(StringHash(pContentData->szFileName));
const auto mode = dwContentFlags & 0xF;
if (mode == CREATE_ALWAYS)
{
if (pdwDisposition) *pdwDisposition = XCONTENT_NEW;
if (!exists)
{
const char* root = "";
if (pContentData->dwContentType == XCONTENTTYPE_SAVEDATA)
{
root = ".\\save";
}
else if (pContentData->dwContentType == XCONTENTTYPE_DLC)
{
root = ".\\dlc";
}
else
{
root = ".";
}
XamRegisterContent(*pContentData, root);
CreateDirectoryA(root, nullptr);
XamRootCreate(szRootName, root);
}
else
{
XamRootCreate(szRootName, registry.find(StringHash(pContentData->szFileName))->second.szRoot);
}
return ERROR_SUCCESS;
}
if (mode == OPEN_EXISTING)
{
if (exists)
{
if (pdwDisposition) *pdwDisposition = XCONTENT_EXISTING;
XamRootCreate(szRootName, registry.find(StringHash(pContentData->szFileName))->second.szRoot);
return ERROR_SUCCESS;
}
else
{
if (pdwDisposition) *pdwDisposition = XCONTENT_NEW;
return ERROR_PATH_NOT_FOUND;
}
}
return ERROR_PATH_NOT_FOUND;
}
SWA_API uint32_t XamContentClose(LPCSTR szRootName, XXOVERLAPPED* pOverlapped)
{
// printf("!!! STUB !!! XamContentClose %s\n", szRootName);
gRootMap.erase(StringHash(szRootName));
return 0;
}
SWA_API uint32_t XamContentGetDeviceData(DWORD DeviceID, XDEVICE_DATA* pDeviceData)
{
// printf("!!! STUB !!! XamContentGetDeviceData\n");
pDeviceData->DeviceID = DeviceID;
pDeviceData->DeviceType = XCONTENTDEVICETYPE_HDD;
pDeviceData->ulDeviceBytes = 0x10000000;
pDeviceData->ulDeviceFreeBytes = 0x10000000;
pDeviceData->wszName[0] = 'S';
pDeviceData->wszName[1] = 'o';
pDeviceData->wszName[2] = 'n';
pDeviceData->wszName[3] = 'i';
pDeviceData->wszName[4] = 'c';
pDeviceData->wszName[5] = '\0';
return 0;
}
SWA_API uint32_t XamInputGetCapabilities(uint32_t unk, uint32_t userIndex, uint32_t flags, XAMINPUT_CAPABILITIES* caps)
{
//printf("!!! STUB !!! XamInputGetCapabilities\n");
uint32_t result = hid::GetCapabilities(userIndex, caps);
if (result == ERROR_SUCCESS)
{
ByteSwap(caps->Flags);
ByteSwap(caps->Gamepad.wButtons);
ByteSwap(caps->Gamepad.sThumbLX);
ByteSwap(caps->Gamepad.sThumbLY);
ByteSwap(caps->Gamepad.sThumbRX);
ByteSwap(caps->Gamepad.sThumbRY);
ByteSwap(caps->Vibration.wLeftMotorSpeed);
ByteSwap(caps->Vibration.wRightMotorSpeed);
}
return result;
}
SWA_API uint32_t XamInputGetState(uint32_t userIndex, uint32_t flags, XAMINPUT_STATE* state)
{
//printf("!!! STUB !!! XamInputGetState\n");
uint32_t result = hid::GetState(userIndex, state);
if (result == ERROR_SUCCESS)
{
ByteSwap(state->dwPacketNumber);
ByteSwap(state->Gamepad.wButtons);
ByteSwap(state->Gamepad.sThumbLX);
ByteSwap(state->Gamepad.sThumbLY);
ByteSwap(state->Gamepad.sThumbRX);
ByteSwap(state->Gamepad.sThumbRY);
}
else if (userIndex == 0)
{
memset(state, 0, sizeof(*state));
if (GetAsyncKeyState('W') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_Y;
if (GetAsyncKeyState('A') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_X;
if (GetAsyncKeyState('S') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_A;
if (GetAsyncKeyState('D') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_B;
if (GetAsyncKeyState('Q') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_LEFT_SHOULDER;
if (GetAsyncKeyState('E') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_RIGHT_SHOULDER;
if (GetAsyncKeyState('1') & 0x8000)
state->Gamepad.bLeftTrigger = 0xFF;
if (GetAsyncKeyState('3') & 0x8000)
state->Gamepad.bRightTrigger = 0xFF;
if (GetAsyncKeyState('I') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_DPAD_UP;
if (GetAsyncKeyState('J') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_DPAD_LEFT;
if (GetAsyncKeyState('K') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_DPAD_DOWN;
if (GetAsyncKeyState('L') & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_DPAD_RIGHT;
if (GetAsyncKeyState(VK_UP) & 0x8000)
state->Gamepad.sThumbLY = 32767;
if (GetAsyncKeyState(VK_LEFT) & 0x8000)
state->Gamepad.sThumbLX = -32768;
if (GetAsyncKeyState(VK_DOWN) & 0x8000)
state->Gamepad.sThumbLY = -32768;
if (GetAsyncKeyState(VK_RIGHT) & 0x8000)
state->Gamepad.sThumbLX = 32767;
if (GetAsyncKeyState(VK_RETURN) & 0x8000)
state->Gamepad.wButtons |= XAMINPUT_GAMEPAD_START;
ByteSwap(state->Gamepad.wButtons);
ByteSwap(state->Gamepad.sThumbLX);
ByteSwap(state->Gamepad.sThumbLY);
ByteSwap(state->Gamepad.sThumbRX);
ByteSwap(state->Gamepad.sThumbRY);
result = ERROR_SUCCESS;
}
return result;
}
SWA_API uint32_t XamInputSetState(uint32_t userIndex, uint32_t flags, XAMINPUT_VIBRATION* vibration)
{
//printf("!!! STUB !!! XamInputSetState\n");
ByteSwap(vibration->wLeftMotorSpeed);
ByteSwap(vibration->wRightMotorSpeed);
return hid::SetState(userIndex, vibration);
}

View file

@ -0,0 +1,109 @@
#pragma once
#include <xbox.h>
#define MSGID(Area, Number) (DWORD)((WORD)(Area) << 16 | (WORD)(Number))
#define MSG_AREA(msgid) (((msgid) >> 16) & 0xFFFF)
#define MSG_NUMBER(msgid) ((msgid) & 0xFFFF)
struct XamListener
{
uint32_t id{};
uint64_t areas{};
std::vector<std::tuple<DWORD, DWORD>> notifications;
XamListener(const XamListener&) = delete;
XamListener& operator=(const XamListener&) = delete;
XamListener();
~XamListener();
};
class XamEnumeratorBase
{
public:
virtual ~XamEnumeratorBase() = default;
virtual uint32_t Next(void* buffer)
{
return -1;
}
};
template<typename TIterator = std::vector<XHOSTCONTENT_DATA>::iterator>
class XamEnumerator : public XamEnumeratorBase
{
public:
uint32_t fetch;
size_t size;
TIterator position;
TIterator begin;
TIterator end;
XamEnumerator() = default;
XamEnumerator(uint32_t fetch, size_t size, TIterator begin, TIterator end) : fetch(fetch), size(size), position(begin), begin(begin), end(end)
{
}
uint32_t Next(void* buffer) override
{
if (position == end)
{
return -1;
}
if (buffer == nullptr)
{
// No destination buffer: just advance the cursor and report how many entries were skipped.
for (size_t i = 0; i < fetch; i++)
{
if (position == end)
{
return i == 0 ? -1 : i;
}
++position;
}
return fetch;
}
for (size_t i = 0; i < fetch; i++)
{
if (position == end)
{
return i == 0 ? -1 : i;
}
memcpy(buffer, &*position, size);
++position;
buffer = (void*)((size_t)buffer + size);
}
return fetch;
}
};
XCONTENT_DATA XamMakeContent(DWORD type, const std::string_view& name);
void XamRegisterContent(const XCONTENT_DATA& data, const std::string_view& root);
std::string_view XamGetRootPath(const std::string_view& root);
void XamRootCreate(const std::string_view& root, const std::string_view& path);
SWA_API DWORD XamNotifyCreateListener(uint64_t qwAreas);
SWA_API void XamNotifyEnqueueEvent(DWORD dwId, DWORD dwParam); // signature inferred, not taken from any official header
SWA_API bool XNotifyGetNext(DWORD hNotification, DWORD dwMsgFilter, XDWORD* pdwId, XDWORD* pParam);
SWA_API uint32_t XamShowMessageBoxUI(DWORD dwUserIndex, XWORD* wszTitle, XWORD* wszText, DWORD cButtons,
xpointer<XWORD>* pwszButtons, DWORD dwFocusButton, DWORD dwFlags, XLPDWORD pResult, XXOVERLAPPED* pOverlapped);
SWA_API uint32_t XamContentCreateEnumerator(DWORD dwUserIndex, DWORD DeviceID, DWORD dwContentType,
DWORD dwContentFlags, DWORD cItem, XLPDWORD pcbBuffer, XLPDWORD phEnum);
SWA_API uint32_t XamEnumerate(uint32_t hEnum, DWORD dwFlags, PVOID pvBuffer, DWORD cbBuffer, XLPDWORD pcItemsReturned, XXOVERLAPPED* pOverlapped);
SWA_API uint32_t XamContentCreateEx(DWORD dwUserIndex, LPCSTR szRootName, const XCONTENT_DATA* pContentData,
DWORD dwContentFlags, XLPDWORD pdwDisposition, XLPDWORD pdwLicenseMask,
DWORD dwFileCacheSize, uint64_t uliContentSize, PXXOVERLAPPED pOverlapped);
SWA_API uint32_t XamContentGetDeviceData(DWORD DeviceID, XDEVICE_DATA* pDeviceData);
SWA_API uint32_t XamContentClose(LPCSTR szRootName, XXOVERLAPPED* pOverlapped);
SWA_API uint32_t XamInputGetCapabilities(uint32_t unk, uint32_t userIndex, uint32_t flags, XAMINPUT_CAPABILITIES* caps);
SWA_API uint32_t XamInputGetState(uint32_t userIndex, uint32_t flags, XAMINPUT_STATE* state);
SWA_API uint32_t XamInputSetState(uint32_t userIndex, uint32_t flags, XAMINPUT_VIBRATION* vibration);

View file

@ -0,0 +1,49 @@
#include <stdafx.h>
#include "xdm.h"
#include "FreeList.h"
FreeList<std::tuple<std::unique_ptr<char>, TypeDestructor_t>> gKernelObjects;
Mutex gKernelLock;
void* ObQueryObject(size_t handle)
{
std::lock_guard guard{ gKernelLock };
if (handle >= gKernelObjects.items.size())
{
return nullptr;
}
return std::get<0>(gKernelObjects[handle]).get();
}
uint32_t ObInsertObject(void* object, TypeDestructor_t destructor)
{
std::lock_guard guard{ gKernelLock };
const auto handle = gKernelObjects.Alloc();
auto& holder = gKernelObjects[handle];
std::get<0>(holder).reset(static_cast<char*>(object));
std::get<1>(holder) = destructor;
return handle;
}
void ObCloseHandle(uint32_t handle)
{
std::lock_guard guard{ gKernelLock };
auto& obj = gKernelObjects[handle];
if (std::get<1>(obj)(std::get<0>(obj).get()))
{
std::get<0>(obj).reset();
}
else
{
std::get<0>(obj).release();
}
gKernelObjects.Free(handle);
}

View file

@ -0,0 +1,56 @@
#pragma once
#define DUMMY_HANDLE (DWORD)('HAND')
#define OBJECT_SIGNATURE (DWORD)'XBOX'
extern Mutex gKernelLock;
void* ObQueryObject(size_t handle);
uint32_t ObInsertObject(void* object, TypeDestructor_t destructor);
void ObCloseHandle(uint32_t handle);
template<typename T>
T* ObQueryObject(XDISPATCHER_HEADER& header)
{
std::lock_guard guard{ gKernelLock };
if (header.WaitListHead.Flink != OBJECT_SIGNATURE)
{
header.WaitListHead.Flink = OBJECT_SIGNATURE;
auto* obj = new T(reinterpret_cast<typename T::guest_type*>(&header));
header.WaitListHead.Blink = ObInsertObject(obj, DestroyObject<T>);
return obj;
}
return static_cast<T*>(ObQueryObject(header.WaitListHead.Blink.get()));
}
template<typename T>
size_t ObInsertObject(T* object)
{
return ObInsertObject(object, DestroyObject<T>);
}
template<typename T>
T* ObCreateObject(int& handle)
{
auto* obj = new T();
handle = ::ObInsertObject(obj, DestroyObject<T>);
return obj;
}
// Get object without initialisation
template<typename T>
T* ObTryQueryObject(XDISPATCHER_HEADER& header)
{
if (header.WaitListHead.Flink != OBJECT_SIGNATURE)
return nullptr;
return static_cast<T*>(ObQueryObject(header.WaitListHead.Blink));
}
template<typename T>
T* ObTryQueryObject(int handle)
{
return static_cast<T*>(ObQueryObject(handle));
}
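Handles are simply indices into the gKernelObjects free list paired with a destructor callback; XDISPATCHER_HEADER-backed objects additionally stamp OBJECT_SIGNATURE into WaitListHead so the host wrapper is created lazily on first query. A hedged sketch of the plain handle lifecycle; the object type is just an example taken from xam.h:
void ExampleHandleLifecycle()
{
    int handle = 0;
    auto* listener = ObCreateObject<XamListener>(handle); // allocates the object and registers DestroyObject<XamListener>
    assert(ObTryQueryObject<XamListener>(handle) == listener);
    ObCloseHandle(handle);                                 // invokes the stored destructor and recycles the slot
}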

104
UnleashedRecomp/main.cpp Normal file
View file

@ -0,0 +1,104 @@
#include <stdafx.h>
#include <cpu/code_cache.h>
#include <cpu/guest_thread.h>
#include <kernel/function.h>
#include <kernel/memory.h>
#include <kernel/heap.h>
#include <kernel/xam.h>
#include <kernel/io/file_system.h>
#include <file.h>
#include <xex.h>
#define GAME_XEX_PATH "game:\\default.xex"
const size_t XMAIOBegin = 0x7FEA0000;
const size_t XMAIOEnd = XMAIOBegin + 0x0000FFFF;
Memory gMemory{ reinterpret_cast<void*>(0x100000000), 0x100000000 };
Heap gUserHeap;
CodeCache gCodeCache;
int main()
{
#ifdef _WIN32
CoInitializeEx(nullptr, COINIT_MULTITHREADED);
#endif
gMemory.Alloc(0x10000, 0x1000, MEM_COMMIT);
gUserHeap.Init();
gCodeCache.Init();
gMemory.Alloc(XMAIOBegin, 0xFFFF, MEM_COMMIT);
const auto gameContent = XamMakeContent(XCONTENTTYPE_RESERVED, "Game");
const auto updateContent = XamMakeContent(XCONTENTTYPE_RESERVED, "Update");
XamRegisterContent(gameContent, DirectoryExists(".\\game") ? ".\\game" : ".");
XamRegisterContent(updateContent, ".\\update");
if (FileExists(".\\save\\SYS-DATA"))
{
XamRegisterContent(XamMakeContent(XCONTENTTYPE_SAVEDATA, "SYS-DATA"), ".\\save");
}
else if (FileExists(".\\SYS-DATA"))
{
XamRegisterContent(XamMakeContent(XCONTENTTYPE_SAVEDATA, "SYS-DATA"), ".");
}
// Mount game
XamContentCreateEx(0, "game", &gameContent, OPEN_EXISTING, nullptr, nullptr, 0, 0, nullptr);
XamContentCreateEx(0, "update", &updateContent, OPEN_EXISTING, nullptr, nullptr, 0, 0, nullptr);
// OS mounts game data to D:
XamContentCreateEx(0, "D", &gameContent, OPEN_EXISTING, nullptr, nullptr, 0, 0, nullptr);
auto loadResult = LoadFile(FileSystem::TransformPath(GAME_XEX_PATH));
if (!loadResult.has_value())
{
assert("Failed to load default.xex" && false);
return 1;
}
auto* xex = reinterpret_cast<XEX_HEADER*>(loadResult->data());
auto headers = reinterpret_cast<XEX_OPTIONAL_HEADER*>(&xex[1]);
auto security = reinterpret_cast<XEX2_SECURITY_INFO*>((char*)xex + xex->AddressOfSecurityInfo);
gMemory.Alloc(security->ImageBase, security->SizeOfImage, MEM_COMMIT);
auto format = Xex2FindOptionalHeader<XEX_FILE_FORMAT_INFO>(xex, XEX_HEADER_FILE_FORMAT_INFO);
auto entry = *Xex2FindOptionalHeader<uint32_t>(xex, XEX_HEADER_ENTRY_POINT);
ByteSwap(entry);
assert(format->CompressionType >= 1);
if (format->CompressionType == 1)
{
auto srcData = (char*)xex + xex->SizeOfHeader;
auto destData = (char*)gMemory.Translate(security->ImageBase);
auto numBlocks = (format->SizeOfHeader / sizeof(XEX_BASIC_FILE_COMPRESSION_INFO)) - 1;
auto blocks = reinterpret_cast<const XEX_BASIC_FILE_COMPRESSION_INFO*>(format + 1);
for (size_t i = 0; i < numBlocks; i++)
{
memcpy(destData, srcData, blocks[i].SizeOfData);
srcData += blocks[i].SizeOfData;
destData += blocks[i].SizeOfData;
memset(destData, 0, blocks[i].SizeOfPadding);
destData += blocks[i].SizeOfPadding;
}
}
GuestThread::Start(entry);
return 0;
}
GUEST_FUNCTION_STUB(__imp__vsprintf);
GUEST_FUNCTION_STUB(__imp___vsnprintf);
GUEST_FUNCTION_STUB(__imp__sprintf);
GUEST_FUNCTION_STUB(__imp___snprintf);
GUEST_FUNCTION_STUB(__imp___snwprintf);
GUEST_FUNCTION_STUB(__imp__vswprintf);
GUEST_FUNCTION_STUB(__imp___vscwprintf);
GUEST_FUNCTION_STUB(__imp__swprintf);
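A minimal, self-contained sketch of the XEX "basic compression" layout handled by the copy loop in main() above (illustrative; the real block descriptor type comes from xex.h in this commit, only the SizeOfData/SizeOfPadding fields used by that loop are assumed here): each block is a run of stored bytes followed by a run of zero padding, so expansion is just memcpy plus memset.
    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct BasicBlock { uint32_t SizeOfData; uint32_t SizeOfPadding; };

    int main()
    {
        const char src[] = "AAAABBBB";                    // two packed data runs, back to back
        const BasicBlock blocks[] = { { 4, 2 }, { 4, 0 } };
        std::vector<char> image(4 + 2 + 4);
        char* dst = image.data();
        const char* cur = src;
        for (const auto& block : blocks)
        {
            std::memcpy(dst, cur, block.SizeOfData);      // copy the stored bytes
            cur += block.SizeOfData;
            dst += block.SizeOfData;
            std::memset(dst, 0, block.SizeOfPadding);     // zero-fill the padding the block describes
            dst += block.SizeOfPadding;
        }
        assert(std::memcmp(image.data(), "AAAA\0\0BBBB", image.size()) == 0);
        return 0;
    }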

1
UnleashedRecomp/ppc/.gitignore vendored Normal file
View file

@@ -0,0 +1 @@
ppc_*

15
UnleashedRecomp/stdafx.h Normal file
View file

@@ -0,0 +1,15 @@
#pragma once
#define NOMINMAX
#include <windows.h>
#include <mutex>
#include <vector>
#include <string>
#include <cassert>
#include <xbox.h>
#include <xxhash.h>
#include <ankerl/unordered_dense.h>
#include "framework.h"
#include "Mutex.h"
#include "Config.h"

View file

@@ -0,0 +1,14 @@
#pragma once
struct xxHash
{
using is_avalanching = void;
uint64_t operator()(XXH64_hash_t const& x) const noexcept
{
return x;
}
};
template<typename T>
using xxHashMap = ankerl::unordered_dense::map<XXH64_hash_t, T, xxHash>;
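A short usage sketch (illustrative; assumes the xxhash.h and ankerl/unordered_dense.h includes from stdafx.h): keys are precomputed XXH64 digests, so the identity hasher above simply passes them through to the map.
    // Illustrative only.
    xxHashMap<int> table;
    const char* name = "default.xex";
    table.emplace(XXH64(name, strlen(name), 0), 42);
    auto it = table.find(XXH64(name, strlen(name), 0));
    assert(it != table.end() && it->second == 42);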

8
thirdparty/.gitignore vendored Normal file
View file

@@ -0,0 +1,8 @@
!*
# Visual Studio 2015/2017 cache/options directory
.vs/
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/

18
thirdparty/CMakeLists.txt vendored Normal file
View file

@@ -0,0 +1,18 @@
include(FetchContent)
FetchContent_Declare(
unordered_dense
GIT_REPOSITORY https://github.com/martinus/unordered_dense.git
GIT_TAG main
)
FetchContent_Declare(
xxHash
GIT_REPOSITORY https://github.com/Cyan4973/xxHash.git
GIT_TAG v0.8.2
SOURCE_SUBDIR "cmake_unofficial"
)
FetchContent_MakeAvailable(unordered_dense)
FetchContent_MakeAvailable(xxHash)
add_subdirectory(${SWA_THIRDPARTY_ROOT}/PowerRecomp)
add_subdirectory(${SWA_THIRDPARTY_ROOT}/o1heap)

1
thirdparty/PowerRecomp vendored Submodule

@@ -0,0 +1 @@
Subproject commit c4de70262f0bc6e44c95df99772c136d1bdd71cc

4
thirdparty/o1heap/CMakeLists.txt vendored Normal file
View file

@@ -0,0 +1,4 @@
project("o1heap")
add_library(o1heap "o1heap.h" "o1heap.c")
target_include_directories(o1heap PUBLIC ".")

497
thirdparty/o1heap/o1heap.c vendored Normal file
View file

@@ -0,0 +1,497 @@
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Copyright (c) 2020 Pavel Kirienko
// Authors: Pavel Kirienko <pavel.kirienko@zubax.com>
#include "o1heap.h"
#include <assert.h>
#include <limits.h>
// ---------------------------------------- BUILD CONFIGURATION OPTIONS ----------------------------------------
/// Define this macro to include build configuration header. This is an alternative to the -D compiler flag.
/// Usage example with CMake: "-DO1HEAP_CONFIG_HEADER=\"${CMAKE_CURRENT_SOURCE_DIR}/my_o1heap_config.h\""
#ifdef O1HEAP_CONFIG_HEADER
# include O1HEAP_CONFIG_HEADER
#endif
/// The assertion macro defaults to the standard assert().
/// It can be overridden to manually suppress assertion checks or use a different error handling policy.
#ifndef O1HEAP_ASSERT
// Intentional violation of MISRA: the assertion check macro cannot be replaced with a function definition.
# define O1HEAP_ASSERT(x) assert(x) // NOSONAR
#endif
/// Allow usage of compiler intrinsics for branch annotation and CLZ.
#ifndef O1HEAP_USE_INTRINSICS
# define O1HEAP_USE_INTRINSICS 1
#endif
/// Branch probability annotations are used to improve the worst case execution time (WCET). They are entirely optional.
#if O1HEAP_USE_INTRINSICS && !defined(O1HEAP_LIKELY)
# if defined(__GNUC__) || defined(__clang__) || defined(__CC_ARM)
// Intentional violation of MISRA: branch hinting macro cannot be replaced with a function definition.
# define O1HEAP_LIKELY(x) __builtin_expect((x), 1) // NOSONAR
# endif
#endif
#ifndef O1HEAP_LIKELY
# define O1HEAP_LIKELY(x) x
#endif
/// This option is used for testing only. Do not use in production.
#ifndef O1HEAP_PRIVATE
# define O1HEAP_PRIVATE static inline
#endif
/// Count leading zeros (CLZ) is used for fast computation of binary logarithm (which needs to be done very often).
/// Most of the modern processors (including the embedded ones) implement dedicated hardware support for fast CLZ
/// computation, which is available via compiler intrinsics. The default implementation will automatically use
/// the intrinsics for some of the compilers; for others it will default to the slow software emulation,
/// which can be overridden by the user via O1HEAP_CONFIG_HEADER. The library guarantees that the argument is positive.
#if O1HEAP_USE_INTRINSICS && !defined(O1HEAP_CLZ)
# if defined(__GNUC__) || defined(__clang__) || defined(__CC_ARM)
# if SIZE_MAX == 0xFFFFFFFFFFFFFFFF
# define O1HEAP_CLZ __builtin_clzll
# else
# define O1HEAP_CLZ __builtin_clzl
# endif
# endif
#endif
#ifndef O1HEAP_CLZ
O1HEAP_PRIVATE uint_fast8_t O1HEAP_CLZ(const size_t x)
{
O1HEAP_ASSERT(x > 0);
size_t t = ((size_t)1U) << ((sizeof(size_t) * CHAR_BIT) - 1U);
uint_fast8_t r = 0;
while ((x & t) == 0)
{
t >>= 1U;
r++;
}
return r;
}
#endif
// ---------------------------------------- INTERNAL DEFINITIONS ----------------------------------------
#if __STDC_VERSION__ < 201112L
// Intentional violation of MISRA: static assertion macro cannot be replaced with a function definition.
# define static_assert(x, ...) typedef char _static_assert_gl(_static_assertion_, __LINE__)[(x) ? 1 : -1] // NOSONAR
# define _static_assert_gl(a, b) _static_assert_gl_impl(a, b) // NOSONAR
// Intentional violation of MISRA: the paste operator ## cannot be avoided in this context.
# define _static_assert_gl_impl(a, b) a##b // NOSONAR
#endif
/// The overhead is at most O1HEAP_ALIGNMENT bytes large,
/// then follows the user data which shall keep the next fragment aligned.
#define FRAGMENT_SIZE_MIN (O1HEAP_ALIGNMENT * 2U)
/// This is risky, handle with care: if the allocation amount plus per-fragment overhead exceeds 2**(b-1),
/// where b is the pointer bit width, then ceil(log2(amount)) yields b; then 2**b causes an integer overflow.
/// To avoid this, we put a hard limit on fragment size (which is amount + per-fragment overhead): 2**(b-1)
#define FRAGMENT_SIZE_MAX ((SIZE_MAX >> 1U) + 1U)
/// Normally we should subtract log2(FRAGMENT_SIZE_MIN) but log2 is bulky to compute using the preprocessor only.
/// We will certainly end up with unused bins this way, but it is cheap to ignore.
#define NUM_BINS_MAX (sizeof(size_t) * CHAR_BIT)
static_assert((O1HEAP_ALIGNMENT & (O1HEAP_ALIGNMENT - 1U)) == 0U, "Not a power of 2");
static_assert((FRAGMENT_SIZE_MIN & (FRAGMENT_SIZE_MIN - 1U)) == 0U, "Not a power of 2");
static_assert((FRAGMENT_SIZE_MAX & (FRAGMENT_SIZE_MAX - 1U)) == 0U, "Not a power of 2");
typedef struct Fragment Fragment;
typedef struct FragmentHeader
{
Fragment* next;
Fragment* prev;
size_t size;
bool used;
} FragmentHeader;
static_assert(sizeof(FragmentHeader) <= O1HEAP_ALIGNMENT, "Memory layout error");
struct Fragment
{
FragmentHeader header;
// Everything past the header may spill over into the allocatable space. The header survives across alloc/free.
Fragment* next_free; // Next free fragment in the bin; NULL in the last one.
Fragment* prev_free; // Same but points back; NULL in the first one.
};
static_assert(sizeof(Fragment) <= FRAGMENT_SIZE_MIN, "Memory layout error");
struct O1HeapInstance
{
Fragment* bins[NUM_BINS_MAX]; ///< Smallest fragments are in the bin at index 0.
size_t nonempty_bin_mask; ///< Bit 1 represents a non-empty bin; bin at index 0 is for the smallest fragments.
O1HeapDiagnostics diagnostics;
};
/// The amount of space allocated for the heap instance.
/// Its size is padded up to O1HEAP_ALIGNMENT to ensure correct alignment of the allocation arena that follows.
#define INSTANCE_SIZE_PADDED ((sizeof(O1HeapInstance) + O1HEAP_ALIGNMENT - 1U) & ~(O1HEAP_ALIGNMENT - 1U))
static_assert(INSTANCE_SIZE_PADDED >= sizeof(O1HeapInstance), "Invalid instance footprint computation");
static_assert((INSTANCE_SIZE_PADDED % O1HEAP_ALIGNMENT) == 0U, "Invalid instance footprint computation");
/// Undefined for zero argument.
O1HEAP_PRIVATE uint_fast8_t log2Floor(const size_t x)
{
O1HEAP_ASSERT(x > 0);
// NOLINTNEXTLINE redundant cast to the same type.
return (uint_fast8_t)(((sizeof(x) * CHAR_BIT) - 1U) - ((uint_fast8_t)O1HEAP_CLZ(x)));
}
/// Special case: if the argument is zero, returns zero.
O1HEAP_PRIVATE uint_fast8_t log2Ceil(const size_t x)
{
// NOLINTNEXTLINE redundant cast to the same type.
return (x <= 1U) ? 0U : (uint_fast8_t)((sizeof(x) * CHAR_BIT) - ((uint_fast8_t)O1HEAP_CLZ(x - 1U)));
}
/// Raise 2 into the specified power.
/// You might be tempted to do something like (1U << power). WRONG! We humans are prone to forgetting things.
/// If you forget to cast your 1U to size_t or ULL, you may end up with undefined behavior.
O1HEAP_PRIVATE size_t pow2(const uint_fast8_t power)
{
return ((size_t)1U) << power;
}
/// This is equivalent to pow2(log2Ceil(x)). Undefined for x<2.
O1HEAP_PRIVATE size_t roundUpToPowerOf2(const size_t x)
{
O1HEAP_ASSERT(x >= 2U);
// NOLINTNEXTLINE redundant cast to the same type.
return ((size_t)1U) << ((sizeof(x) * CHAR_BIT) - ((uint_fast8_t)O1HEAP_CLZ(x - 1U)));
}
/// Links two fragments so that their next/prev pointers point to each other; left goes before right.
O1HEAP_PRIVATE void interlink(Fragment* const left, Fragment* const right)
{
if (O1HEAP_LIKELY(left != NULL))
{
left->header.next = right;
}
if (O1HEAP_LIKELY(right != NULL))
{
right->header.prev = left;
}
}
/// Adds a new fragment into the appropriate bin and updates the lookup mask.
O1HEAP_PRIVATE void rebin(O1HeapInstance* const handle, Fragment* const fragment)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(fragment != NULL);
O1HEAP_ASSERT(fragment->header.size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT((fragment->header.size % FRAGMENT_SIZE_MIN) == 0U);
const uint_fast8_t idx = log2Floor(fragment->header.size / FRAGMENT_SIZE_MIN); // Round DOWN when inserting.
O1HEAP_ASSERT(idx < NUM_BINS_MAX);
// Add the new fragment to the beginning of the bin list.
// I.e., each allocation will be returning the most-recently-used fragment -- good for caching.
fragment->next_free = handle->bins[idx];
fragment->prev_free = NULL;
if (O1HEAP_LIKELY(handle->bins[idx] != NULL))
{
handle->bins[idx]->prev_free = fragment;
}
handle->bins[idx] = fragment;
handle->nonempty_bin_mask |= pow2(idx);
}
/// Removes the specified fragment from its bin.
O1HEAP_PRIVATE void unbin(O1HeapInstance* const handle, const Fragment* const fragment)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(fragment != NULL);
O1HEAP_ASSERT(fragment->header.size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT((fragment->header.size % FRAGMENT_SIZE_MIN) == 0U);
const uint_fast8_t idx = log2Floor(fragment->header.size / FRAGMENT_SIZE_MIN); // Round DOWN when removing.
O1HEAP_ASSERT(idx < NUM_BINS_MAX);
// Remove the bin from the free fragment list.
if (O1HEAP_LIKELY(fragment->next_free != NULL))
{
fragment->next_free->prev_free = fragment->prev_free;
}
if (O1HEAP_LIKELY(fragment->prev_free != NULL))
{
fragment->prev_free->next_free = fragment->next_free;
}
// Update the bin header.
if (O1HEAP_LIKELY(handle->bins[idx] == fragment))
{
O1HEAP_ASSERT(fragment->prev_free == NULL);
handle->bins[idx] = fragment->next_free;
if (O1HEAP_LIKELY(handle->bins[idx] == NULL))
{
handle->nonempty_bin_mask &= ~pow2(idx);
}
}
}
// ---------------------------------------- PUBLIC API IMPLEMENTATION ----------------------------------------
O1HeapInstance* o1heapInit(void* const base, const size_t size)
{
O1HeapInstance* out = NULL;
if ((base != NULL) && ((((size_t)base) % O1HEAP_ALIGNMENT) == 0U) &&
(size >= (INSTANCE_SIZE_PADDED + FRAGMENT_SIZE_MIN)))
{
// Allocate the core heap metadata structure in the beginning of the arena.
O1HEAP_ASSERT(((size_t)base) % sizeof(O1HeapInstance*) == 0U);
out = (O1HeapInstance*)base;
out->nonempty_bin_mask = 0U;
for (size_t i = 0; i < NUM_BINS_MAX; i++)
{
out->bins[i] = NULL;
}
// Limit and align the capacity.
size_t capacity = size - INSTANCE_SIZE_PADDED;
if (capacity > FRAGMENT_SIZE_MAX)
{
capacity = FRAGMENT_SIZE_MAX;
}
while ((capacity % FRAGMENT_SIZE_MIN) != 0)
{
O1HEAP_ASSERT(capacity > 0U);
capacity--;
}
O1HEAP_ASSERT((capacity % FRAGMENT_SIZE_MIN) == 0);
O1HEAP_ASSERT((capacity >= FRAGMENT_SIZE_MIN) && (capacity <= FRAGMENT_SIZE_MAX));
// Initialize the root fragment.
Fragment* const frag = (Fragment*)(void*)(((char*)base) + INSTANCE_SIZE_PADDED);
O1HEAP_ASSERT((((size_t)frag) % O1HEAP_ALIGNMENT) == 0U);
frag->header.next = NULL;
frag->header.prev = NULL;
frag->header.size = capacity;
frag->header.used = false;
frag->next_free = NULL;
frag->prev_free = NULL;
rebin(out, frag);
O1HEAP_ASSERT(out->nonempty_bin_mask != 0U);
// Initialize the diagnostics.
out->diagnostics.capacity = capacity;
out->diagnostics.allocated = 0U;
out->diagnostics.peak_allocated = 0U;
out->diagnostics.peak_request_size = 0U;
out->diagnostics.oom_count = 0U;
}
return out;
}
void* o1heapAllocate(O1HeapInstance* const handle, const size_t amount)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(handle->diagnostics.capacity <= FRAGMENT_SIZE_MAX);
void* out = NULL;
// If the amount approaches approx. SIZE_MAX/2, an undetected integer overflow may occur.
// To avoid that, we do not attempt allocation if the amount exceeds the hard limit.
// We perform multiple redundant checks to account for a possible unaccounted overflow.
if (O1HEAP_LIKELY((amount > 0U) && (amount <= (handle->diagnostics.capacity - O1HEAP_ALIGNMENT))))
{
// Add the header size and align the allocation size to the power of 2.
// See "Timing-Predictable Memory Allocation In Hard Real-Time Systems", Herter, page 27.
const size_t fragment_size = roundUpToPowerOf2(amount + O1HEAP_ALIGNMENT);
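// Worked example (illustrative addition, not part of the upstream source): on a typical 64-bit
// target O1HEAP_ALIGNMENT is 32, so a request of 100 bytes becomes 100 + 32 = 132, which is
// rounded up to the next power of two, 256, and served from a bin holding 256-byte-or-larger fragments.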
O1HEAP_ASSERT(fragment_size <= FRAGMENT_SIZE_MAX);
O1HEAP_ASSERT(fragment_size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT(fragment_size >= amount + O1HEAP_ALIGNMENT);
O1HEAP_ASSERT((fragment_size & (fragment_size - 1U)) == 0U); // Is power of 2.
const uint_fast8_t optimal_bin_index = log2Ceil(fragment_size / FRAGMENT_SIZE_MIN); // Use CEIL when fetching.
O1HEAP_ASSERT(optimal_bin_index < NUM_BINS_MAX);
const size_t candidate_bin_mask = ~(pow2(optimal_bin_index) - 1U);
// Find the smallest non-empty bin we can use.
const size_t suitable_bins = handle->nonempty_bin_mask & candidate_bin_mask;
const size_t smallest_bin_mask = suitable_bins & ~(suitable_bins - 1U); // Clear all bits but the lowest.
if (O1HEAP_LIKELY(smallest_bin_mask != 0))
{
O1HEAP_ASSERT((smallest_bin_mask & (smallest_bin_mask - 1U)) == 0U); // Is power of 2.
const uint_fast8_t bin_index = log2Floor(smallest_bin_mask);
O1HEAP_ASSERT(bin_index >= optimal_bin_index);
O1HEAP_ASSERT(bin_index < NUM_BINS_MAX);
// The bin we found shall not be empty, otherwise it's a state divergence (memory corruption?).
Fragment* const frag = handle->bins[bin_index];
O1HEAP_ASSERT(frag != NULL);
O1HEAP_ASSERT(frag->header.size >= fragment_size);
O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
O1HEAP_ASSERT(!frag->header.used);
unbin(handle, frag);
// Split the fragment if it is too large.
const size_t leftover = frag->header.size - fragment_size;
frag->header.size = fragment_size;
O1HEAP_ASSERT(leftover < handle->diagnostics.capacity); // Overflow check.
O1HEAP_ASSERT(leftover % FRAGMENT_SIZE_MIN == 0U); // Alignment check.
if (O1HEAP_LIKELY(leftover >= FRAGMENT_SIZE_MIN))
{
Fragment* const new_frag = (Fragment*)(void*)(((char*)frag) + fragment_size);
O1HEAP_ASSERT(((size_t)new_frag) % O1HEAP_ALIGNMENT == 0U);
new_frag->header.size = leftover;
new_frag->header.used = false;
interlink(new_frag, frag->header.next);
interlink(frag, new_frag);
rebin(handle, new_frag);
}
// Update the diagnostics.
O1HEAP_ASSERT((handle->diagnostics.allocated % FRAGMENT_SIZE_MIN) == 0U);
handle->diagnostics.allocated += fragment_size;
O1HEAP_ASSERT(handle->diagnostics.allocated <= handle->diagnostics.capacity);
if (O1HEAP_LIKELY(handle->diagnostics.peak_allocated < handle->diagnostics.allocated))
{
handle->diagnostics.peak_allocated = handle->diagnostics.allocated;
}
// Finalize the fragment we just allocated.
O1HEAP_ASSERT(frag->header.size >= amount + O1HEAP_ALIGNMENT);
frag->header.used = true;
out = ((char*)frag) + O1HEAP_ALIGNMENT;
}
}
// Update the diagnostics.
if (O1HEAP_LIKELY(handle->diagnostics.peak_request_size < amount))
{
handle->diagnostics.peak_request_size = amount;
}
if (O1HEAP_LIKELY((out == NULL) && (amount > 0U)))
{
handle->diagnostics.oom_count++;
}
return out;
}
void o1heapFree(O1HeapInstance* const handle, void* const pointer)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(handle->diagnostics.capacity <= FRAGMENT_SIZE_MAX);
if (O1HEAP_LIKELY(pointer != NULL)) // NULL pointer is a no-op.
{
Fragment* const frag = (Fragment*)(void*)(((char*)pointer) - O1HEAP_ALIGNMENT);
// Check for heap corruption in debug builds.
O1HEAP_ASSERT(((size_t)frag) % sizeof(Fragment*) == 0U);
O1HEAP_ASSERT(((size_t)frag) >= (((size_t)handle) + INSTANCE_SIZE_PADDED));
O1HEAP_ASSERT(((size_t)frag) <=
(((size_t)handle) + INSTANCE_SIZE_PADDED + handle->diagnostics.capacity - FRAGMENT_SIZE_MIN));
O1HEAP_ASSERT(frag->header.used); // Catch double-free
O1HEAP_ASSERT(((size_t)frag->header.next) % sizeof(Fragment*) == 0U);
O1HEAP_ASSERT(((size_t)frag->header.prev) % sizeof(Fragment*) == 0U);
O1HEAP_ASSERT(frag->header.size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT(frag->header.size <= handle->diagnostics.capacity);
O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
// Even if we're going to drop the fragment later, mark it free anyway to prevent double-free.
frag->header.used = false;
// Update the diagnostics. It must be done before merging because it invalidates the fragment size information.
O1HEAP_ASSERT(handle->diagnostics.allocated >= frag->header.size); // Heap corruption check.
handle->diagnostics.allocated -= frag->header.size;
// Merge with siblings and insert the returned fragment into the appropriate bin and update metadata.
Fragment* const prev = frag->header.prev;
Fragment* const next = frag->header.next;
const bool join_left = (prev != NULL) && (!prev->header.used);
const bool join_right = (next != NULL) && (!next->header.used);
if (join_left && join_right) // [ prev ][ this ][ next ] => [ ------- prev ------- ]
{
unbin(handle, prev);
unbin(handle, next);
prev->header.size += frag->header.size + next->header.size;
frag->header.size = 0; // Invalidate the dropped fragment headers to prevent double-free.
next->header.size = 0;
O1HEAP_ASSERT((prev->header.size % FRAGMENT_SIZE_MIN) == 0U);
interlink(prev, next->header.next);
rebin(handle, prev);
}
else if (join_left) // [ prev ][ this ][ next ] => [ --- prev --- ][ next ]
{
unbin(handle, prev);
prev->header.size += frag->header.size;
frag->header.size = 0;
O1HEAP_ASSERT((prev->header.size % FRAGMENT_SIZE_MIN) == 0U);
interlink(prev, next);
rebin(handle, prev);
}
else if (join_right) // [ prev ][ this ][ next ] => [ prev ][ --- this --- ]
{
unbin(handle, next);
frag->header.size += next->header.size;
next->header.size = 0;
O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
interlink(frag, next->header.next);
rebin(handle, frag);
}
else
{
rebin(handle, frag);
}
}
}
bool o1heapDoInvariantsHold(const O1HeapInstance* const handle)
{
O1HEAP_ASSERT(handle != NULL);
bool valid = true;
// Check the bin mask consistency.
for (size_t i = 0; i < NUM_BINS_MAX; i++) // Dear compiler, feel free to unroll this loop.
{
const bool mask_bit_set = (handle->nonempty_bin_mask & pow2((uint_fast8_t)i)) != 0U;
const bool bin_nonempty = handle->bins[i] != NULL;
valid = valid && (mask_bit_set == bin_nonempty);
}
// Create a local copy of the diagnostics struct.
const O1HeapDiagnostics diag = handle->diagnostics;
// Capacity check.
valid = valid && (diag.capacity <= FRAGMENT_SIZE_MAX) && (diag.capacity >= FRAGMENT_SIZE_MIN) &&
((diag.capacity % FRAGMENT_SIZE_MIN) == 0U);
// Allocation info check.
valid = valid && (diag.allocated <= diag.capacity) && ((diag.allocated % FRAGMENT_SIZE_MIN) == 0U) &&
(diag.peak_allocated <= diag.capacity) && (diag.peak_allocated >= diag.allocated) &&
((diag.peak_allocated % FRAGMENT_SIZE_MIN) == 0U);
// Peak request check
valid = valid && ((diag.peak_request_size < diag.capacity) || (diag.oom_count > 0U));
if (diag.peak_request_size == 0U)
{
valid = valid && (diag.peak_allocated == 0U) && (diag.allocated == 0U) && (diag.oom_count == 0U);
}
else
{
valid = valid && // Overflow on summation is possible but safe to ignore.
(((diag.peak_request_size + O1HEAP_ALIGNMENT) <= diag.peak_allocated) || (diag.oom_count > 0U));
}
return valid;
}
O1HeapDiagnostics o1heapGetDiagnostics(const O1HeapInstance* const handle)
{
O1HEAP_ASSERT(handle != NULL);
const O1HeapDiagnostics out = handle->diagnostics;
return out;
}

122
thirdparty/o1heap/o1heap.h vendored Normal file
View file

@@ -0,0 +1,122 @@
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Copyright (c) 2020 Pavel Kirienko
// Authors: Pavel Kirienko <pavel.kirienko@zubax.com>
//
// READ THE DOCUMENTATION IN README.md.
#ifndef O1HEAP_H_INCLUDED
#define O1HEAP_H_INCLUDED
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/// The semantic version number of this distribution.
#define O1HEAP_VERSION_MAJOR 2
/// The guaranteed alignment depends on the platform pointer width.
#define O1HEAP_ALIGNMENT (sizeof(void*) * 4U)
/// The definition is private, so the user code can only operate on pointers. This is done to enforce encapsulation.
typedef struct O1HeapInstance O1HeapInstance;
/// Runtime diagnostic information. This information can be used to facilitate runtime self-testing,
/// as required by certain safety-critical development guidelines.
/// If assertion checks are not disabled, the library will perform automatic runtime self-diagnostics that trigger
/// an assertion failure if a heap corruption is detected.
/// Health checks and validation can be done with o1heapDoInvariantsHold().
typedef struct
{
/// The total amount of memory available for serving allocation requests (heap size).
/// The maximum allocation size is (capacity - O1HEAP_ALIGNMENT).
/// This parameter does not include the overhead used up by O1HeapInstance and arena alignment.
/// This parameter is constant.
size_t capacity;
/// The amount of memory that is currently allocated, including the per-fragment overhead and size alignment.
/// For example, if the application requested a fragment of size 1 byte, the value reported here may be 32 bytes.
size_t allocated;
/// The maximum value of 'allocated' seen since initialization. This parameter is never decreased.
size_t peak_allocated;
/// The largest amount of memory that the allocator has attempted to allocate (perhaps unsuccessfully)
/// since initialization (not including the rounding and the allocator's own per-fragment overhead,
/// so the total is larger). This parameter is never decreased. The initial value is zero.
size_t peak_request_size;
/// The number of times an allocation request could not be completed due to the lack of memory or
/// excessive fragmentation. OOM stands for "out of memory". This parameter is never decreased.
uint64_t oom_count;
} O1HeapDiagnostics;
/// The arena base pointer shall be aligned at O1HEAP_ALIGNMENT, otherwise NULL is returned.
///
/// The total heap capacity cannot exceed approx. (SIZE_MAX/2). If the arena size allows for a larger heap,
/// the excess will be silently truncated away (no error). This is not a realistic use case because a typical
/// application is unlikely to be able to dedicate that much of the address space for the heap.
///
/// The function initializes a new heap instance allocated in the provided arena, taking some of its space for its
/// own needs (normally about 40..600 bytes depending on the architecture, but this parameter is not characterized).
/// A pointer to the newly initialized instance is returned.
///
/// If the provided space is insufficient, NULL is returned.
///
/// An initialized instance does not hold any resources. Therefore, if the instance is no longer needed,
/// it can be discarded without any de-initialization procedures.
///
/// The heap is not thread-safe; external synchronization may be required.
O1HeapInstance* o1heapInit(void* const base, const size_t size);
/// The semantics follows malloc() with additional guarantees the full list of which is provided below.
///
/// If the allocation request is served successfully, a pointer to the newly allocated memory fragment is returned.
/// The returned pointer is guaranteed to be aligned at O1HEAP_ALIGNMENT.
///
/// If the allocation request cannot be served due to the lack of memory or its excessive fragmentation,
/// a NULL pointer is returned.
///
/// The function is executed in constant time.
/// The allocated memory is NOT zero-filled (because zero-filling is a variable-complexity operation).
void* o1heapAllocate(O1HeapInstance* const handle, const size_t amount);
/// The semantics follows free() with additional guarantees the full list of which is provided below.
///
/// If the pointer does not point to a previously allocated block and is not NULL, the behavior is undefined.
/// Builds where assertion checks are enabled may trigger an assertion failure for some invalid inputs.
///
/// The function is executed in constant time.
void o1heapFree(O1HeapInstance* const handle, void* const pointer);
/// Performs a basic sanity check on the heap.
/// This function can be used as a weak but fast method of heap corruption detection.
/// If the handle pointer is NULL, the behavior is undefined.
/// The time complexity is constant.
/// The return value is truth if the heap looks valid, falsity otherwise.
bool o1heapDoInvariantsHold(const O1HeapInstance* const handle);
/// Samples and returns a copy of the diagnostic information, see O1HeapDiagnostics.
/// This function merely copies the structure from an internal storage, so it is fast to return.
/// If the handle pointer is NULL, the behavior is undefined.
O1HeapDiagnostics o1heapGetDiagnostics(const O1HeapInstance* const handle);
#ifdef __cplusplus
}
#endif
#endif // O1HEAP_H_INCLUDED
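A minimal usage sketch of the API documented above (illustrative; the arena size is chosen arbitrarily and the 256-byte figure assumes a 64-bit target):
    #include "o1heap.h"

    int main(void)
    {
        // O1HEAP_ALIGNMENT is sizeof(void*) * 4, i.e. 32 bytes on a typical 64-bit target.
        alignas(O1HEAP_ALIGNMENT) static unsigned char arena[64 * 1024];
        O1HeapInstance* heap = o1heapInit(arena, sizeof(arena));
        if (heap == nullptr)
            return 1;
        void* p = o1heapAllocate(heap, 100);    // internally rounded up (e.g. to a 256-byte fragment)
        o1heapFree(heap, p);                    // freeing NULL would be a no-op
        return o1heapDoInvariantsHold(heap) ? 0 : 1;
    }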