Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-04-28 13:28:01 +03:00)
Merge branch 'master' into aslr
Commit ee00978a57
138 changed files with 4263 additions and 17736 deletions
@@ -72,13 +72,13 @@ if [ ! -d "/tmp/Qt/$QT_VER" ]; then
git clone https://github.com/engnr/qt-downloader.git
cd qt-downloader
git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597
-# nested Qt 6.8.3 URL workaround
+# nested Qt 6.9.0 URL workaround
# sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
# sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
cd "/tmp/Qt"
"$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml
mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64"
-# sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.3 workaround
+# sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.0 workaround
"$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64"
fi

@@ -39,13 +39,13 @@ if [ ! -d "/tmp/Qt/$QT_VER" ]; then
git clone https://github.com/engnr/qt-downloader.git
cd qt-downloader
git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597
-# nested Qt 6.8.3 URL workaround
+# nested Qt 6.9.0 URL workaround
# sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
# sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
cd "/tmp/Qt"
"$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml
mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64"
-# sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.3 workaround
+# sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.0 workaround
"$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64"
fi

@@ -25,11 +25,20 @@ if [ "$DEPLOY_APPIMAGE" = "true" ]; then
# Remove libvulkan because it causes issues with gamescope
rm -f ./AppDir/usr/lib/libvulkan.so*

# Remove unused Qt6 libraries
rm -f ./AppDir/usr/lib/libQt6OpenGL.so*
rm -f ./AppDir/usr/lib/libQt6Qml*.so*
rm -f ./AppDir/usr/lib/libQt6Quick.so*
rm -f ./AppDir/usr/lib/libQt6VirtualKeyboard.so*
rm -f ./AppDir/usr/plugins/platforminputcontexts/libqtvirtualkeyboardplugin.so*

# Remove git directory containing local commit history file
rm -rf ./AppDir/usr/share/rpcs3/git

linuxdeploy --appimage-extract
./squashfs-root/plugins/linuxdeploy-plugin-appimage/usr/bin/appimagetool AppDir -g
curl -fsSLo /uruntime "https://github.com/VHSgunzo/uruntime/releases/download/v0.3.4/uruntime-appimage-dwarfs-$CPU_ARCH"
chmod +x /uruntime
/uruntime --appimage-mkdwarfs -f --set-owner 0 --set-group 0 --no-history --no-create-timestamp \
--compression zstd:level=22 -S26 -B32 --header /uruntime -i AppDir -o RPCS3.AppImage

APPIMAGE_SUFFIX="linux_${CPU_ARCH}"
if [ "$CPU_ARCH" = "x86_64" ]; then
@@ -8,7 +8,6 @@ ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY"

# Remove unecessary files
rm -f ./bin/rpcs3.exp ./bin/rpcs3.lib ./bin/rpcs3.pdb ./bin/vc_redist.x64.exe
-rm -rf ./bin/git

# Prepare compatibility and SDL database for packaging
mkdir ./bin/config
@@ -7,7 +7,7 @@ env:
BUILD_SOURCEBRANCHNAME: $CIRRUS_BRANCH
RPCS3_TOKEN: ENCRYPTED[100ebb8e3552bf2021d0ef55dccda3e58d27be5b6cab0b0b92843ef490195d3c4edaefa087e4a3b425caa6392300b9b1]
QT_VER_MAIN: '6'
-QT_VER: '6.8.3'
+QT_VER: '6.9.0'
LLVM_COMPILER_VER: '19'
LLVM_VER: '19.1.7'

@@ -23,7 +23,7 @@ env:
# COMPILER: msvc
# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}\artifacts\
# QT_VER_MSVC: 'msvc2022'
-# QT_DATE: '202503201308'
+# QT_DATE: '202503301022'
# QTDIR: C:\Qt\${QT_VER}\${QT_VER_MSVC}_64
# VULKAN_VER: '1.3.268.0'
# VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5'

@@ -58,7 +58,7 @@ env:

# linux_task:
# container:
-# image: rpcs3/rpcs3-ci-jammy:1.2
+# image: rpcs3/rpcs3-ci-jammy:1.5
# cpu: 4
# memory: 16G
# env:

@@ -134,7 +134,7 @@ freebsd_task:
# matrix:
# - name: Cirrus Linux AArch64 Clang
# arm_container:
-# image: 'docker.io/rpcs3/rpcs3-ci-jammy-aarch64:1.2'
+# image: 'docker.io/rpcs3/rpcs3-ci-jammy-aarch64:1.5'
# cpu: 8
# memory: 8G
# clang_script:
.github/workflows/rpcs3.yml: 10 changed lines (vendored)

@@ -25,17 +25,17 @@ jobs:
matrix:
include:
- os: ubuntu-24.04
-docker_img: "rpcs3/rpcs3-ci-jammy:1.4"
+docker_img: "rpcs3/rpcs3-ci-jammy:1.5"
build_sh: "/rpcs3/.ci/build-linux.sh"
compiler: clang
UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f
UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux"
- os: ubuntu-24.04
-docker_img: "rpcs3/rpcs3-ci-jammy:1.4"
+docker_img: "rpcs3/rpcs3-ci-jammy:1.5"
build_sh: "/rpcs3/.ci/build-linux.sh"
compiler: gcc
- os: ubuntu-24.04-arm
-docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.4"
+docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.5"
build_sh: "/rpcs3/.ci/build-linux-aarch64.sh"
compiler: clang
UPLOAD_COMMIT_HASH: a1d35836e8d45bfc6f63c26f0a3e5d46ef622fe1

@@ -105,9 +105,9 @@ jobs:
env:
COMPILER: msvc
QT_VER_MAIN: '6'
-QT_VER: '6.8.3'
+QT_VER: '6.9.0'
QT_VER_MSVC: 'msvc2022'
-QT_DATE: '202503201308'
+QT_DATE: '202503301022'
LLVM_VER: '19.1.7'
VULKAN_VER: '1.3.268.0'
VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5'
.gitignore: 3 changed lines (vendored)

@@ -55,9 +55,6 @@
/bin/GuiConfigs/*.dat
/bin/GuiConfigs/*.dat.*

-# Some data from git
-!/bin/git/

# Visual Studio Files
.vs/*
.vscode/*
3rdparty/GL/glext.h: 51 changed lines (vendored)
|
@ -32,7 +32,7 @@ extern "C" {
|
|||
#define GLAPI extern
|
||||
#endif
|
||||
|
||||
#define GL_GLEXT_VERSION 20220530
|
||||
#define GL_GLEXT_VERSION 20250203
|
||||
|
||||
#include <KHR/khrplatform.h>
|
||||
|
||||
|
@ -5397,12 +5397,12 @@ typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severi
|
|||
typedef void (APIENTRYP PFNGLDEBUGMESSAGEENABLEAMDPROC) (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
|
||||
typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTAMDPROC) (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf);
|
||||
typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKAMDPROC) (GLDEBUGPROCAMD callback, void *userParam);
|
||||
typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
|
||||
typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
|
||||
#ifdef GL_GLEXT_PROTOTYPES
|
||||
GLAPI void APIENTRY glDebugMessageEnableAMD (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
|
||||
GLAPI void APIENTRY glDebugMessageInsertAMD (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf);
|
||||
GLAPI void APIENTRY glDebugMessageCallbackAMD (GLDEBUGPROCAMD callback, void *userParam);
|
||||
GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
|
||||
GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
|
||||
#endif
|
||||
#endif /* GL_AMD_debug_output */
|
||||
|
||||
|
@ -7370,6 +7370,16 @@ GLAPI void APIENTRY glBlitFramebufferEXT (GLint srcX0, GLint srcY0, GLint srcX1,
|
|||
#endif
|
||||
#endif /* GL_EXT_framebuffer_blit */
|
||||
|
||||
#ifndef GL_EXT_framebuffer_blit_layers
|
||||
#define GL_EXT_framebuffer_blit_layers 1
|
||||
typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYERSEXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
|
||||
typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYEREXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter);
|
||||
#ifdef GL_GLEXT_PROTOTYPES
|
||||
GLAPI void APIENTRY glBlitFramebufferLayersEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
|
||||
GLAPI void APIENTRY glBlitFramebufferLayerEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter);
|
||||
#endif
|
||||
#endif /* GL_EXT_framebuffer_blit_layers */
|
||||
|
||||
#ifndef GL_EXT_framebuffer_multisample
|
||||
#define GL_EXT_framebuffer_multisample 1
|
||||
#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB
|
||||
|
@ -9394,6 +9404,11 @@ GLAPI void APIENTRY glResizeBuffersMESA (void);
|
|||
#define GL_MESA_shader_integer_functions 1
|
||||
#endif /* GL_MESA_shader_integer_functions */
|
||||
|
||||
#ifndef GL_MESA_texture_const_bandwidth
|
||||
#define GL_MESA_texture_const_bandwidth 1
|
||||
#define GL_CONST_BW_TILING_MESA 0x8BBE
|
||||
#endif /* GL_MESA_texture_const_bandwidth */
|
||||
|
||||
#ifndef GL_MESA_tile_raster_order
|
||||
#define GL_MESA_tile_raster_order 1
|
||||
#define GL_TILE_RASTER_ORDER_FIXED_MESA 0x8BB8
|
||||
|
@ -10248,12 +10263,6 @@ typedef void (APIENTRYP PFNGLMULTITEXCOORD3HNVPROC) (GLenum target, GLhalfNV s,
|
|||
typedef void (APIENTRYP PFNGLMULTITEXCOORD3HVNVPROC) (GLenum target, const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLMULTITEXCOORD4HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
|
||||
typedef void (APIENTRYP PFNGLMULTITEXCOORD4HVNVPROC) (GLenum target, const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog);
|
||||
typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog);
|
||||
typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
|
||||
typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight);
|
||||
typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight);
|
||||
typedef void (APIENTRYP PFNGLVERTEXATTRIB1HNVPROC) (GLuint index, GLhalfNV x);
|
||||
typedef void (APIENTRYP PFNGLVERTEXATTRIB1HVNVPROC) (GLuint index, const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLVERTEXATTRIB2HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y);
|
||||
|
@ -10266,6 +10275,12 @@ typedef void (APIENTRYP PFNGLVERTEXATTRIBS1HVNVPROC) (GLuint index, GLsizei n, c
|
|||
typedef void (APIENTRYP PFNGLVERTEXATTRIBS2HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLVERTEXATTRIBS3HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLVERTEXATTRIBS4HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog);
|
||||
typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog);
|
||||
typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
|
||||
typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v);
|
||||
typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight);
|
||||
typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight);
|
||||
#ifdef GL_GLEXT_PROTOTYPES
|
||||
GLAPI void APIENTRY glVertex2hNV (GLhalfNV x, GLhalfNV y);
|
||||
GLAPI void APIENTRY glVertex2hvNV (const GLhalfNV *v);
|
||||
|
@ -10295,12 +10310,6 @@ GLAPI void APIENTRY glMultiTexCoord3hNV (GLenum target, GLhalfNV s, GLhalfNV t,
|
|||
GLAPI void APIENTRY glMultiTexCoord3hvNV (GLenum target, const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glMultiTexCoord4hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
|
||||
GLAPI void APIENTRY glMultiTexCoord4hvNV (GLenum target, const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog);
|
||||
GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog);
|
||||
GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
|
||||
GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight);
|
||||
GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight);
|
||||
GLAPI void APIENTRY glVertexAttrib1hNV (GLuint index, GLhalfNV x);
|
||||
GLAPI void APIENTRY glVertexAttrib1hvNV (GLuint index, const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glVertexAttrib2hNV (GLuint index, GLhalfNV x, GLhalfNV y);
|
||||
|
@ -10313,6 +10322,12 @@ GLAPI void APIENTRY glVertexAttribs1hvNV (GLuint index, GLsizei n, const GLhalfN
|
|||
GLAPI void APIENTRY glVertexAttribs2hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glVertexAttribs3hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glVertexAttribs4hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog);
|
||||
GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog);
|
||||
GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
|
||||
GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v);
|
||||
GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight);
|
||||
GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight);
|
||||
#endif
|
||||
#endif /* GL_NV_half_float */
|
||||
|
||||
|
@ -11449,6 +11464,10 @@ GLAPI void APIENTRY glDrawTransformFeedbackNV (GLenum mode, GLuint id);
|
|||
#endif
|
||||
#endif /* GL_NV_transform_feedback2 */
|
||||
|
||||
#ifndef GL_NV_uniform_buffer_std430_layout
|
||||
#define GL_NV_uniform_buffer_std430_layout 1
|
||||
#endif /* GL_NV_uniform_buffer_std430_layout */
|
||||
|
||||
#ifndef GL_NV_uniform_buffer_unified_memory
|
||||
#define GL_NV_uniform_buffer_unified_memory 1
|
||||
#define GL_UNIFORM_BUFFER_UNIFIED_NV 0x936E
|
||||
|
@ -11964,8 +11983,10 @@ GLAPI void APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum s
|
|||
#define GL_MAX_VIEWS_OVR 0x9631
|
||||
#define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633
|
||||
typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
|
||||
typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
|
||||
#ifdef GL_GLEXT_PROTOTYPES
|
||||
GLAPI void APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
|
||||
GLAPI void APIENTRY glNamedFramebufferTextureMultiviewOVR (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
|
||||
#endif
|
||||
#endif /* GL_OVR_multiview */
|
||||
|
||||
|
3rdparty/SoundTouch/soundtouch: 2 changed lines (vendored)

@@ -1 +1 @@
-Subproject commit 394e1f58b23dc80599214d2e9b6a5e0dfd0bbe07
+Subproject commit 3982730833b6daefe77dcfb32b5c282851640c17

3rdparty/curl/curl: 2 changed lines (vendored)

@@ -1 +1 @@
-Subproject commit 57495c64871d18905a0941db9196ef90bafe9a29
+Subproject commit 1c3149881769e7bd79b072e48374e4c2b3678b2f
3rdparty/curl/libcurl.vcxproj: 14 changed lines (vendored)
|
@ -76,6 +76,7 @@
|
|||
<ClCompile Include="curl\lib\connect.c" />
|
||||
<ClCompile Include="curl\lib\content_encoding.c" />
|
||||
<ClCompile Include="curl\lib\cookie.c" />
|
||||
<ClCompile Include="curl\lib\cshutdn.c" />
|
||||
<ClCompile Include="curl\lib\curl_addrinfo.c" />
|
||||
<ClCompile Include="curl\lib\curl_des.c" />
|
||||
<ClCompile Include="curl\lib\curl_endian.c" />
|
||||
|
@ -94,7 +95,9 @@
|
|||
<ClCompile Include="curl\lib\curl_threads.c" />
|
||||
<ClCompile Include="curl\lib\curl_trc.c" />
|
||||
<ClCompile Include="curl\lib\cw-out.c" />
|
||||
<ClCompile Include="curl\lib\cw-pause.c" />
|
||||
<ClCompile Include="curl\lib\dict.c" />
|
||||
<ClCompile Include="curl\lib\dllmain.c" />
|
||||
<ClCompile Include="curl\lib\doh.c" />
|
||||
<ClCompile Include="curl\lib\dynbuf.c" />
|
||||
<ClCompile Include="curl\lib\dynhds.c" />
|
||||
|
@ -112,6 +115,7 @@
|
|||
<ClCompile Include="curl\lib\getinfo.c" />
|
||||
<ClCompile Include="curl\lib\gopher.c" />
|
||||
<ClCompile Include="curl\lib\hash.c" />
|
||||
<ClCompile Include="curl\lib\hash_offt.c" />
|
||||
<ClCompile Include="curl\lib\headers.c" />
|
||||
<ClCompile Include="curl\lib\hmac.c" />
|
||||
<ClCompile Include="curl\lib\hostasyn.c" />
|
||||
|
@ -138,6 +142,7 @@
|
|||
<ClCompile Include="curl\lib\krb5.c" />
|
||||
<ClCompile Include="curl\lib\ldap.c" />
|
||||
<ClCompile Include="curl\lib\llist.c" />
|
||||
<ClCompile Include="curl\lib\macos.c" />
|
||||
<ClCompile Include="curl\lib\md4.c" />
|
||||
<ClCompile Include="curl\lib\md5.c" />
|
||||
<ClCompile Include="curl\lib\memdebug.c" />
|
||||
|
@ -145,6 +150,7 @@
|
|||
<ClCompile Include="curl\lib\mprintf.c" />
|
||||
<ClCompile Include="curl\lib\mqtt.c" />
|
||||
<ClCompile Include="curl\lib\multi.c" />
|
||||
<ClCompile Include="curl\lib\multi_ev.c" />
|
||||
<ClCompile Include="curl\lib\netrc.c" />
|
||||
<ClCompile Include="curl\lib\nonblock.c" />
|
||||
<ClCompile Include="curl\lib\noproxy.c" />
|
||||
|
@ -174,10 +180,9 @@
|
|||
<ClCompile Include="curl\lib\splay.c" />
|
||||
<ClCompile Include="curl\lib\strcase.c" />
|
||||
<ClCompile Include="curl\lib\strdup.c" />
|
||||
<ClCompile Include="curl\lib\strequal.c" />
|
||||
<ClCompile Include="curl\lib\strerror.c" />
|
||||
<ClCompile Include="curl\lib\strparse.c" />
|
||||
<ClCompile Include="curl\lib\strtok.c" />
|
||||
<ClCompile Include="curl\lib\strtoofft.c" />
|
||||
<ClCompile Include="curl\lib\system_win32.c" />
|
||||
<ClCompile Include="curl\lib\telnet.c" />
|
||||
<ClCompile Include="curl\lib\tftp.c" />
|
||||
|
@ -260,6 +265,7 @@
|
|||
<ClInclude Include="curl\lib\connect.h" />
|
||||
<ClInclude Include="curl\lib\content_encoding.h" />
|
||||
<ClInclude Include="curl\lib\cookie.h" />
|
||||
<ClInclude Include="curl\lib\cshutdn.h" />
|
||||
<ClInclude Include="curl\lib\curlx.h" />
|
||||
<ClInclude Include="curl\lib\curl_addrinfo.h" />
|
||||
<ClInclude Include="curl\lib\curl_base64.h" />
|
||||
|
@ -291,6 +297,7 @@
|
|||
<ClInclude Include="curl\lib\curl_threads.h" />
|
||||
<ClInclude Include="curl\lib\curl_trc.h" />
|
||||
<ClInclude Include="curl\lib\cw-out.h" />
|
||||
<ClInclude Include="curl\lib\cw-pause.h" />
|
||||
<ClInclude Include="curl\lib\dict.h" />
|
||||
<ClInclude Include="curl\lib\doh.h" />
|
||||
<ClInclude Include="curl\lib\dynbuf.h" />
|
||||
|
@ -309,6 +316,7 @@
|
|||
<ClInclude Include="curl\lib\getinfo.h" />
|
||||
<ClInclude Include="curl\lib\gopher.h" />
|
||||
<ClInclude Include="curl\lib\hash.h" />
|
||||
<ClInclude Include="curl\lib\hash_offt.h" />
|
||||
<ClInclude Include="curl\lib\headers.h" />
|
||||
<ClInclude Include="curl\lib\hostip.h" />
|
||||
<ClInclude Include="curl\lib\hsts.h" />
|
||||
|
@ -328,11 +336,13 @@
|
|||
<ClInclude Include="curl\lib\inet_ntop.h" />
|
||||
<ClInclude Include="curl\lib\inet_pton.h" />
|
||||
<ClInclude Include="curl\lib\llist.h" />
|
||||
<ClInclude Include="curl\lib\macos.h" />
|
||||
<ClInclude Include="curl\lib\memdebug.h" />
|
||||
<ClInclude Include="curl\lib\mime.h" />
|
||||
<ClInclude Include="curl\lib\mqtt.h" />
|
||||
<ClInclude Include="curl\lib\multihandle.h" />
|
||||
<ClInclude Include="curl\lib\multiif.h" />
|
||||
<ClInclude Include="curl\lib\multi_ev.h" />
|
||||
<ClInclude Include="curl\lib\netrc.h" />
|
||||
<ClInclude Include="curl\lib\nonblock.h" />
|
||||
<ClInclude Include="curl\lib\noproxy.h" />
|
||||
|
|
3rdparty/curl/libcurl.vcxproj.filters: 42 changed lines (vendored)
|
@ -291,12 +291,6 @@
|
|||
<ClCompile Include="curl\lib\strerror.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\strtok.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\strtoofft.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\system_win32.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
|
@ -522,6 +516,27 @@
|
|||
<ClCompile Include="curl\lib\vtls\vtls_spack.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\cshutdn.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\cw-pause.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\dllmain.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\hash_offt.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\macos.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\multi_ev.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="curl\lib\strequal.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="curl\include\curl\curl.h">
|
||||
|
@ -1055,6 +1070,21 @@
|
|||
<ClInclude Include="curl\lib\vtls\vtls_spack.h">
|
||||
<Filter>Header Files</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="curl\lib\cshutdn.h">
|
||||
<Filter>Header Files</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="curl\lib\cw-pause.h">
|
||||
<Filter>Header Files</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="curl\lib\hash_offt.h">
|
||||
<Filter>Header Files</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="curl\lib\macos.h">
|
||||
<Filter>Header Files</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="curl\lib\multi_ev.h">
|
||||
<Filter>Header Files</Filter>
|
||||
</ClInclude>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ResourceCompile Include="curl\lib\libcurl.rc">
|
||||
|
|
3rdparty/robin_hood/include/robin_hood.h: 2551 changed lines (vendored); file diff suppressed because it is too large
3rdparty/unordered_dense/include/unordered_dense.h: 2101 changed lines (vendored, normal file); file diff suppressed because it is too large
BUILDING.md: 10 changed lines

@@ -19,26 +19,26 @@ The following tools are required to build RPCS3 on Windows 10 or later:
with standalone **CMake** tool.

- [Python 3.6+](https://www.python.org/downloads/) (add to PATH)
-- [Qt 6.8.3](https://www.qt.io/download-qt-installer) In case you can't download from the official installer, you can use [Another Qt installer](https://github.com/miurahr/aqtinstall) (In that case you will need to manually add the "qtmultimedia" module when installing Qt)
+- [Qt 6.9.0](https://www.qt.io/download-qt-installer) In case you can't download from the official installer, you can use [Another Qt installer](https://github.com/miurahr/aqtinstall) (In that case you will need to manually add the "qtmultimedia" module when installing Qt)
- [Vulkan SDK 1.3.268.0](https://vulkan.lunarg.com/sdk/home) (see "Install the SDK" [here](https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html)) for now future SDKs don't work. You need precisely 1.3.268.0.

The `sln` solution available only on **Visual Studio** is the preferred building solution. It easily allows to build the **RPCS3** application in `Release` and `Debug` mode.

In order to build **RPCS3** with the `sln` solution (with **Visual Studio**), **Qt** libs need to be detected. To detect the libs:
-- add and set the `QTDIR` environment variable, e.g. `<QtInstallFolder>\6.8.3\msvc2022_64\`
+- add and set the `QTDIR` environment variable, e.g. `<QtInstallFolder>\6.9.0\msvc2022_64\`
- or use the [Visual Studio Qt Plugin](https://marketplace.visualstudio.com/items?itemName=TheQtCompany.QtVisualStudioTools2022)

**NOTE:** If you have issues with the **Visual Studio Qt Plugin**, you may want to uninstall it and install the [Legacy Qt Plugin](https://marketplace.visualstudio.com/items?itemName=TheQtCompany.LEGACYQtVisualStudioTools2022) instead.

In order to build **RPCS3** with the `CMake` solution (with both **Visual Studio** and standalone **CMake** tool):
-- add and set the `Qt6_ROOT` environment variable to the **Qt** libs path, e.g. `<QtInstallFolder>\6.8.3\msvc2022_64\`
+- add and set the `Qt6_ROOT` environment variable to the **Qt** libs path, e.g. `<QtInstallFolder>\6.9.0\msvc2022_64\`

### Linux

These are the essentials tools to build RPCS3 on Linux. Some of them can be installed through your favorite package manager:
- Clang 17+ or GCC 13+
- [CMake 3.28.0+](https://www.cmake.org/download/)
-- [Qt 6.8.3](https://www.qt.io/download-qt-installer)
+- [Qt 6.9.0](https://www.qt.io/download-qt-installer)
- [Vulkan SDK 1.3.268.0](https://vulkan.lunarg.com/sdk/home) (See "Install the SDK" [here](https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html)) for now future SDKs don't work. You need precisely 1.3.268.0.
- [SDL3](https://github.com/libsdl-org/SDL/releases) (for the FAudio backend)

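The prerequisite lists in this hunk stop at installing the tools; a minimal configure-and-build sketch for the Linux toolchain is shown below. It is an illustration only: the clone URL is this mirror's upstream, while the build directory name, the Qt install path exported in `Qt6_ROOT` and the compiler selection are assumptions, not commands taken from BUILDING.md.

```sh
# Sketch only: assumes Clang, CMake 3.28+, Qt 6.9.0 and Vulkan SDK 1.3.268.0 are already installed.
git clone --recursive https://github.com/RPCS3/rpcs3.git
cd rpcs3

# Point CMake at the Qt libs (illustrative path) and configure out of tree.
export Qt6_ROOT=/opt/Qt/6.9.0/gcc_64
cmake -S . -B build -DCMAKE_BUILD_TYPE=Release \
      -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++

# Build with all available cores.
cmake --build build -j"$(nproc)"
```
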
@@ -121,7 +121,7 @@ Start **Visual Studio**, click on `Open a project or solution` and select the `r
##### Configuring the Qt Plugin (if used)

1) go to `Extensions->Qt VS Tools->Qt Versions`
-2) add the path to your Qt installation with compiler e.g. `<QtInstallFolder>\6.8.3\msvc2022_64`, version will fill in automatically
+2) add the path to your Qt installation with compiler e.g. `<QtInstallFolder>\6.9.0\msvc2022_64`, version will fill in automatically
3) go to `Extensions->Qt VS Tools->Options->Legacy Project Format`. (Only available in the **Legacy Qt Plugin**)
4) set `Build: Run pre-build setup` to `true`. (Only available in the **Legacy Qt Plugin**)

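Before pointing the plugin, `QTDIR`, or `Qt6_ROOT` at a directory, it can save time to confirm that the path really contains the expected Qt toolchain. A small sketch, with an assumed install location that must be replaced by the real `<QtInstallFolder>`:

```sh
# Illustrative path; substitute the actual Qt install folder.
QTDIR="C:/Qt/6.9.0/msvc2022_64"

# qmake -query reports the version and prefix of the Qt build found at that path.
"$QTDIR/bin/qmake" -query QT_VERSION
"$QTDIR/bin/qmake" -query QT_INSTALL_PREFIX
```
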
@@ -1732,7 +1732,7 @@ void patch_engine::save_config(const patch_map& patches_map)

fs::pending_file file(path);

-if (!file.file || (file.file.write(out.c_str(), out.size()), !file.commit()))
+if (!file.file || file.file.write(out.c_str(), out.size()) < out.size() || !file.commit())
{
patch_log.error("Failed to create patch config file %s (error=%s)", path, fs::g_tls_error);
}
@@ -40,13 +40,13 @@ jobs:
# displayName: ccache

# - bash: |
-# docker pull --quiet rpcs3/rpcs3-ci-jammy:1.4
+# docker pull --quiet rpcs3/rpcs3-ci-jammy:1.5
# docker run \
# -v $(pwd):/rpcs3 \
# --env-file .ci/docker.env \
# -v $CCACHE_DIR:/root/.ccache \
# -v $BUILD_ARTIFACTSTAGINGDIRECTORY:/root/artifacts \
-# rpcs3/rpcs3-ci-jammy:1.4 \
+# rpcs3/rpcs3-ci-jammy:1.5 \
# /rpcs3/.ci/build-linux.sh
# displayName: Docker setup and build

@@ -71,9 +71,9 @@ jobs:
# variables:
# COMPILER: msvc
# QT_VER_MAIN: '6'
-# QT_VER: '6.8.3'
+# QT_VER: '6.9.0'
# QT_VER_MSVC: 'msvc2022'
-# QT_DATE: '202503201308'
+# QT_DATE: '202503301022'
# QTDIR: C:\Qt\$(QT_VER)\$(QT_VER_MSVC)_64
# LLVM_VER: '19.1.7'
# VULKAN_VER: '1.3.268.0'
@@ -1 +0,0 @@
-Cached data from GitHub API.

bin/git/commits.lst: 13533 changed lines; file diff suppressed because one or more lines are too long
@@ -158,19 +158,16 @@ if (NOT ANDROID)
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/rpcs3.icns $<TARGET_FILE_DIR:rpcs3>/../Resources/rpcs3.icns
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/Icons $<TARGET_FILE_DIR:rpcs3>/../Resources/Icons
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/GuiConfigs $<TARGET_FILE_DIR:rpcs3>/../Resources/GuiConfigs
-COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/git $<TARGET_FILE_DIR:rpcs3>/../Resources/git
COMMAND "${MACDEPLOYQT_EXECUTABLE}" "${PROJECT_BINARY_DIR}/bin/rpcs3.app" "${QT_DEPLOY_FLAGS}")
elseif(UNIX)
add_custom_command(TARGET rpcs3 POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/Icons $<TARGET_FILE_DIR:rpcs3>/Icons
-COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/GuiConfigs $<TARGET_FILE_DIR:rpcs3>/GuiConfigs
-COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/git $<TARGET_FILE_DIR:rpcs3>/git)
+COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/GuiConfigs $<TARGET_FILE_DIR:rpcs3>/GuiConfigs)
elseif(WIN32)
add_custom_command(TARGET rpcs3 POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:OpenAL::OpenAL> $<TARGET_FILE_DIR:rpcs3>
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/Icons $<TARGET_FILE_DIR:rpcs3>/Icons
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/GuiConfigs $<TARGET_FILE_DIR:rpcs3>/GuiConfigs
-COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/bin/git $<TARGET_FILE_DIR:rpcs3>/git
COMMAND "${WINDEPLOYQT_EXECUTABLE}" --no-compiler-runtime --no-opengl-sw --no-patchqt
--no-translations --no-system-d3d-compiler --no-system-dxc-compiler --no-ffmpeg --no-quick-import
--plugindir "$<IF:$<CXX_COMPILER_ID:MSVC>,$<TARGET_FILE_DIR:rpcs3>/plugins,$<TARGET_FILE_DIR:rpcs3>/share/qt6/plugins>"

@@ -195,8 +192,6 @@ if (NOT ANDROID)
DESTINATION ${CMAKE_INSTALL_DATADIR}/rpcs3)
install(DIRECTORY ../bin/GuiConfigs
DESTINATION ${CMAKE_INSTALL_DATADIR}/rpcs3)
-install(DIRECTORY ../bin/git
-DESTINATION ${CMAKE_INSTALL_DATADIR}/rpcs3)
install(DIRECTORY ../bin/test
DESTINATION ${CMAKE_INSTALL_DATADIR}/rpcs3)
endif()
@@ -1336,17 +1336,19 @@ static fs::file CheckDebugSelf(const fs::file& s)
// Get the real elf offset.
s.seek(0x10);

-// Start at the real elf offset.
-s.seek(key_version == 0x80 ? +s.read<be_t<u64>>() : +s.read<le_t<u64>>());
+// Read the real elf offset.
+usz read_pos = key_version == 0x80 ? +s.read<be_t<u64>>() : +s.read<le_t<u64>>();

// Write the real ELF file back.
fs::file e = fs::make_stream<std::vector<u8>>();

// Copy the data.
-char buf[2048];
-while (const u64 size = s.read(buf, 2048))
+std::vector<u8> buf(std::min<usz>(s.size(), 4096));
+
+while (const u64 size = s.read_at(read_pos, buf.data(), buf.size()))
{
-e.write(buf, size);
+e.write(buf.data(), size);
+read_pos += size;
}

return e;

@@ -1371,7 +1373,10 @@ fs::file decrypt_self(const fs::file& elf_or_self, const u8* klic_key, SelfAddit
elf_or_self.seek(0);

// Check SELF header first. Check for a debug SELF.
-if (elf_or_self.size() >= 4 && elf_or_self.read<u32>() == "SCE\0"_u32)
+u32 file_type = umax;
+elf_or_self.read_at(0, &file_type, sizeof(file_type));
+
+if (file_type == "SCE\0"_u32)
{
if (fs::file res = CheckDebugSelf(elf_or_self))
{

@@ -1409,6 +1414,23 @@ fs::file decrypt_self(const fs::file& elf_or_self, const u8* klic_key, SelfAddit
// Make a new ELF file from this SELF.
return self_dec.MakeElf(isElf32);
}
+else if (Emu.GetBoot().ends_with(".elf") || Emu.GetBoot().ends_with(".ELF"))
+{
+// Write the file back if the main executable is not signed
+fs::file e = fs::make_stream<std::vector<u8>>();
+
+// Copy the data.
+std::vector<u8> buf(std::min<usz>(elf_or_self.size(), 4096));
+
+usz read_pos = 0;
+while (const u64 size = elf_or_self.read_at(read_pos, buf.data(), buf.size()))
+{
+e.write(buf.data(), size);
+read_pos += size;
+}
+
+return e;
+}

return {};
}

@ -627,6 +627,7 @@ if(TARGET 3rdparty_vulkan)
|
|||
RSX/VK/VKCommonDecompiler.cpp
|
||||
RSX/VK/VKCommonPipelineLayout.cpp
|
||||
RSX/VK/VKCompute.cpp
|
||||
RSX/VK/VKDataHeapManager.cpp
|
||||
RSX/VK/VKDMA.cpp
|
||||
RSX/VK/VKDraw.cpp
|
||||
RSX/VK/VKFormats.cpp
|
||||
|
@ -649,6 +650,7 @@ if(TARGET 3rdparty_vulkan)
|
|||
RSX/VK/VKVertexBuffers.cpp
|
||||
RSX/VK/VKVertexProgram.cpp
|
||||
RSX/VK/VKTextureCache.cpp
|
||||
RSX/VK/VulkanAPI.cpp
|
||||
)
|
||||
endif()
|
||||
|
||||
|
|
|
@ -75,7 +75,7 @@ struct CellAudioInDeviceInfo
|
|||
u8 reserved[12];
|
||||
be_t<u64> deviceId;
|
||||
be_t<u64> type;
|
||||
char name[64];
|
||||
char name[64]; // Not necessarily null terminated!
|
||||
CellAudioInSoundMode availableModes[16];
|
||||
};
|
||||
|
||||
|
|
|
@ -38,11 +38,17 @@ void fmt_class_string<CellAudioInError>::format(std::string& out, u64 arg)
|
|||
struct avconf_manager
|
||||
{
|
||||
shared_mutex mutex;
|
||||
std::vector<CellAudioInDeviceInfo> devices;
|
||||
|
||||
struct device_info
|
||||
{
|
||||
CellAudioInDeviceInfo info {};
|
||||
std::string full_device_name; // The device name may be too long for CellAudioInDeviceInfo, so we additionally save the full name.
|
||||
};
|
||||
std::vector<device_info> devices;
|
||||
CellAudioInDeviceMode inDeviceMode = CELL_AUDIO_IN_SINGLE_DEVICE_MODE; // TODO: use somewhere
|
||||
|
||||
void copy_device_info(u32 num, vm::ptr<CellAudioInDeviceInfo> info) const;
|
||||
std::optional<CellAudioInDeviceInfo> get_device_info(vm::cptr<char> name) const;
|
||||
std::optional<device_info> get_device_info(vm::cptr<char> name) const;
|
||||
|
||||
avconf_manager();
|
||||
|
||||
|
@ -62,78 +68,89 @@ avconf_manager::avconf_manager()
|
|||
switch (g_cfg.audio.microphone_type)
|
||||
{
|
||||
case microphone_handler::standard:
|
||||
{
|
||||
for (u32 index = 0; index < mic_list.size(); index++)
|
||||
{
|
||||
devices.emplace_back();
|
||||
|
||||
devices[curindex].portType = CELL_AUDIO_IN_PORT_USB;
|
||||
devices[curindex].availableModeCount = 1;
|
||||
devices[curindex].state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
devices[curindex].deviceId = 0xE11CC0DE + curindex;
|
||||
devices[curindex].type = 0xC0DEE11C;
|
||||
devices[curindex].availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
devices[curindex].availableModes[0].channel = CELL_AUDIO_IN_CHNUM_2;
|
||||
devices[curindex].availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
devices[curindex].deviceNumber = curindex;
|
||||
strcpy_trunc(devices[curindex].name, mic_list[index]);
|
||||
device_info device {};
|
||||
device.info.portType = CELL_AUDIO_IN_PORT_USB;
|
||||
device.info.availableModeCount = 1;
|
||||
device.info.state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
device.info.deviceId = 0xE11CC0DE + curindex;
|
||||
device.info.type = 0xC0DEE11C;
|
||||
device.info.availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
device.info.availableModes[0].channel = CELL_AUDIO_IN_CHNUM_2;
|
||||
device.info.availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
device.info.deviceNumber = curindex;
|
||||
device.full_device_name = mic_list[index];
|
||||
strcpy_trunc(device.info.name, device.full_device_name);
|
||||
|
||||
devices.push_back(std::move(device));
|
||||
curindex++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case microphone_handler::real_singstar:
|
||||
case microphone_handler::singstar:
|
||||
{
|
||||
// Only one device for singstar device
|
||||
devices.emplace_back();
|
||||
|
||||
devices[curindex].portType = CELL_AUDIO_IN_PORT_USB;
|
||||
devices[curindex].availableModeCount = 1;
|
||||
devices[curindex].state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
devices[curindex].deviceId = 0x00000001;
|
||||
devices[curindex].type = 0x14150000;
|
||||
devices[curindex].availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
devices[curindex].availableModes[0].channel = CELL_AUDIO_IN_CHNUM_2;
|
||||
devices[curindex].availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
devices[curindex].deviceNumber = curindex;
|
||||
strcpy_trunc(devices[curindex].name, mic_list[0]);
|
||||
device_info device {};
|
||||
device.info.portType = CELL_AUDIO_IN_PORT_USB;
|
||||
device.info.availableModeCount = 1;
|
||||
device.info.state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
device.info.deviceId = 0x00000001;
|
||||
device.info.type = 0x14150000;
|
||||
device.info.availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
device.info.availableModes[0].channel = CELL_AUDIO_IN_CHNUM_2;
|
||||
device.info.availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
device.info.deviceNumber = curindex;
|
||||
device.full_device_name = mic_list[0];
|
||||
strcpy_trunc(device.info.name, device.full_device_name);
|
||||
|
||||
devices.push_back(std::move(device));
|
||||
curindex++;
|
||||
break;
|
||||
}
|
||||
case microphone_handler::rocksmith:
|
||||
devices.emplace_back();
|
||||
|
||||
devices[curindex].portType = CELL_AUDIO_IN_PORT_USB;
|
||||
devices[curindex].availableModeCount = 1;
|
||||
devices[curindex].state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
devices[curindex].deviceId = 0x12BA00FF; // Specific to rocksmith usb input
|
||||
devices[curindex].type = 0xC0DE73C4;
|
||||
devices[curindex].availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
devices[curindex].availableModes[0].channel = CELL_AUDIO_IN_CHNUM_1;
|
||||
devices[curindex].availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
devices[curindex].deviceNumber = curindex;
|
||||
strcpy_trunc(devices[curindex].name, mic_list[0]);
|
||||
{
|
||||
device_info device {};
|
||||
device.info.portType = CELL_AUDIO_IN_PORT_USB;
|
||||
device.info.availableModeCount = 1;
|
||||
device.info.state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
device.info.deviceId = 0x12BA00FF; // Specific to rocksmith usb input
|
||||
device.info.type = 0xC0DE73C4;
|
||||
device.info.availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
device.info.availableModes[0].channel = CELL_AUDIO_IN_CHNUM_1;
|
||||
device.info.availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
device.info.deviceNumber = curindex;
|
||||
device.full_device_name = mic_list[0];
|
||||
strcpy_trunc(device.info.name, device.full_device_name);
|
||||
|
||||
devices.push_back(std::move(device));
|
||||
curindex++;
|
||||
break;
|
||||
}
|
||||
case microphone_handler::null:
|
||||
default: break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (g_cfg.io.camera != camera_handler::null)
|
||||
{
|
||||
devices.emplace_back();
|
||||
|
||||
devices[curindex].portType = CELL_AUDIO_IN_PORT_USB;
|
||||
devices[curindex].availableModeCount = 1;
|
||||
devices[curindex].state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
devices[curindex].deviceId = 0xDEADBEEF;
|
||||
devices[curindex].type = 0xBEEFDEAD;
|
||||
devices[curindex].availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
devices[curindex].availableModes[0].channel = CELL_AUDIO_IN_CHNUM_NONE;
|
||||
devices[curindex].availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
devices[curindex].deviceNumber = curindex;
|
||||
strcpy_trunc(devices[curindex].name, "USB Camera");
|
||||
device_info device {};
|
||||
device.info.portType = CELL_AUDIO_IN_PORT_USB;
|
||||
device.info.availableModeCount = 1;
|
||||
device.info.state = CELL_AUDIO_IN_DEVICE_STATE_AVAILABLE;
|
||||
device.info.deviceId = 0xDEADBEEF;
|
||||
device.info.type = 0xBEEFDEAD;
|
||||
device.info.availableModes[0].type = CELL_AUDIO_IN_CODING_TYPE_LPCM;
|
||||
device.info.availableModes[0].channel = CELL_AUDIO_IN_CHNUM_NONE;
|
||||
device.info.availableModes[0].fs = CELL_AUDIO_IN_FS_8KHZ | CELL_AUDIO_IN_FS_12KHZ | CELL_AUDIO_IN_FS_16KHZ | CELL_AUDIO_IN_FS_24KHZ | CELL_AUDIO_IN_FS_32KHZ | CELL_AUDIO_IN_FS_48KHZ;
|
||||
device.info.deviceNumber = curindex;
|
||||
device.full_device_name = "USB Camera";
|
||||
strcpy_trunc(device.info.name, device.full_device_name);
|
||||
|
||||
devices.push_back(std::move(device));
|
||||
curindex++;
|
||||
}
|
||||
}
|
||||
|
@ -142,14 +159,14 @@ void avconf_manager::copy_device_info(u32 num, vm::ptr<CellAudioInDeviceInfo> in
|
|||
{
|
||||
memset(info.get_ptr(), 0, sizeof(CellAudioInDeviceInfo));
|
||||
ensure(num < devices.size());
|
||||
*info = devices[num];
|
||||
*info = devices[num].info;
|
||||
}
|
||||
|
||||
std::optional<CellAudioInDeviceInfo> avconf_manager::get_device_info(vm::cptr<char> name) const
|
||||
std::optional<avconf_manager::device_info> avconf_manager::get_device_info(vm::cptr<char> name) const
|
||||
{
|
||||
for (const CellAudioInDeviceInfo& device : devices)
|
||||
for (const device_info& device : devices)
|
||||
{
|
||||
if (strncmp(device.name, name.get_ptr(), sizeof(device.name)) == 0)
|
||||
if (strncmp(device.info.name, name.get_ptr(), sizeof(device.info.name)) == 0)
|
||||
{
|
||||
return device;
|
||||
}
|
||||
|
@ -398,8 +415,8 @@ error_code cellAudioInRegisterDevice(u64 deviceType, vm::cptr<char> name, vm::pt
|
|||
auto& av_manager = g_fxo->get<avconf_manager>();
|
||||
const std::lock_guard lock(av_manager.mutex);
|
||||
|
||||
std::optional<CellAudioInDeviceInfo> info = av_manager.get_device_info(name);
|
||||
if (!info || !memchr(info->name, '\0', sizeof(info->name)))
|
||||
std::optional<avconf_manager::device_info> device = av_manager.get_device_info(name);
|
||||
if (!device)
|
||||
{
|
||||
// TODO
|
||||
return CELL_AUDIO_IN_ERROR_DEVICE_NOT_FOUND;
|
||||
|
@ -407,7 +424,7 @@ error_code cellAudioInRegisterDevice(u64 deviceType, vm::cptr<char> name, vm::pt
|
|||
|
||||
auto& mic_thr = g_fxo->get<mic_thread>();
|
||||
const std::lock_guard mic_lock(mic_thr.mutex);
|
||||
const u32 device_number = mic_thr.register_device(info->name);
|
||||
const u32 device_number = mic_thr.register_device(device->full_device_name);
|
||||
|
||||
return not_an_error(device_number);
|
||||
}
|
||||
|
|
|
@ -61,11 +61,6 @@ void fmt_class_string<CellCameraFormat>::format(std::string& out, u64 arg)
|
|||
});
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
// **************
|
||||
// * Prototypes *
|
||||
// **************
|
||||
|
@ -402,7 +397,7 @@ error_code check_init_and_open(s32 dev_num)
|
|||
}
|
||||
|
||||
// This represents a recurring subfunction throughout libCamera
|
||||
error_code check_resolution(s32 dev_num)
|
||||
error_code check_resolution(s32 /*dev_num*/)
|
||||
{
|
||||
// TODO: Some sort of connection check maybe?
|
||||
//if (error == CELL_CAMERA_ERROR_RESOLUTION_UNKNOWN)
|
||||
|
@ -413,7 +408,7 @@ error_code check_resolution(s32 dev_num)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
// This represents a oftenly used sequence in libCamera (usually the beginning of a subfunction).
|
||||
// This represents an often used sequence in libCamera (usually the beginning of a subfunction).
|
||||
// There also exist common sequences for mutex lock/unlock by the way.
|
||||
error_code check_resolution_ex(s32 dev_num)
|
||||
{
|
||||
|
|
|
@ -26,11 +26,6 @@ void fmt_class_string<CellDaisyError>::format(std::string& out, u64 arg)
|
|||
});
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
using LFQueue2 = struct CellDaisyLFQueue2;
|
||||
using Lock = struct CellDaisyLock;
|
||||
using ScatterGatherInterlock = struct CellDaisyScatterGatherInterlock;
|
||||
|
@ -38,134 +33,134 @@ using AtomicInterlock = volatile struct CellDaisyAtomicInterlock;
|
|||
|
||||
error_code cellDaisyLFQueue2GetPopPointer(vm::ptr<LFQueue2> queue, vm::ptr<s32> pPointer, u32 isBlocking)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2GetPopPointer()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2GetPopPointer(queue=*0x%x, pPointer=*0x%x, isBlocking=%d)", queue, pPointer, isBlocking);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLFQueue2CompletePopPointer(vm::ptr<LFQueue2> queue, s32 pointer, vm::ptr<s32(vm::ptr<void>, u32)> fpSendSignal, u32 isQueueFull)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2CompletePopPointer()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2CompletePopPointer(queue=*0x%x, pointer=0x%x, fpSendSignal=*0x%x, isQueueFull=%d)", queue, pointer, fpSendSignal, isQueueFull);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
void cellDaisyLFQueue2PushOpen(vm::ptr<LFQueue2> queue)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2PushOpen()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2PushOpen(queue=*0x%x)", queue);
|
||||
}
|
||||
|
||||
error_code cellDaisyLFQueue2PushClose(vm::ptr<LFQueue2> queue, vm::ptr<s32(vm::ptr<void>, u32)> fpSendSignal)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2PushClose()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2PushClose(queue=*0x%x, fpSendSignal=*0x%x)", queue, fpSendSignal);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
void cellDaisyLFQueue2PopOpen(vm::ptr<LFQueue2> queue)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2PopOpen()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2PopOpen(queue=*0x%x)", queue);
|
||||
}
|
||||
|
||||
error_code cellDaisyLFQueue2PopClose(vm::ptr<LFQueue2> queue, vm::ptr<s32(vm::ptr<void>, u32)> fpSendSignal)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2PopClose()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2PopClose(queue=*0x%x, fpSendSignal=*0x%x)", queue, fpSendSignal);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLFQueue2HasUnfinishedConsumer(vm::ptr<LFQueue2> queue, u32 isCancelled)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLFQueue2HasUnfinishedConsumer()");
|
||||
cellDaisy.todo("cellDaisyLFQueue2HasUnfinishedConsumer(queue=*0x%x, isCancelled=%d)", queue, isCancelled);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisy_snprintf(vm::ptr<char> buffer, u32 count, vm::cptr<char> fmt, ppu_va_args_t fmt_args)
|
||||
{
|
||||
cellDaisy.todo("cellDaisy_snprintf()");
|
||||
cellDaisy.todo("cellDaisy_snprintf(buffer=*0x%x, count=%d, fmt=*0x%x, fmt_args=%d)", buffer, count, fmt, fmt_args.count);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_initialize(vm::ptr<Lock> _this, u32 depth)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_initialize()");
|
||||
cellDaisy.todo("cellDaisyLock_initialize(_this=*0x%x, depth=%d)", _this, depth);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_getNextHeadPointer(vm::ptr<Lock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_getNextHeadPointer()");
|
||||
cellDaisy.todo("cellDaisyLock_getNextHeadPointer(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_getNextTailPointer(vm::ptr<Lock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_getNextTailPointer()");
|
||||
cellDaisy.todo("cellDaisyLock_getNextTailPointer(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_completeConsume(vm::ptr<Lock> _this, u32 pointer)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_completeConsume()");
|
||||
cellDaisy.todo("cellDaisyLock_completeConsume(_this=*0x%x, pointer=0x%x)", _this, pointer);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_completeProduce(vm::ptr<Lock> _this, u32 pointer)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_completeProduce()");
|
||||
cellDaisy.todo("cellDaisyLock_completeProduce(_this=*0x%x, pointer=0x%x)", _this, pointer);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_pushOpen(vm::ptr<Lock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_pushOpen()");
|
||||
cellDaisy.todo("cellDaisyLock_pushOpen(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_pushClose(vm::ptr<Lock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_pushClose()");
|
||||
cellDaisy.todo("cellDaisyLock_pushClose(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_popOpen(vm::ptr<Lock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_popOpen()");
|
||||
cellDaisy.todo("cellDaisyLock_popOpen(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyLock_popClose(vm::ptr<Lock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyLock_popClose()");
|
||||
cellDaisy.todo("cellDaisyLock_popClose(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
void cellDaisyScatterGatherInterlock_1(vm::ptr<ScatterGatherInterlock> _this, vm::ptr<AtomicInterlock> ea, u32 size, vm::ptr<void> eaSignal, vm::ptr<s32(vm::ptr<void>, u32)> fpSendSignal)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_1()");
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_1(_this=*0x%x, ea=*0x%x, size=%d, eaSignal=*0x%x, fpSendSignal=*0x%x)", _this, ea, size, eaSignal, fpSendSignal);
|
||||
}
|
||||
|
||||
void cellDaisyScatterGatherInterlock_2(vm::ptr<ScatterGatherInterlock> _this, u32 size, vm::ptr<u32> ids, u32 numSpus, u8 spup)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_2()");
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_2(_this=*0x%x, size=%d, ids=*0x%x, numSpus=%d, spup=%d)", _this, size, ids, numSpus, spup);
|
||||
}
|
||||
|
||||
void cellDaisyScatterGatherInterlock_9tor(vm::ptr<ScatterGatherInterlock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_9tor()");
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_9tor(_this=*0x%x)", _this);
|
||||
}
|
||||
|
||||
error_code cellDaisyScatterGatherInterlock_probe(vm::ptr<ScatterGatherInterlock> _this, u32 isBlocking)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_probe()");
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_probe(_this=*0x%x, isBlocking=%d)", _this, isBlocking);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellDaisyScatterGatherInterlock_release(vm::ptr<ScatterGatherInterlock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_release()");
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_release(_this=*0x%x)", _this);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
void cellDaisyScatterGatherInterlock_proceedSequenceNumber(vm::ptr<ScatterGatherInterlock> _this)
|
||||
{
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_proceedSequenceNumber()");
|
||||
cellDaisy.todo("cellDaisyScatterGatherInterlock_proceedSequenceNumber(_this=*0x%x)", _this);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -446,7 +446,7 @@ error_code _cellGcmInitBody(ppu_thread& ppu, vm::pptr<CellGcmContextData> contex
|
|||
gcm_cfg.zculls_addr = vm::alloc(sizeof(CellGcmZcullInfo) * 8, vm::main);
|
||||
gcm_cfg.tiles_addr = vm::alloc(sizeof(CellGcmTileInfo) * 15, vm::main);
|
||||
|
||||
vm::_ref<CellGcmContextData>(gcm_cfg.gcm_info.context_addr) = gcm_cfg.current_context;
|
||||
vm::write<CellGcmContextData>(gcm_cfg.gcm_info.context_addr, gcm_cfg.current_context);
|
||||
context->set(gcm_cfg.gcm_info.context_addr);
|
||||
|
||||
// 0x40 is to offset CellGcmControl from RsxDmaControl
|
||||
|
@ -590,7 +590,7 @@ ret_type gcmSetPrepareFlip(ppu_thread& ppu, vm::ptr<CellGcmContextData> ctxt, u3
|
|||
|
||||
if (!old_api && ctxt.addr() == gcm_cfg.gcm_info.context_addr)
|
||||
{
|
||||
vm::_ref<CellGcmControl>(gcm_cfg.gcm_info.control_addr).put += cmd_size;
|
||||
vm::_ptr<CellGcmControl>(gcm_cfg.gcm_info.control_addr)->put += cmd_size;
|
||||
}
|
||||
|
||||
return static_cast<ret_type>(not_an_error(id));
|
||||
|
@ -1463,7 +1463,7 @@ s32 cellGcmCallback(ppu_thread& ppu, vm::ptr<CellGcmContextData> context, u32 co
|
|||
|
||||
auto& gcm_cfg = g_fxo->get<gcm_config>();
|
||||
|
||||
auto& ctrl = vm::_ref<CellGcmControl>(gcm_cfg.gcm_info.control_addr);
|
||||
auto& ctrl = *vm::_ptr<CellGcmControl>(gcm_cfg.gcm_info.control_addr);
|
||||
|
||||
// Flush command buffer (ie allow RSX to read up to context->current)
|
||||
ctrl.put.exchange(getOffsetFromAddress(context->current.addr()));
|
||||
|
|
|
@ -11,11 +11,6 @@
|
|||
|
||||
LOG_CHANNEL(cellGifDec);
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
template <>
|
||||
void fmt_class_string<CellGifDecError>::format(std::string& out, u64 arg)
|
||||
{
|
||||
|
|
|
@ -11,11 +11,6 @@
|
|||
|
||||
LOG_CHANNEL(cellJpgDec);
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
template <>
|
||||
void fmt_class_string<CellJpgDecError>::format(std::string& out, u64 arg)
|
||||
{
|
||||
|
@ -40,19 +35,19 @@ void fmt_class_string<CellJpgDecError>::format(std::string& out, u64 arg)
|
|||
|
||||
error_code cellJpgDecCreate(u32 mainHandle, u32 threadInParam, u32 threadOutParam)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellJpgDec);
|
||||
cellJpgDec.todo("cellJpgDecCreate(mainHandle=0x%x, threadInParam=0x%x, threadOutParam=0x%x)", mainHandle, threadInParam, threadOutParam);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellJpgDecExtCreate(u32 mainHandle, u32 threadInParam, u32 threadOutParam, u32 extThreadInParam, u32 extThreadOutParam)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellJpgDec);
|
||||
cellJpgDec.todo("cellJpgDecExtCreate(mainHandle=0x%x, threadInParam=0x%x, threadOutParam=0x%x, extThreadInParam=0x%x, extThreadOutParam=0x%x)", mainHandle, threadInParam, threadOutParam, extThreadInParam, extThreadOutParam);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellJpgDecDestroy(u32 mainHandle)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellJpgDec);
|
||||
cellJpgDec.todo("cellJpgDecDestroy(mainHandle=0x%x)", mainHandle);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -230,7 +225,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
}
|
||||
|
||||
//Decode JPG file. (TODO: Is there any faster alternative? Can we do it without external libraries?)
|
||||
int width, height, actual_components;
|
||||
int width = 0, height = 0, actual_components = 0;
|
||||
auto image = std::unique_ptr<unsigned char,decltype(&::free)>
|
||||
(
|
||||
stbi_load_from_memory(jpg.get(), ::narrow<int>(fileSize), &width, &height, &actual_components, 4),
|
||||
|
@ -275,7 +270,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
{
|
||||
//TODO: Find out if we can't do padding without an extra copy
|
||||
const int linesize = std::min(bytesPerLine, width * nComponents);
|
||||
const auto output = std::make_unique<char[]>(linesize);
|
||||
std::vector<char> output(image_size);
|
||||
for (int i = 0; i < height; i++)
|
||||
{
|
||||
const int dstOffset = i * bytesPerLine;
|
||||
|
@ -287,22 +282,22 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
output[j + 2] = image.get()[srcOffset + j + 1];
|
||||
output[j + 3] = image.get()[srcOffset + j + 2];
|
||||
}
|
||||
std::memcpy(&data[dstOffset], output.get(), linesize);
|
||||
std::memcpy(&data[dstOffset], output.data(), linesize);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto img = std::make_unique<uint[]>(image_size);
|
||||
uint* source_current = reinterpret_cast<uint*>(image.get());
|
||||
uint* dest_current = img.get();
|
||||
for (uint i = 0; i < image_size / nComponents; i++)
|
||||
std::vector<u32> img(image_size);
|
||||
const u32* source_current = reinterpret_cast<const u32*>(image.get());
|
||||
u32* dest_current = img.data();
|
||||
for (u32 i = 0; i < image_size / nComponents; i++)
|
||||
{
|
||||
uint val = *source_current;
|
||||
const u32 val = *source_current;
|
||||
*dest_current = (val >> 24) | (val << 8); // set alpha (A8) as leftmost byte
|
||||
source_current++;
|
||||
dest_current++;
|
||||
}
|
||||
std::memcpy(data.get_ptr(), img.get(), image_size);
|
||||
std::memcpy(data.get_ptr(), img.data(), image_size);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -322,6 +322,8 @@ error_code microphone_device::open_microphone(const u8 type, const u32 dsp_r, co
|
|||
num_channels = channels;
|
||||
|
||||
#ifndef WITHOUT_OPENAL
|
||||
enumerate_devices();
|
||||
|
||||
// Adjust number of channels depending on microphone type
|
||||
switch (device_type)
|
||||
{
|
||||
|
@ -423,36 +425,40 @@ error_code microphone_device::open_microphone(const u8 type, const u32 dsp_r, co
|
|||
break;
|
||||
}
|
||||
|
||||
ALCdevice* device = nullptr;
|
||||
|
||||
// Make sure we use a proper sampling rate
|
||||
const auto fixup_samplingrate = [this](u32& rate) -> bool
|
||||
// TODO: The used sample rate may vary for Sony's camera devices
|
||||
const std::array<u32, 7> samplingrates = { raw_samplingrate, 48000u, 32000u, 24000u, 16000u, 12000u, 8000u };
|
||||
|
||||
for (u32 samplingrate : samplingrates)
|
||||
{
|
||||
// TODO: The used sample rate may vary for Sony's camera devices
|
||||
const std::array<u32, 7> samplingrates = { rate, 48000u, 32000u, 24000u, 16000u, 12000u, 8000u };
|
||||
|
||||
const auto test_samplingrate = [&samplingrates](const u32& rate)
|
||||
if (!std::any_of(samplingrates.cbegin() + 1, samplingrates.cend(), [samplingrate](u32 r){ return r == samplingrate; }))
|
||||
{
|
||||
// TODO: actually check if device supports sampling rates
|
||||
return std::any_of(samplingrates.cbegin() + 1, samplingrates.cend(), [&rate](const u32& r){ return r == rate; });
|
||||
};
|
||||
|
||||
for (u32 samplingrate : samplingrates)
|
||||
{
|
||||
if (test_samplingrate(samplingrate))
|
||||
{
|
||||
// Use this sampling rate
|
||||
raw_samplingrate = samplingrate;
|
||||
cellMic.notice("Using sampling rate %d.", samplingrate);
|
||||
return true;
|
||||
}
|
||||
|
||||
cellMic.warning("Requested sampling rate %d, but we do not support it. Trying next sampling rate...", samplingrate);
|
||||
continue;
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
cellMic.notice("Trying sampling rate %d with %d channel(s)", samplingrate, num_channels);
|
||||
|
||||
if (!fixup_samplingrate(raw_samplingrate))
|
||||
device = open_device(devices[0].name, samplingrate, num_al_channels, inbuf_size);
|
||||
if (!device)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// Use this sampling rate
|
||||
raw_samplingrate = samplingrate;
|
||||
cellMic.notice("Using sampling rate %d and %d channel(s)", raw_samplingrate, num_channels);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!device)
|
||||
{
|
||||
cellMic.error("Failed to open capture device '%s' (raw_samplingrate=%d, num_al_channels=0x%x, inbuf_size=%d)", devices[0].name, raw_samplingrate, num_al_channels, inbuf_size);
|
||||
#ifdef _WIN32
|
||||
cellMic.error("Make sure microphone use is authorized under \"Microphone privacy settings\" in windows configuration");
|
||||
#endif
|
||||
return CELL_MICIN_ERROR_DEVICE_NOT_SUPPORT;
|
||||
}
|
||||
|
||||
|
@ -460,29 +466,19 @@ error_code microphone_device::open_microphone(const u8 type, const u32 dsp_r, co
|
|||
|
||||
ensure(!devices.empty());
|
||||
|
||||
ALCdevice* device = alcCaptureOpenDevice(devices[0].name.c_str(), raw_samplingrate, num_al_channels, inbuf_size);
|
||||
|
||||
if (ALCenum err = alcGetError(device); err != ALC_NO_ERROR || !device)
|
||||
{
|
||||
cellMic.error("Error opening capture device %s (error=%s, device=*0x%x)", devices[0].name, fmt::alc_error{device, err}, device);
|
||||
#ifdef _WIN32
|
||||
cellMic.error("Make sure microphone use is authorized under \"Microphone privacy settings\" in windows configuration");
|
||||
#endif
|
||||
return CELL_MICIN_ERROR_DEVICE_NOT_SUPPORT;
|
||||
}
|
||||
|
||||
devices[0].device = device;
|
||||
devices[0].buf.resize(inbuf_size, 0);
|
||||
|
||||
if (device_type == microphone_handler::singstar && devices.size() >= 2)
|
||||
{
|
||||
// Open a 2nd microphone into the same device
|
||||
device = alcCaptureOpenDevice(devices[1].name.c_str(), raw_samplingrate, AL_FORMAT_MONO16, inbuf_size);
|
||||
num_al_channels = AL_FORMAT_MONO16;
|
||||
device = open_device(devices[1].name, raw_samplingrate, num_al_channels, inbuf_size);
|
||||
|
||||
if (ALCenum err = alcGetError(device); err != ALC_NO_ERROR || !device)
|
||||
if (!device)
|
||||
{
|
||||
// Ignore it and move on
|
||||
cellMic.error("Error opening 2nd SingStar capture device %s (error=%s, device=*0x%x)", devices[1].name, fmt::alc_error{device, err}, device);
|
||||
cellMic.error("Failed to open 2nd SingStar capture device '%s' (raw_samplingrate=%d, num_al_channels=0x%x, inbuf_size=%d)", devices[1].name, raw_samplingrate, num_al_channels, inbuf_size);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -517,7 +513,7 @@ error_code microphone_device::close_microphone()
|
|||
{
|
||||
if (alcCaptureCloseDevice(micdevice.device) != ALC_TRUE)
|
||||
{
|
||||
cellMic.error("Error closing capture device %s", micdevice.name);
|
||||
cellMic.error("Error closing capture device '%s'", micdevice.name);
|
||||
}
|
||||
|
||||
micdevice.device = nullptr;
|
||||
|
@ -539,7 +535,7 @@ error_code microphone_device::start_microphone()
|
|||
alcCaptureStart(micdevice.device);
|
||||
if (ALCenum err = alcGetError(micdevice.device); err != ALC_NO_ERROR)
|
||||
{
|
||||
cellMic.error("Error starting capture of device %s (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
cellMic.error("Error starting capture of device '%s' (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
stop_microphone();
|
||||
return CELL_MICIN_ERROR_FATAL;
|
||||
}
|
||||
|
@ -558,7 +554,7 @@ error_code microphone_device::stop_microphone()
|
|||
alcCaptureStop(micdevice.device);
|
||||
if (ALCenum err = alcGetError(micdevice.device); err != ALC_NO_ERROR)
|
||||
{
|
||||
cellMic.error("Error stopping capture of device %s (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
cellMic.error("Error stopping capture of device '%s' (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -637,7 +633,7 @@ u32 microphone_device::capture_audio()
|
|||
|
||||
if (ALCenum err = alcGetError(micdevice.device); err != ALC_NO_ERROR)
|
||||
{
|
||||
cellMic.error("Error getting number of captured samples of device %s (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
cellMic.error("Error getting number of captured samples of device '%s' (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
return CELL_MICIN_ERROR_FATAL;
|
||||
}
|
||||
|
||||
|
@ -655,7 +651,7 @@ u32 microphone_device::capture_audio()
|
|||
|
||||
if (ALCenum err = alcGetError(micdevice.device); err != ALC_NO_ERROR)
|
||||
{
|
||||
cellMic.error("Error capturing samples of device %s (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
cellMic.error("Error capturing samples of device '%s' (error=%s)", micdevice.name, fmt::alc_error{micdevice.device, err});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -667,6 +663,56 @@ u32 microphone_device::capture_audio()
|
|||
|
||||
// Private functions
|
||||
|
||||
#ifndef WITHOUT_OPENAL
|
||||
void microphone_device::enumerate_devices()
|
||||
{
|
||||
cellMic.notice("Enumerating capture devices...");
|
||||
enumerated_devices.clear();
|
||||
|
||||
if (alcIsExtensionPresent(nullptr, "ALC_ENUMERATION_EXT") == AL_TRUE)
|
||||
{
|
||||
if (const char* alc_devices = alcGetString(nullptr, ALC_CAPTURE_DEVICE_SPECIFIER))
|
||||
{
|
||||
while (alc_devices && *alc_devices != 0)
|
||||
{
|
||||
cellMic.notice("Found capture device: '%s'", alc_devices);
|
||||
enumerated_devices.push_back(alc_devices);
|
||||
alc_devices += strlen(alc_devices) + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Without enumeration we can only use one device
|
||||
cellMic.error("OpenAl extension ALC_ENUMERATION_EXT not supported. The enumerated capture devices will only contain the default capture device.");
|
||||
|
||||
if (const char* alc_device = alcGetString(nullptr, ALC_DEFAULT_DEVICE_SPECIFIER))
|
||||
{
|
||||
cellMic.notice("Found default capture device: '%s'", alc_device);
|
||||
enumerated_devices.push_back(alc_device);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ALCdevice* microphone_device::open_device(const std::string& name, u32 samplingrate, ALCenum num_al_channels, u32 buf_size)
|
||||
{
|
||||
if (std::none_of(enumerated_devices.cbegin(), enumerated_devices.cend(), [&name](const std::string& dev){ return dev == name; }))
|
||||
{
|
||||
cellMic.error("Capture device '%s' not in enumerated devices", name);
|
||||
}
|
||||
|
||||
ALCdevice* device = alcCaptureOpenDevice(name.c_str(), samplingrate, num_al_channels, buf_size);
|
||||
|
||||
if (ALCenum err = alcGetError(device); err != ALC_NO_ERROR || !device)
|
||||
{
|
||||
cellMic.warning("Failed to open capture device '%s' (error=%s, device=*0x%x, samplingrate=%d, num_al_channels=0x%x, buf_size=%d)", name, fmt::alc_error{device, err}, device, samplingrate, num_al_channels, buf_size);
|
||||
device = nullptr;
|
||||
}
|
||||
|
||||
return device;
|
||||
}
|
||||
#endif
|
||||
|
||||
void microphone_device::get_data(const u32 num_samples)
|
||||
{
|
||||
if (num_samples == 0)
|
||||
|
|
|
@ -326,6 +326,11 @@ private:
|
|||
static inline void variable_byteswap(const void* src, void* dst);
|
||||
inline u32 convert_16_bit_pcm_to_float(const std::vector<u8>& buffer, u32 num_bytes);
|
||||
|
||||
#ifndef WITHOUT_OPENAL
|
||||
void enumerate_devices();
|
||||
ALCdevice* open_device(const std::string& name, u32 samplingrate, ALCenum num_al_channels, u32 buf_size);
|
||||
#endif
|
||||
|
||||
u32 capture_audio();
|
||||
|
||||
void get_data(const u32 num_samples);
|
||||
|
@ -345,6 +350,7 @@ private:
|
|||
std::vector<u8> buf;
|
||||
};
|
||||
|
||||
std::vector<std::string> enumerated_devices;
|
||||
std::vector<mic_device> devices;
|
||||
std::vector<u8> temp_buf;
|
||||
std::vector<u8> float_buf;
|
||||
|
@ -376,7 +382,7 @@ public:
|
|||
void wake_up();
|
||||
|
||||
// Returns index of registered device
|
||||
u32 register_device(const std::string& name);
|
||||
u32 register_device(const std::string& device_name);
|
||||
void unregister_device(u32 dev_num);
|
||||
bool check_device(u32 dev_num);
|
||||
|
||||
|
|
|
@ -177,6 +177,11 @@ struct music_state
|
|||
return CELL_MUSIC_ERROR_NO_MORE_CONTENT;
|
||||
}
|
||||
|
||||
if (!fs::is_file(path))
|
||||
{
|
||||
cellMusic.error("set_playback_command: File does not exist: '%s'", path);
|
||||
}
|
||||
|
||||
switch (command)
|
||||
{
|
||||
case CELL_MUSIC_PB_CMD_FASTFORWARD:
|
||||
|
|
|
@ -77,7 +77,7 @@ std::string music_selection_context::get_yaml_path() const
|
|||
|
||||
if (!fs::create_path(path))
|
||||
{
|
||||
cellMusicSelectionContext.fatal("Failed to create path: %s (%s)", path, fs::g_tls_error);
|
||||
cellMusicSelectionContext.fatal("get_yaml_path: Failed to create path: %s (%s)", path, fs::g_tls_error);
|
||||
}
|
||||
|
||||
return path + hash + ".yml";
|
||||
|
@ -101,13 +101,18 @@ void music_selection_context::set_playlist(const std::string& path)
|
|||
continue;
|
||||
}
|
||||
|
||||
playlist.push_back(dir_path + std::string(path + "/" + dir_entry.name).substr(vfs_dir_path.length()));
|
||||
std::string track = dir_path + std::string(path + "/" + dir_entry.name).substr(vfs_dir_path.length());
|
||||
cellMusicSelectionContext.notice("set_playlist: Adding track to playlist: '%s'. (path: '%s', name: '%s')", track, path, dir_entry.name);
|
||||
playlist.push_back(std::move(track));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
content_type = CELL_SEARCH_CONTENTTYPE_MUSIC;
|
||||
playlist.push_back(dir_path + path.substr(vfs_dir_path.length()));
|
||||
|
||||
std::string track = dir_path + path.substr(vfs_dir_path.length());
|
||||
cellMusicSelectionContext.notice("set_playlist: Adding track to playlist: '%s'. (path: '%s')", track, path);
|
||||
playlist.push_back(std::move(track));
|
||||
}
|
||||
|
||||
valid = true;
|
||||
|
@ -118,7 +123,7 @@ void music_selection_context::create_playlist(const std::string& new_hash)
|
|||
hash = new_hash;
|
||||
|
||||
const std::string yaml_path = get_yaml_path();
|
||||
cellMusicSelectionContext.notice("Saving music playlist file %s", yaml_path);
|
||||
cellMusicSelectionContext.notice("create_playlist: Saving music playlist file %s", yaml_path);
|
||||
|
||||
YAML::Emitter out;
|
||||
out << YAML::BeginMap;
|
||||
|
@ -140,9 +145,9 @@ void music_selection_context::create_playlist(const std::string& new_hash)
|
|||
|
||||
fs::pending_file file(yaml_path);
|
||||
|
||||
if (!file.file || (file.file.write(out.c_str(), out.size()), !file.commit()))
|
||||
if (!file.file || file.file.write(out.c_str(), out.size()) < out.size() || !file.commit())
|
||||
{
|
||||
cellMusicSelectionContext.error("Failed to create music playlist file %s (error=%s)", yaml_path, fs::g_tls_error);
|
||||
cellMusicSelectionContext.error("create_playlist: Failed to create music playlist file '%s' (error=%s)", yaml_path, fs::g_tls_error);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -151,7 +156,7 @@ bool music_selection_context::load_playlist()
|
|||
playlist.clear();
|
||||
|
||||
const std::string path = get_yaml_path();
|
||||
cellMusicSelectionContext.notice("Loading music playlist file %s", path);
|
||||
cellMusicSelectionContext.notice("load_playlist: Loading music playlist file '%s'", path);
|
||||
|
||||
std::string content;
|
||||
{
|
||||
|
@ -160,7 +165,7 @@ bool music_selection_context::load_playlist()
|
|||
|
||||
if (!file)
|
||||
{
|
||||
cellMusicSelectionContext.error("Failed to load music playlist file %s: %s", path, fs::g_tls_error);
|
||||
cellMusicSelectionContext.error("load_playlist: Failed to load music playlist file '%s': %s", path, fs::g_tls_error);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -171,7 +176,7 @@ bool music_selection_context::load_playlist()
|
|||
|
||||
if (!error.empty() || !root)
|
||||
{
|
||||
cellMusicSelectionContext.error("Failed to load music playlist file %s:\n%s", path, error);
|
||||
cellMusicSelectionContext.error("load_playlist: Failed to load music playlist file '%s':\n%s", path, error);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -180,54 +185,54 @@ bool music_selection_context::load_playlist()
|
|||
const std::string version = get_yaml_node_value<std::string>(root["Version"], err);
|
||||
if (!err.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No Version entry found. Error: '%s' (file: %s)", err, path);
|
||||
cellMusicSelectionContext.error("load_playlist: No Version entry found. Error: '%s' (file: '%s')", err, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (version != target_version)
|
||||
{
|
||||
cellMusicSelectionContext.error("Version '%s' does not match music playlist target '%s' (file: %s)", version, target_version, path);
|
||||
cellMusicSelectionContext.error("load_playlist: Version '%s' does not match music playlist target '%s' (file: '%s')", version, target_version, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
const std::string file_type = get_yaml_node_value<std::string>(root["FileType"], err);
|
||||
if (!err.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No FileType entry found. Error: '%s' (file: %s)", err, path);
|
||||
cellMusicSelectionContext.error("load_playlist: No FileType entry found. Error: '%s' (file: '%s')", err, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (file_type != target_file_type)
|
||||
{
|
||||
cellMusicSelectionContext.error("FileType '%s' does not match music playlist target '%s' (file: %s)", file_type, target_file_type, path);
|
||||
cellMusicSelectionContext.error("load_playlist: FileType '%s' does not match music playlist target '%s' (file: '%s')", file_type, target_file_type, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
content_type = static_cast<CellSearchContentType>(get_yaml_node_value<u32>(root["ContentType"], err));
|
||||
if (!err.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No ContentType entry found. Error: '%s' (file: %s)", err, path);
|
||||
cellMusicSelectionContext.error("load_playlist: No ContentType entry found. Error: '%s' (file: '%s')", err, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
context_option = static_cast<CellSearchContextOption>(get_yaml_node_value<u32>(root["ContextOption"], err));
|
||||
if (!err.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No ContextOption entry found. Error: '%s' (file: %s)", err, path);
|
||||
cellMusicSelectionContext.error("load_playlist: No ContextOption entry found. Error: '%s' (file: '%s')", err, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
repeat_mode = static_cast<CellSearchRepeatMode>(get_yaml_node_value<u32>(root["RepeatMode"], err));
|
||||
if (!err.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No RepeatMode entry found. Error: '%s' (file: %s)", err, path);
|
||||
cellMusicSelectionContext.error("load_playlist: No RepeatMode entry found. Error: '%s' (file: '%s')", err, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
first_track = get_yaml_node_value<u32>(root["FirstTrack"], err);
|
||||
if (!err.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No FirstTrack entry found. Error: '%s' (file: %s)", err, path);
|
||||
cellMusicSelectionContext.error("load_playlist: No FirstTrack entry found. Error: '%s' (file: '%s')", err, path);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -235,15 +240,17 @@ bool music_selection_context::load_playlist()
|
|||
|
||||
if (!track_node || track_node.Type() != YAML::NodeType::Sequence)
|
||||
{
|
||||
cellMusicSelectionContext.error("No Tracks entry found or Tracks is not a Sequence. (file: %s)", path);
|
||||
cellMusicSelectionContext.error("load_playlist: No Tracks entry found or Tracks is not a Sequence. (file: '%s')", path);
|
||||
return false;
|
||||
}
|
||||
|
||||
for (usz i = 0; i < track_node.size(); i++)
|
||||
{
|
||||
cellMusicSelectionContext.notice("load_playlist: Adding track to playlist: '%s'. (file: '%s')", track_node[i].Scalar(), path);
|
||||
playlist.push_back(track_node[i].Scalar());
|
||||
}
|
||||
|
||||
cellMusicSelectionContext.notice("load_playlist: Loaded music playlist file '%s' (context: %s)", path, to_string());
|
||||
valid = true;
|
||||
return true;
|
||||
}
|
||||
|
@ -254,13 +261,13 @@ void music_selection_context::set_track(std::string_view track)
|
|||
|
||||
if (playlist.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No tracks to play... (requested path='%s')", track);
|
||||
cellMusicSelectionContext.error("set_track: No tracks to play... (requested path='%s')", track);
|
||||
return;
|
||||
}
|
||||
|
||||
for (usz i = 0; i < playlist.size(); i++)
|
||||
{
|
||||
cellMusicSelectionContext.error("Comparing track '%s' vs '%s'", track, playlist[i]);
|
||||
cellMusicSelectionContext.error("set_track: Comparing track '%s' vs '%s'", track, playlist[i]);
|
||||
if (track.ends_with(playlist[i]))
|
||||
{
|
||||
first_track = current_track = static_cast<u32>(i);
|
||||
|
@ -268,14 +275,14 @@ void music_selection_context::set_track(std::string_view track)
|
|||
}
|
||||
}
|
||||
|
||||
cellMusicSelectionContext.error("Track '%s' not found...", track);
|
||||
cellMusicSelectionContext.error("set_track: Track '%s' not found...", track);
|
||||
}
|
||||
|
||||
u32 music_selection_context::step_track(bool next)
|
||||
{
|
||||
if (playlist.empty())
|
||||
{
|
||||
cellMusicSelectionContext.error("No tracks to play...");
|
||||
cellMusicSelectionContext.error("step_track: No tracks to play...");
|
||||
current_track = umax;
|
||||
return umax;
|
||||
}
|
||||
|
@ -290,7 +297,7 @@ u32 music_selection_context::step_track(bool next)
|
|||
if (++current_track >= playlist.size())
|
||||
{
|
||||
// We are at the end of the playlist.
|
||||
cellMusicSelectionContext.notice("No more tracks to play in playlist...");
|
||||
cellMusicSelectionContext.notice("step_track: No more tracks to play in playlist...");
|
||||
current_track = umax;
|
||||
return umax;
|
||||
}
|
||||
|
@ -301,7 +308,7 @@ u32 music_selection_context::step_track(bool next)
|
|||
if (current_track == 0)
|
||||
{
|
||||
// We are at the start of the playlist.
|
||||
cellMusicSelectionContext.notice("No more tracks to play in playlist...");
|
||||
cellMusicSelectionContext.notice("step_track: No more tracks to play in playlist...");
|
||||
current_track = umax;
|
||||
return umax;
|
||||
}
|
||||
|
@ -339,13 +346,13 @@ u32 music_selection_context::step_track(bool next)
|
|||
case CELL_SEARCH_REPEATMODE_NOREPEAT1:
|
||||
{
|
||||
// We are done. We only wanted to decode a single track.
|
||||
cellMusicSelectionContext.notice("No more tracks to play...");
|
||||
cellMusicSelectionContext.notice("step_track: No more tracks to play...");
|
||||
current_track = umax;
|
||||
return umax;
|
||||
}
|
||||
default:
|
||||
{
|
||||
fmt::throw_exception("Unknown repeat mode %d", static_cast<u32>(repeat_mode));
|
||||
fmt::throw_exception("step_track: Unknown repeat mode %d", static_cast<u32>(repeat_mode));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -354,7 +361,7 @@ u32 music_selection_context::step_track(bool next)
|
|||
if (next ? current_track == 0 : current_track == (playlist.size() - 1))
|
||||
{
|
||||
// We reached the first or last track again. Let's shuffle!
|
||||
cellMusicSelectionContext.notice("Shuffling playlist...");
|
||||
cellMusicSelectionContext.notice("step_track: Shuffling playlist...");
|
||||
auto engine = std::default_random_engine{};
|
||||
std::shuffle(std::begin(playlist), std::end(playlist), engine);
|
||||
}
|
||||
|
|
|
@ -19,11 +19,6 @@ typedef png_bytep iCCP_profile_type;
|
|||
typedef png_charp iCCP_profile_type;
|
||||
#endif
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
LOG_CHANNEL(cellPngDec);
|
||||
|
||||
template <>
|
||||
|
@ -149,7 +144,7 @@ void pngDecRowCallback(png_structp png_ptr, png_bytep new_row, png_uint_32 row_n
|
|||
png_progressive_combine_row(png_ptr, data, new_row);
|
||||
}
|
||||
|
||||
void pngDecInfoCallback(png_structp png_ptr, png_infop info)
|
||||
void pngDecInfoCallback(png_structp png_ptr, png_infop /*info*/)
|
||||
{
|
||||
PngStream* stream = static_cast<PngStream*>(png_get_progressive_ptr(png_ptr));
|
||||
if (!stream)
|
||||
|
@ -162,7 +157,7 @@ void pngDecInfoCallback(png_structp png_ptr, png_infop info)
|
|||
stream->buffer->cursor += (stream->buffer->length - remaining);
|
||||
}
|
||||
|
||||
void pngDecEndCallback(png_structp png_ptr, png_infop info)
|
||||
void pngDecEndCallback(png_structp png_ptr, png_infop /*info*/)
|
||||
{
|
||||
PngStream* stream = static_cast<PngStream*>(png_get_progressive_ptr(png_ptr));
|
||||
if (!stream)
|
||||
|
@ -175,17 +170,17 @@ void pngDecEndCallback(png_structp png_ptr, png_infop info)
|
|||
}
|
||||
|
||||
// Custom error handler for libpng
|
||||
[[noreturn]] void pngDecError(png_structp png_ptr, png_const_charp error_message)
|
||||
[[noreturn]] void pngDecError(png_structp /*png_ptr*/, png_const_charp error_message)
|
||||
{
|
||||
cellPngDec.error("%s", error_message);
|
||||
cellPngDec.error("pngDecError: %s", error_message);
|
||||
// we can't return here or libpng blows up
|
||||
fmt::throw_exception("Fatal Error in libpng: %s", error_message);
|
||||
}
|
||||
|
||||
// Custom warning handler for libpng
|
||||
void pngDecWarning(png_structp png_ptr, png_const_charp error_message)
|
||||
void pngDecWarning(png_structp /*png_ptr*/, png_const_charp error_message)
|
||||
{
|
||||
cellPngDec.warning("%s", error_message);
|
||||
cellPngDec.warning("pngDecWarning: %s", error_message);
|
||||
}
|
||||
|
||||
// Get the chunk information of the PNG file. IDAT is marked as existing, only after decoding or reading the header.
|
||||
|
@ -337,7 +332,7 @@ be_t<u32> pngDecGetChunkInformation(PngStream* stream, bool IDAT = false)
|
|||
return chunk_information;
|
||||
}
|
||||
|
||||
error_code pngDecCreate(ppu_thread& ppu, PPHandle png_handle, PThreadInParam thread_in_param, PThreadOutParam thread_out_param, PExtThreadInParam extra_thread_in_param = vm::null, PExtThreadOutParam extra_thread_out_param = vm::null)
|
||||
error_code pngDecCreate(ppu_thread& ppu, PPHandle png_handle, PThreadInParam thread_in_param, PThreadOutParam thread_out_param, PExtThreadInParam /*extra_thread_in_param*/ = vm::null, PExtThreadOutParam extra_thread_out_param = vm::null)
|
||||
{
|
||||
// Check if partial image decoding is used
|
||||
if (extra_thread_out_param)
|
||||
|
@ -908,103 +903,103 @@ error_code cellPngDecExtDecodeData(ppu_thread& ppu, PHandle handle, PStream stre
|
|||
|
||||
error_code cellPngDecGetUnknownChunks(PHandle handle, PStream stream, vm::pptr<CellPngUnknownChunk> unknownChunk, vm::ptr<u32> unknownChunkNumber)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetUnknownChunks()");
|
||||
cellPngDec.todo("cellPngDecGetUnknownChunks(handle=*0x%x, stream=*0x%x, unknownChunk=*0x%x, unknownChunkNumber=*0x%x)", handle, stream, unknownChunk, unknownChunkNumber);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetpCAL(PHandle handle, PStream stream, vm::ptr<CellPngPCAL> pcal)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetpCAL()");
|
||||
cellPngDec.todo("cellPngDecGetpCAL(handle=*0x%x, stream=*0x%x, pcal=*0x%x)", handle, stream, pcal);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetcHRM(PHandle handle, PStream stream, vm::ptr<CellPngCHRM> chrm)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetcHRM()");
|
||||
cellPngDec.todo("cellPngDecGetcHRM(handle=*0x%x, stream=*0x%x, chrm=*0x%x)", handle, stream, chrm);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetsCAL(PHandle handle, PStream stream, vm::ptr<CellPngSCAL> scal)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetsCAL()");
|
||||
cellPngDec.todo("cellPngDecGetsCAL(handle=*0x%x, stream=*0x%x, scal=*0x%x)", handle, stream, scal);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetpHYs(PHandle handle, PStream stream, vm::ptr<CellPngPHYS> phys)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetpHYs()");
|
||||
cellPngDec.todo("cellPngDecGetpHYs(handle=*0x%x, stream=*0x%x, phys=*0x%x)", handle, stream, phys);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetoFFs(PHandle handle, PStream stream, vm::ptr<CellPngOFFS> offs)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetoFFs()");
|
||||
cellPngDec.todo("cellPngDecGetoFFs(handle=*0x%x, stream=*0x%x, offs=*0x%x)", handle, stream, offs);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetsPLT(PHandle handle, PStream stream, vm::ptr<CellPngSPLT> splt)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetsPLT()");
|
||||
cellPngDec.todo("cellPngDecGetsPLT(handle=*0x%x, stream=*0x%x, splt=*0x%x)", handle, stream, splt);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetbKGD(PHandle handle, PStream stream, vm::ptr<CellPngBKGD> bkgd)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetbKGD()");
|
||||
cellPngDec.todo("cellPngDecGetbKGD(handle=*0x%x, stream=*0x%x, bkgd=*0x%x)", handle, stream, bkgd);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGettIME(PHandle handle, PStream stream, vm::ptr<CellPngTIME> time)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGettIME()");
|
||||
cellPngDec.todo("cellPngDecGettIME(handle=*0x%x, stream=*0x%x, time=*0x%x)", handle, stream, time);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGethIST(PHandle handle, PStream stream, vm::ptr<CellPngHIST> hist)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGethIST()");
|
||||
cellPngDec.todo("cellPngDecGethIST(handle=*0x%x, stream=*0x%x, hist=*0x%x)", handle, stream, hist);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGettRNS(PHandle handle, PStream stream, vm::ptr<CellPngTRNS> trns)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGettRNS()");
|
||||
cellPngDec.todo("cellPngDecGettRNS(handle=*0x%x, stream=*0x%x, trns=*0x%x)", handle, stream, trns);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetsBIT(PHandle handle, PStream stream, vm::ptr<CellPngSBIT> sbit)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetsBIT()");
|
||||
cellPngDec.todo("cellPngDecGetsBIT(handle=*0x%x, stream=*0x%x, sbit=*0x%x)", handle, stream, sbit);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetiCCP(PHandle handle, PStream stream, vm::ptr<CellPngICCP> iccp)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetiCCP()");
|
||||
cellPngDec.todo("cellPngDecGetiCCP(handle=*0x%x, stream=*0x%x, iccp=*0x%x)", handle, stream, iccp);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetsRGB(PHandle handle, PStream stream, vm::ptr<CellPngSRGB> srgb)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetsRGB()");
|
||||
cellPngDec.todo("cellPngDecGetsRGB(handle=*0x%x, stream=*0x%x, srgb=*0x%x)", handle, stream, srgb);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetgAMA(PHandle handle, PStream stream, vm::ptr<CellPngGAMA> gama)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetgAMA()");
|
||||
cellPngDec.todo("cellPngDecGetgAMA(handle=*0x%x, stream=*0x%x, gama=*0x%x)", handle, stream, gama);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetPLTE(PHandle handle, PStream stream, vm::ptr<CellPngPLTE> plte)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetPLTE()");
|
||||
cellPngDec.todo("cellPngDecGetPLTE(handle=*0x%x, stream=*0x%x, plte=*0x%x)", handle, stream, plte);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellPngDecGetTextChunk(PHandle handle, PStream stream, vm::ptr<u32> textInfoNum, vm::pptr<CellPngTextInfo> textInfo)
|
||||
{
|
||||
cellPngDec.todo("cellPngDecGetTextChunk()");
|
||||
cellPngDec.todo("cellPngDecGetTextChunk(handle=*0x%x, stream=*0x%x, textInfoNum=*0x%x, textInfo=*0x%x)", handle, stream, textInfoNum, textInfo);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -2500,12 +2500,7 @@ error_code cellSaveDataListDelete(ppu_thread& ppu, PSetList setList, PSetBuf set
|
|||
return savedata_op(ppu, SAVEDATA_OP_LIST_DELETE, 0, vm::null, 0, setList, setBuf, funcList, vm::null, vm::null, vm::null, container, 0x40, userdata, 0, funcDone);
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
error_code cellSaveDataListImport(ppu_thread& ppu, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataListImport(ppu_thread& /*ppu*/, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataListImport(setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", setList, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2520,7 +2515,7 @@ error_code cellSaveDataListImport(ppu_thread& ppu, PSetList setList, u32 maxSize
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSaveDataListExport(ppu_thread& ppu, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataListExport(ppu_thread& /*ppu*/, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataListExport(setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", setList, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2535,7 +2530,7 @@ error_code cellSaveDataListExport(ppu_thread& ppu, PSetList setList, u32 maxSize
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSaveDataFixedImport(ppu_thread& ppu, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataFixedImport(ppu_thread& /*ppu*/, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataFixedImport(dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", dirName, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2550,7 +2545,7 @@ error_code cellSaveDataFixedImport(ppu_thread& ppu, vm::cptr<char> dirName, u32
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSaveDataFixedExport(ppu_thread& ppu, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataFixedExport(ppu_thread& /*ppu*/, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataFixedExport(dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", dirName, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2581,7 +2576,7 @@ error_code cellSaveDataUserListDelete(ppu_thread& ppu, u32 userId, PSetList setL
|
|||
return savedata_op(ppu, SAVEDATA_OP_LIST_DELETE, 0, vm::null, 0, setList, setBuf, funcList, vm::null, vm::null, vm::null, container, 0x40, userdata, userId, funcDone);
|
||||
}
|
||||
|
||||
error_code cellSaveDataUserListImport(ppu_thread& ppu, u32 userId, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataUserListImport(ppu_thread& /*ppu*/, u32 userId, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataUserListImport(userId=%d, setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, setList, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2596,7 +2591,7 @@ error_code cellSaveDataUserListImport(ppu_thread& ppu, u32 userId, PSetList setL
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSaveDataUserListExport(ppu_thread& ppu, u32 userId, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataUserListExport(ppu_thread& /*ppu*/, u32 userId, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataUserListExport(userId=%d, setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, setList, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2611,7 +2606,7 @@ error_code cellSaveDataUserListExport(ppu_thread& ppu, u32 userId, PSetList setL
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSaveDataUserFixedImport(ppu_thread& ppu, u32 userId, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataUserFixedImport(ppu_thread& /*ppu*/, u32 userId, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataUserFixedImport(userId=%d, dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, dirName, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
@ -2626,7 +2621,7 @@ error_code cellSaveDataUserFixedImport(ppu_thread& ppu, u32 userId, vm::cptr<cha
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSaveDataUserFixedExport(ppu_thread& ppu, u32 userId, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
error_code cellSaveDataUserFixedExport(ppu_thread& /*ppu*/, u32 userId, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
|
||||
{
|
||||
cellSaveData.todo("cellSaveDataUserFixedExport(userId=%d, dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, dirName, maxSizeKB, funcDone, container, userdata);
|
||||
|
||||
|
|
|
@ -160,11 +160,6 @@ void fmt_class_string<SpursWorkloadState>::format(std::string& out, u64 arg)
|
|||
|
||||
error_code sys_spu_image_close(ppu_thread&, vm::ptr<sys_spu_image> img);
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Function prototypes
|
||||
//----------------------------------------------------------------------------
|
||||
|
@ -738,7 +733,7 @@ void _spurs::handler_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
|
|||
}
|
||||
}
|
||||
|
||||
s32 _spurs::create_handler(vm::ptr<CellSpurs> spurs, u32 ppuPriority)
|
||||
s32 _spurs::create_handler(vm::ptr<CellSpurs> /*spurs*/, u32 /*ppuPriority*/)
|
||||
{
|
||||
struct handler_thread : ppu_thread
|
||||
{
|
||||
|
@ -909,7 +904,7 @@ void _spurs::event_helper_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
|
|||
}
|
||||
}
|
||||
|
||||
s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 ppuPriority)
|
||||
s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 /*ppuPriority*/)
|
||||
{
|
||||
if (s32 rc = _spurs::create_lv2_eq(ppu, spurs, spurs.ptr(&CellSpurs::eventQueue), spurs.ptr(&CellSpurs::spuPort), 0x2A, sys_event_queue_attribute_t{SYS_SYNC_PRIORITY, SYS_PPU_QUEUE, {"_spuPrv\0"_u64}}))
|
||||
{
|
||||
|
@ -981,7 +976,7 @@ void _spurs::init_event_port_mux(vm::ptr<CellSpurs::EventPortMux> eventPortMux,
|
|||
eventPortMux->x08 = unknown;
|
||||
}
|
||||
|
||||
s32 _spurs::add_default_syswkl(vm::ptr<CellSpurs> spurs, vm::cptr<u8> swlPriority, u32 swlMaxSpu, u32 swlIsPreem)
|
||||
s32 _spurs::add_default_syswkl(vm::ptr<CellSpurs> /*spurs*/, vm::cptr<u8> /*swlPriority*/, u32 /*swlMaxSpu*/, u32 /*swlIsPreem*/)
|
||||
{
|
||||
// TODO: Implement this
|
||||
return CELL_OK;
|
||||
|
@ -1859,7 +1854,7 @@ s32 cellSpursSetPriority(vm::ptr<CellSpurs> spurs, u32 wid, u32 spuId, u32 prior
|
|||
/// Set preemption victim SPU
|
||||
s32 cellSpursSetPreemptionVictimHints(vm::ptr<CellSpurs> spurs, vm::cptr<b8> isPreemptible)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellSpurs);
|
||||
cellSpurs.todo("cellSpursSetPreemptionVictimHints(spurs=*0x%x, isPreemptible=*0x%x)", spurs, isPreemptible);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -2230,7 +2225,7 @@ s32 cellSpursTraceStop(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
|
|||
//----------------------------------------------------------------------------
|
||||
|
||||
/// Initialize attributes of a workload
|
||||
s32 _cellSpursWorkloadAttributeInitialize(ppu_thread& ppu, vm::ptr<CellSpursWorkloadAttribute> attr, u32 revision, u32 sdkVersion, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt)
|
||||
s32 _cellSpursWorkloadAttributeInitialize(ppu_thread& /*ppu*/, vm::ptr<CellSpursWorkloadAttribute> attr, u32 revision, u32 sdkVersion, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt)
|
||||
{
|
||||
cellSpurs.warning("_cellSpursWorkloadAttributeInitialize(attr=*0x%x, revision=%d, sdkVersion=0x%x, pm=*0x%x, size=0x%x, data=0x%llx, priority=*0x%x, minCnt=0x%x, maxCnt=0x%x)",
|
||||
attr, revision, sdkVersion, pm, size, data, priority, minCnt, maxCnt);
|
||||
|
@ -2277,7 +2272,7 @@ s32 _cellSpursWorkloadAttributeInitialize(ppu_thread& ppu, vm::ptr<CellSpursWork
|
|||
}
|
||||
|
||||
/// Set the name of a workload
|
||||
s32 cellSpursWorkloadAttributeSetName(ppu_thread& ppu, vm::ptr<CellSpursWorkloadAttribute> attr, vm::cptr<char> nameClass, vm::cptr<char> nameInstance)
|
||||
s32 cellSpursWorkloadAttributeSetName(ppu_thread& /*ppu*/, vm::ptr<CellSpursWorkloadAttribute> attr, vm::cptr<char> nameClass, vm::cptr<char> nameInstance)
|
||||
{
|
||||
cellSpurs.warning("cellSpursWorkloadAttributeSetName(attr=*0x%x, nameClass=%s, nameInstance=%s)", attr, nameClass, nameInstance);
|
||||
|
||||
|
@ -2771,7 +2766,7 @@ s32 cellSpursWakeUp(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
|
|||
}
|
||||
|
||||
/// Send a workload signal
|
||||
s32 cellSpursSendWorkloadSignal(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
|
||||
s32 cellSpursSendWorkloadSignal(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid)
|
||||
{
|
||||
cellSpurs.warning("cellSpursSendWorkloadSignal(spurs=*0x%x, wid=%d)", spurs, wid);
|
||||
|
||||
|
@ -2833,7 +2828,7 @@ s32 cellSpursGetWorkloadFlag(vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursWorkloa
|
|||
}
|
||||
|
||||
/// Set ready count
|
||||
s32 cellSpursReadyCountStore(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 value)
|
||||
s32 cellSpursReadyCountStore(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid, u32 value)
|
||||
{
|
||||
cellSpurs.trace("cellSpursReadyCountStore(spurs=*0x%x, wid=%d, value=0x%x)", spurs, wid, value);
|
||||
|
||||
|
@ -2871,7 +2866,7 @@ s32 cellSpursReadyCountStore(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid,
|
|||
}
|
||||
|
||||
/// Swap ready count
|
||||
s32 cellSpursReadyCountSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 swap)
|
||||
s32 cellSpursReadyCountSwap(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 swap)
|
||||
{
|
||||
cellSpurs.trace("cellSpursReadyCountSwap(spurs=*0x%x, wid=%d, old=*0x%x, swap=0x%x)", spurs, wid, old, swap);
|
||||
|
||||
|
@ -2909,7 +2904,7 @@ s32 cellSpursReadyCountSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid,
|
|||
}
|
||||
|
||||
/// Compare and swap ready count
|
||||
s32 cellSpursReadyCountCompareAndSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 compare, u32 swap)
|
||||
s32 cellSpursReadyCountCompareAndSwap(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 compare, u32 swap)
|
||||
{
|
||||
cellSpurs.trace("cellSpursReadyCountCompareAndSwap(spurs=*0x%x, wid=%d, old=*0x%x, compare=0x%x, swap=0x%x)", spurs, wid, old, compare, swap);
|
||||
|
||||
|
@ -2950,7 +2945,7 @@ s32 cellSpursReadyCountCompareAndSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs,
|
|||
}
|
||||
|
||||
/// Increase or decrease ready count
|
||||
s32 cellSpursReadyCountAdd(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, s32 value)
|
||||
s32 cellSpursReadyCountAdd(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, s32 value)
|
||||
{
|
||||
cellSpurs.trace("cellSpursReadyCountAdd(spurs=*0x%x, wid=%d, old=*0x%x, value=0x%x)", spurs, wid, old, value);
|
||||
|
||||
|
@ -3030,7 +3025,7 @@ s32 cellSpursGetWorkloadData(vm::ptr<CellSpurs> spurs, vm::ptr<u64> data, u32 wi
|
|||
}
|
||||
|
||||
/// Get workload information
|
||||
s32 cellSpursGetWorkloadInfo(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<CellSpursWorkloadInfo> info)
|
||||
s32 cellSpursGetWorkloadInfo(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<CellSpursWorkloadInfo> info)
|
||||
{
|
||||
cellSpurs.todo("cellSpursGetWorkloadInfo(spurs=*0x%x, wid=0x%x, info=*0x%x)", spurs, wid, info);
|
||||
return CELL_OK;
|
||||
|
@ -3135,7 +3130,7 @@ s32 _cellSpursWorkloadFlagReceiver(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u3
|
|||
}
|
||||
|
||||
/// Set/unset the recipient of the workload flag
|
||||
s32 _cellSpursWorkloadFlagReceiver2(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set, u32 print_debug_output)
|
||||
s32 _cellSpursWorkloadFlagReceiver2(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set, u32 print_debug_output)
|
||||
{
|
||||
cellSpurs.warning("_cellSpursWorkloadFlagReceiver2(spurs=*0x%x, wid=%d, is_set=%d, print_debug_output=%d)", spurs, wid, is_set, print_debug_output);
|
||||
|
||||
|
@ -3833,19 +3828,19 @@ s32 _cellSpursLFQueueInitialize(vm::ptr<void> pTasksetOrSpurs, vm::ptr<CellSpurs
|
|||
|
||||
s32 _cellSpursLFQueuePushBody()
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellSpurs);
|
||||
cellSpurs.todo("_cellSpursLFQueuePushBody()");
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 cellSpursLFQueueAttachLv2EventQueue(vm::ptr<CellSyncLFQueue> queue)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellSpurs);
|
||||
cellSpurs.todo("cellSpursLFQueueAttachLv2EventQueue(queue=*0x%x)", queue);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 cellSpursLFQueueDetachLv2EventQueue(vm::ptr<CellSyncLFQueue> queue)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(cellSpurs);
|
||||
cellSpurs.todo("cellSpursLFQueueDetachLv2EventQueue(queue=*0x%x)", queue);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -4134,7 +4129,7 @@ s32 _spurs::create_task(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id,
|
|||
|
||||
u32 tmp_task_id;
|
||||
|
||||
vm::light_op(vm::_ref<atomic_be_t<v128>>(taskset.ptr(&CellSpursTaskset::enabled).addr()), [&](atomic_be_t<v128>& ptr)
|
||||
vm::light_op(*vm::_ptr<atomic_be_t<v128>>(taskset.ptr(&CellSpursTaskset::enabled).addr()), [&](atomic_be_t<v128>& ptr)
|
||||
{
|
||||
// NOTE: Realfw processes this using 4 32-bits atomic loops
|
||||
// But here its processed within a single 128-bit atomic op
|
||||
|
@ -4571,7 +4566,7 @@ s32 cellSpursTasksetUnsetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 cellSpursLookUpTasksetAddress(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursTaskset> taskset, u32 id)
|
||||
s32 cellSpursLookUpTasksetAddress(ppu_thread& /*ppu*/, vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursTaskset> taskset, u32 id)
|
||||
{
|
||||
cellSpurs.warning("cellSpursLookUpTasksetAddress(spurs=*0x%x, taskset=**0x%x, id=0x%x)", spurs, taskset, id);
|
||||
|
||||
|
@ -4653,9 +4648,9 @@ s32 _cellSpursTasksetAttributeInitialize(vm::ptr<CellSpursTasksetAttribute> attr
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 _spurs::check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 sizeJobDescr, u16 maxGrabbedJob
|
||||
, u64 priorities, u32 maxContention, u8 autoSpuCount, u32 tag1, u32 tag2
|
||||
, u8 isFixedMemAlloc, u32 maxSizeJob, u32 initSpuCount)
|
||||
s32 _spurs::check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 sizeJobDescr, u16 maxGrabbedJob,
|
||||
u64 priorities, u32 /*maxContention*/, u8 autoSpuCount, u32 tag1, u32 tag2,
|
||||
u8 /*isFixedMemAlloc*/, u32 maxSizeJob, u32 initSpuCount)
|
||||
{
|
||||
if (!jcEntry)
|
||||
return CELL_SPURS_JOB_ERROR_NULL_POINTER;
|
||||
|
@ -4686,9 +4681,9 @@ s32 _spurs::check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 siz
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 _spurs::create_job_chain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJob
|
||||
, u16 maxGrabbedJob, vm::cptr<u8[8]> prio, u32 maxContention, b8 autoReadyCount
|
||||
, u32 tag1, u32 tag2, u32 HaltOnError, vm::cptr<char> name, u32 param_13, u32 param_14)
|
||||
s32 _spurs::create_job_chain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 /*sizeJob*/,
|
||||
u16 maxGrabbedJob, vm::cptr<u8[8]> prio, u32 maxContention, b8 /*autoReadyCount*/,
|
||||
u32 tag1, u32 tag2, u32 /*HaltOnError*/, vm::cptr<char> name, u32 /*param_13*/, u32 /*param_14*/)
|
||||
{
|
||||
const s32 sdkVer = _spurs::get_sdk_version();
|
||||
jobChain->spurs = spurs;
|
||||
|
|
|
@ -15,7 +15,6 @@ LOG_CHANNEL(cellSpurs);
|
|||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-function"
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
|
@ -89,7 +88,7 @@ void spursJobchainPopUrgentCommand(spu_thread& spu);
|
|||
//----------------------------------------------------------------------------
|
||||
|
||||
// Output trace information
|
||||
void cellSpursModulePutTrace(CellSpursTracePacket* packet, u32 dmaTagId)
|
||||
void cellSpursModulePutTrace(CellSpursTracePacket* /*packet*/, u32 /*dmaTagId*/)
|
||||
{
|
||||
// TODO: Implement this
|
||||
}
|
||||
|
@ -871,7 +870,7 @@ void spursSysServiceIdleHandler(spu_thread& spu, SpursKernelContext* ctxt)
|
|||
}
|
||||
|
||||
// Main function for the system service
|
||||
void spursSysServiceMain(spu_thread& spu, u32 pollStatus)
|
||||
void spursSysServiceMain(spu_thread& spu, u32 /*pollStatus*/)
|
||||
{
|
||||
const auto ctxt = spu._ptr<SpursKernelContext>(0x100);
|
||||
|
||||
|
@ -1166,7 +1165,7 @@ void spursSysServiceUpdateShutdownCompletionEvents(spu_thread& spu, SpursKernelC
|
|||
}
|
||||
|
||||
// Update the trace count for this SPU
|
||||
void spursSysServiceTraceSaveCount(spu_thread& spu, SpursKernelContext* ctxt)
|
||||
void spursSysServiceTraceSaveCount(spu_thread& /*spu*/, SpursKernelContext* ctxt)
|
||||
{
|
||||
if (ctxt->traceBuffer)
|
||||
{
|
||||
|
@ -1594,12 +1593,12 @@ s32 spursTasksetProcessRequest(spu_thread& spu, s32 request, u32* taskId, u32* i
|
|||
spursHalt(spu);
|
||||
}
|
||||
|
||||
vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::waiting)) = waiting;
|
||||
vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::running)) = running;
|
||||
vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::ready)) = ready;
|
||||
vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::pending_ready)) = v128{};
|
||||
vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::enabled)) = enabled;
|
||||
vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::signalled)) = signalled;
|
||||
// vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::waiting)) = waiting;
|
||||
// vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::running)) = running;
|
||||
// vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::ready)) = ready;
|
||||
// vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::pending_ready)) = v128{};
|
||||
// vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::enabled)) = enabled;
|
||||
// vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::signalled)) = signalled;
|
||||
|
||||
std::memcpy(spu._ptr<void>(0x2700), spu._ptr<void>(0x100), 128); // Copy data
|
||||
}//);
|
||||
|
@ -2056,7 +2055,7 @@ s32 spursTasksetLoadElf(spu_thread& spu, u32* entryPoint, u32* lowestLoadAddr, u
|
|||
//----------------------------------------------------------------------------
|
||||
// SPURS taskset policy module functions
|
||||
//----------------------------------------------------------------------------
|
||||
bool spursJobChainEntry(spu_thread& spu)
|
||||
bool spursJobChainEntry(spu_thread& /*spu*/)
|
||||
{
|
||||
//const auto ctxt = spu._ptr<SpursJobChainContext>(0x4a00);
|
||||
//auto kernelCtxt = spu._ptr<SpursKernelContext>(spu.gpr[3]._u32[3]);
|
||||
|
|
|
@ -478,11 +478,6 @@ error_code cellVideoOutGetResolutionAvailability(u32 videoOut, u32 resolutionId,
|
|||
return CELL_VIDEO_OUT_ERROR_UNSUPPORTED_VIDEO_OUT;
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
error_code cellVideoOutGetConvertCursorColorInfo(vm::ptr<u8> rgbOutputRange)
|
||||
{
|
||||
cellSysutil.todo("cellVideoOutGetConvertCursorColorInfo(rgbOutputRange=*0x%x)", rgbOutputRange);
|
||||
|
|
|
@ -34,308 +34,303 @@ void fmt_class_string<CellSnd3Error>::format(std::string& out, u64 arg)
|
|||
});
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
error_code cellSnd3Init(u32 maxVoice, u32 samples, vm::ptr<CellSnd3RequestQueueCtx> queue)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3Init(maxVoice=%d, samples=%d, queue=*0x%x)", maxVoice, samples, queue);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3Exit()
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3Exit()");
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
u16 cellSnd3Note2Pitch(u16 center_note, u16 center_fine, u16 note, s16 fine)
|
||||
{
|
||||
libsnd3.todo("cellSnd3Note2Pitch()");
|
||||
libsnd3.todo("cellSnd3Note2Pitch(center_note=%d, center_fine=%d, note=%d, fine=%d)", center_note, center_fine, note, fine);
|
||||
return 0;
|
||||
}
|
||||
|
||||
u16 cellSnd3Pitch2Note(u16 center_note, u16 center_fine, u16 pitch)
|
||||
{
|
||||
libsnd3.todo("cellSnd3Pitch2Note()");
|
||||
libsnd3.todo("cellSnd3Pitch2Note(center_note=%d, center_fine=%d, pitch=%d)", center_note, center_fine, pitch);
|
||||
return 0;
|
||||
}
|
||||
|
||||
error_code cellSnd3SetOutputMode(u32 mode)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SetOutputMode(mode=%d)", mode);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3Synthesis(vm::ptr<f32> pOutL, vm::ptr<f32> pOutR)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3Synthesis(pOutL=*0x%x, pOutR=*0x%x)", pOutL, pOutR);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SynthesisEx(vm::ptr<f32> pOutL, vm::ptr<f32> pOutR, vm::ptr<f32> pOutRL, vm::ptr<f32> pOutRR)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SynthesisEx(pOutL=*0x%x, pOutR=*0x%x, pOutRL=*0x%x, pOutRR=*0x%x)", pOutL, pOutR, pOutRL, pOutRR);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3BindSoundData(vm::ptr<CellSnd3DataCtx> snd3Ctx, vm::ptr<void> hd3, u32 synthMemOffset)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3BindSoundData(snd3Ctx=*0x%x, hd3=*0x%x, synthMemOffset=0x%x)", snd3Ctx, hd3, synthMemOffset);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3UnbindSoundData(u32 hd3ID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3UnbindSoundData(hd3ID=0x%x)", hd3ID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3NoteOnByTone(u32 hd3ID, u32 toneIndex, u32 note, u32 keyOnID, vm::ptr<CellSnd3KeyOnParam> keyOnParam)
|
||||
{
|
||||
libsnd3.todo("cellSnd3NoteOnByTone()");
|
||||
libsnd3.todo("cellSnd3NoteOnByTone(hd3ID=0x%x, toneIndex=%d, note=%d, keyOnID=0x%x, keyOnParam=*0x%x)", hd3ID, toneIndex, note, keyOnID, keyOnParam);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3KeyOnByTone(u32 hd3ID, u32 toneIndex, u32 pitch, u32 keyOnID, vm::ptr<CellSnd3KeyOnParam> keyOnParam)
|
||||
{
|
||||
libsnd3.todo("cellSnd3KeyOnByTone()");
|
||||
libsnd3.todo("cellSnd3KeyOnByTone(hd3ID=0x%x, toneIndex=%d, pitch=%d, keyOnID=0x%x, keyOnParam=*0x%x)", hd3ID, toneIndex, pitch, keyOnID, keyOnParam);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceNoteOnByTone(u32 hd3ID, u32 voiceNum, u32 toneIndex, u32 note, u32 keyOnID, vm::ptr<CellSnd3KeyOnParam> keyOnParam)
|
||||
{
|
||||
libsnd3.todo("cellSnd3VoiceNoteOnByTone()");
|
||||
libsnd3.todo("cellSnd3VoiceNoteOnByTone(hd3ID=0x%x, voiceNum=%d, toneIndex=%d, note=%d, keyOnID=0x%x, keyOnParam=*0x%x)", hd3ID, voiceNum, toneIndex, note, keyOnID, keyOnParam);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceKeyOnByTone(u32 hd3ID, u32 voiceNum, u32 toneIndex, u32 pitch, u32 keyOnID, vm::ptr<CellSnd3KeyOnParam> keyOnParam)
|
||||
{
|
||||
libsnd3.todo("cellSnd3VoiceKeyOnByTone()");
|
||||
libsnd3.todo("cellSnd3VoiceKeyOnByTone(hd3ID=0x%x, voiceNum=%d, toneIndex=%d, pitch=%d, keyOnID=0x%x, keyOnParam=*0x%x)", hd3ID, voiceNum, toneIndex, pitch, keyOnID, keyOnParam);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetReserveMode(u32 voiceNum, u32 reserveMode)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetReserveMode(voiceNum=%d, reserveMode=%d)", voiceNum, reserveMode);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetSustainHold(u32 voiceNum, u32 sustainHold)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetSustainHold(voiceNum=%d, sustainHold=%d)", voiceNum, sustainHold);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceKeyOff(u32 voiceNum)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceKeyOff(voiceNum=%d)", voiceNum);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetPitch(u32 voiceNum, s32 addPitch)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetPitch(voiceNum=%d, addPitch=%d)", voiceNum, addPitch);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetVelocity(u32 voiceNum, u32 velocity)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetVelocity(voiceNum=%d, velocity=%d)", voiceNum, velocity);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetPanpot(u32 voiceNum, u32 panpot)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetPanpot(voiceNum=%d, panpot=%d)", voiceNum, panpot);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetPanpotEx(u32 voiceNum, u32 panpotEx)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetPanpotEx(voiceNum=%d, panpotEx=%d)", voiceNum, panpotEx);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceSetPitchBend(u32 voiceNum, u32 bendValue)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceSetPitchBend(voiceNum=%d, bendValue=%d)", voiceNum, bendValue);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceAllKeyOff()
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceAllKeyOff()");
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceGetEnvelope(u32 voiceNum)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3VoiceGetEnvelope(voiceNum=%d)", voiceNum);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3VoiceGetStatus(u32 voiceNum)
|
||||
{
|
||||
libsnd3.todo("cellSnd3VoiceGetStatus()");
|
||||
libsnd3.todo("cellSnd3VoiceGetStatus(voiceNum=%d)", voiceNum);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
u32 cellSnd3KeyOffByID(u32 keyOnID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3KeyOffByID(keyOnID=%d)", keyOnID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3GetVoice(u32 midiChannel, u32 keyOnID, vm::ptr<CellSnd3VoiceBitCtx> voiceBit)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3GetVoice(midiChannel=%d, keyOnID=%d, voiceBit=*0x%x)", midiChannel, keyOnID, voiceBit);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3GetVoiceByID(u32 ID, vm::ptr<CellSnd3VoiceBitCtx> voiceBit)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3GetVoiceByID(ID=%d, voiceBit=*0x%x)", ID, voiceBit);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3NoteOn(u32 hd3ID, u32 midiChannel, u32 midiProgram, u32 midiNote, u32 sustain, vm::ptr<CellSnd3KeyOnParam> keyOnParam, u32 keyOnID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3NoteOn(hd3ID=%d, midiChannel=%d, midiProgram=%d, midiNote=%d, sustain=%d, keyOnParam=*0x%x, keyOnID=%d)", hd3ID, midiChannel, midiProgram, midiNote, sustain, keyOnParam, keyOnID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3NoteOff(u32 midiChannel, u32 midiNote, u32 keyOnID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3NoteOff(midiChannel=%d, midiNote=%d, keyOnID=%d)", midiChannel, midiNote, keyOnID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SetSustainHold(u32 midiChannel, u32 sustainHold, u32 keyOnID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SetSustainHold(midiChannel=%d, sustainHold=%d, keyOnID=%d)", midiChannel, sustainHold, keyOnID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SetEffectType(u16 effectType, s16 returnVol, u16 delay, u16 feedback)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SetEffectType(effectType=%d, returnVol=%d, delay=%d, feedback=%d)", effectType, returnVol, delay, feedback);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFBind(vm::ptr<CellSnd3SmfCtx> smfCtx, vm::ptr<void> smf, u32 hd3ID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFBind()");
|
||||
libsnd3.todo("cellSnd3SMFBind(smfCtx=*0x%x, delay=*0x%x, hd3ID=%d)", smfCtx, smf, hd3ID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFUnbind(u32 smfID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFUnbind()");
|
||||
libsnd3.todo("cellSnd3SMFUnbind(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFPlay(u32 smfID, u32 playVelocity, u32 playPan, u32 playCount)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFPlay(smfID=%d, playVelocity=%d, playPan=%d, playCount=%d)", smfID, playVelocity, playPan, playCount);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFPlayEx(u32 smfID, u32 playVelocity, u32 playPan, u32 playPanEx, u32 playCount)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFPlayEx(smfID=%d, playVelocity=%d, playPan=%d, playPanEx=%d, playCount=%d)", smfID, playVelocity, playPan, playPanEx, playCount);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFPause(u32 smfID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFPause(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFResume(u32 smfID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFResume(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFStop(u32 smfID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFStop(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFAddTempo(u32 smfID, s32 addTempo)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFAddTempo(smfID=%d, addTempo=%d)", smfID, addTempo);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetTempo(u32 smfID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFGetTempo()");
|
||||
libsnd3.todo("cellSnd3SMFGetTempo(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFSetPlayVelocity(u32 smfID, u32 playVelocity)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFSetPlayVelocity(smfID=%d, playVelocity=%d)", smfID, playVelocity);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetPlayVelocity(u32 smfID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFGetPlayVelocity()");
|
||||
libsnd3.todo("cellSnd3SMFGetPlayVelocity(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFSetPlayPanpot(u32 smfID, u32 playPanpot)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFSetPlayPanpot(smfID=%d, playPanpot=%d)", smfID, playPanpot);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFSetPlayPanpotEx(u32 smfID, u32 playPanpotEx)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFSetPlayPanpotEx(smfID=%d, playPanpotEx=%d)", smfID, playPanpotEx);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetPlayPanpot(u32 smfID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFGetPlayPanpot()");
|
||||
libsnd3.todo("cellSnd3SMFGetPlayPanpot(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetPlayPanpotEx(u32 smfID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFGetPlayPanpotEx()");
|
||||
libsnd3.todo("cellSnd3SMFGetPlayPanpotEx(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetPlayStatus(u32 smfID)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFGetPlayStatus()");
|
||||
libsnd3.todo("cellSnd3SMFGetPlayStatus(smfID=%d)", smfID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFSetPlayChannel(u32 smfID, u32 playChannelBit)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFSetPlayChannel(smfID=%d, playChannelBit=%d)", smfID, playChannelBit);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetPlayChannel(u32 smfID, vm::ptr<u32> playChannelBit)
|
||||
{
|
||||
libsnd3.todo("cellSnd3SMFGetPlayChannel()");
|
||||
libsnd3.todo("cellSnd3SMFGetPlayChannel(smfID=%d, playChannelBit=*0x%x)", smfID, playChannelBit);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellSnd3SMFGetKeyOnID(u32 smfID, u32 midiChannel, vm::ptr<u32> keyOnID)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libsnd3);
|
||||
libsnd3.todo("cellSnd3SMFAddTempo(smfID=%d, midiChannel=%d, keyOnID=*0x%x)", smfID, midiChannel, keyOnID);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -546,6 +546,11 @@ error_code sceNpTrophyDestroyContext(u32 context)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
struct register_context_thread_name
|
||||
{
|
||||
static constexpr std::string_view thread_name = "Trophy Register Thread";
|
||||
};
|
||||
|
||||
error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle, vm::ptr<SceNpTrophyStatusCallback> statusCb, vm::ptr<void> arg, u64 options)
|
||||
{
|
||||
sceNpTrophy.warning("sceNpTrophyRegisterContext(context=0x%x, handle=0x%x, statusCb=*0x%x, arg=*0x%x, options=0x%llx)", context, handle, statusCb, arg, options);
|
||||
|
@ -709,54 +714,76 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
|
|||
|
||||
ensure(tropusr->Load(trophyUsrPath, trophyConfPath).success);
|
||||
|
||||
// This emulates vsh sending the events and ensures that no two events are processed at once
|
||||
const std::pair<u32, s32> statuses[] =
|
||||
{
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_SETUP, 3 },
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_PROGRESS, ::narrow<s32>(tropusr->GetTrophiesCount()) - 1 },
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_FINALIZE, 4 },
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_COMPLETE, 0 }
|
||||
};
|
||||
|
||||
lock2.unlock();
|
||||
|
||||
lv2_obj::sleep(ppu);
|
||||
|
||||
// Create a counter which is destroyed after the function ends
|
||||
const auto queued = std::make_shared<atomic_t<u32>>(0);
|
||||
|
||||
for (auto status : statuses)
|
||||
{
|
||||
// One status max per cellSysutilCheckCallback call
|
||||
*queued += status.second;
|
||||
for (s32 completed = 0; completed <= status.second; completed++)
|
||||
const s32 progress_cb_count = ::narrow<s32>(tropusr->GetTrophiesCount()) - 1;
|
||||
{
|
||||
sysutil_register_cb([statusCb, status, context, completed, arg, queued](ppu_thread& cb_ppu) -> s32
|
||||
// This emulates vsh sending the events and ensures that no two events are processed at once
|
||||
const std::pair<SceNpTrophyStatus, s32> statuses[] =
|
||||
{
|
||||
// TODO: it is possible that we need to check the return value here as well.
|
||||
statusCb(cb_ppu, context, status.first, completed, status.second, arg);
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_SETUP, 3 },
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_PROGRESS, progress_cb_count },
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_FINALIZE, std::max<s32>(progress_cb_count, 9) - 5 }, // Seems to vary; a little less than progress_cb_count
|
||||
{ SCE_NP_TROPHY_STATUS_PROCESSING_COMPLETE, 0 }
|
||||
};
|
||||
|
||||
if (queued && (*queued)-- == 1)
|
||||
// Create a counter which is destroyed after the function ends
|
||||
const auto queued = std::make_shared<atomic_t<u32>>(0);
|
||||
|
||||
u32 total_events = 0;
|
||||
|
||||
for (auto status : statuses)
|
||||
{
|
||||
total_events += status.second + 1;
|
||||
}
|
||||
|
||||
for (auto status : statuses)
|
||||
{
|
||||
for (s32 completed = 0; completed <= status.second; completed++)
|
||||
{
|
||||
queued->notify_one();
|
||||
// One status max per cellSysutilCheckCallback call
|
||||
*queued += 1;
|
||||
|
||||
sysutil_register_cb([statusCb, status, context, completed, arg, queued](ppu_thread& cb_ppu) -> s32
|
||||
{
|
||||
// TODO: it is possible that we need to check the return value here as well.
|
||||
statusCb(cb_ppu, context, status.first, completed, status.second, arg);
|
||||
|
||||
if (queued && (*queued)-- == 1)
|
||||
{
|
||||
queued->notify_one();
|
||||
}
|
||||
|
||||
return 0;
|
||||
});
|
||||
|
||||
u64 current = get_system_time();
|
||||
|
||||
// Minimum register trophy time 2 seconds globally.
|
||||
const u64 until_min = current + (2'000'000 / total_events);
|
||||
const u64 until_max = until_min + 50'000;
|
||||
|
||||
// If too much time passes just send the rest of the events anyway
|
||||
for (u32 old_value = *queued; current < (old_value ? until_max : until_min);
|
||||
current = get_system_time(), old_value = *queued)
|
||||
{
|
||||
if (!old_value)
|
||||
{
|
||||
thread_ctrl::wait_for(until_min - current);
|
||||
}
|
||||
else
|
||||
{
|
||||
thread_ctrl::wait_on(*queued, old_value, until_max - current);
|
||||
}
|
||||
|
||||
if (thread_ctrl::state() == thread_state::aborting)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
});
|
||||
}
|
||||
|
||||
u64 current = get_system_time();
|
||||
const u64 until = current + 300'000;
|
||||
|
||||
// If too much time passes just send the rest of the events anyway
|
||||
for (u32 old_value; current < until && (old_value = *queued);
|
||||
current = get_system_time())
|
||||
{
|
||||
thread_ctrl::wait_on(*queued, old_value, until - current);
|
||||
|
||||
if (ppu.is_stopped())
|
||||
{
|
||||
return {};
|
||||
}
|
||||
}
|
||||
}
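The hunk above paces the trophy status callbacks: each queued event increments a shared counter, the sysutil callback decrements it, and the loop waits one time slot per event so the whole registration takes at least roughly two seconds, with a short grace period when a callback has not been consumed yet. Below is a minimal, self-contained sketch of that pacing idea only, in plain standard C++; the names, timings, and use of std::thread are illustrative assumptions, not RPCS3 APIs.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

int main()
{
	using clock = std::chrono::steady_clock;

	constexpr int total_events = 8;
	constexpr auto min_total = std::chrono::milliseconds(2000); // spread all events over at least ~2 s
	constexpr auto grace     = std::chrono::milliseconds(50);   // extra wait for a slow consumer

	std::atomic<int> queued{0};
	std::vector<std::thread> consumers;

	for (int i = 0; i < total_events; i++)
	{
		queued.fetch_add(1);

		// The "callback" runs asynchronously and decrements the counter when done.
		consumers.emplace_back([&queued, i]
		{
			std::printf("event %d processed\n", i);
			queued.fetch_sub(1);
		});

		const auto slot_end = clock::now() + min_total / total_events;

		// Wait at least one slot per event; if the consumer has not caught up,
		// allow a short grace period, then move on regardless.
		while (true)
		{
			const auto deadline = (queued.load() != 0) ? slot_end + grace : slot_end;
			if (clock::now() >= deadline)
				break;
			std::this_thread::sleep_for(std::chrono::milliseconds(1));
		}
	}

	for (auto& t : consumers)
		t.join();
}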
@ -135,7 +135,7 @@ struct SceNpTrophyFlagArray
be_t<u32> flag_bits[SCE_NP_TROPHY_FLAG_SETSIZE >> SCE_NP_TROPHY_FLAG_BITS_SHIFT];
};

enum
enum SceNpTrophyStatus : u32
{
SCE_NP_TROPHY_STATUS_UNKNOWN = 0,
SCE_NP_TROPHY_STATUS_NOT_INSTALLED = 1,
@ -65,38 +65,33 @@ void fmt_class_string<CellLv2DbgError>::format(std::string& out, u64 arg)
|
|||
});
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
error_code sys_dbg_read_ppu_thread_context(u64 id, vm::ptr<sys_dbg_ppu_thread_context_t> ppu_context)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_read_ppu_thread_context()");
|
||||
sys_lv2dbg.todo("sys_dbg_read_ppu_thread_context(id=0x%x, ppu_context=*0x%x)", id, ppu_context);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_read_spu_thread_context(u32 id, vm::ptr<sys_dbg_spu_thread_context_t> spu_context)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_read_spu_thread_context()");
|
||||
sys_lv2dbg.todo("sys_dbg_read_spu_thread_context(id=0x%x, spu_context=*0x%x)", id, spu_context);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_read_spu_thread_context2(u32 id, vm::ptr<sys_dbg_spu_thread_context2_t> spu_context)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_read_spu_thread_context2()");
|
||||
sys_lv2dbg.todo("sys_dbg_read_spu_thread_context2(id=0x%x, spu_context=*0x%x)", id, spu_context);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_set_stacksize_ppu_exception_handler(u32 stacksize)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_set_stacksize_ppu_exception_handler()");
|
||||
sys_lv2dbg.todo("sys_dbg_set_stacksize_ppu_exception_handler(stacksize=0x%x)", stacksize);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_initialize_ppu_exception_handler(s32 prio)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_initialize_ppu_exception_handler()");
|
||||
sys_lv2dbg.todo("sys_dbg_initialize_ppu_exception_handler(prio=0x%x)", prio);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -108,7 +103,7 @@ error_code sys_dbg_finalize_ppu_exception_handler()
|
|||
|
||||
error_code sys_dbg_register_ppu_exception_handler(vm::ptr<dbg_exception_handler_t> callback, u64 ctrl_flags)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_register_ppu_exception_handler()");
|
||||
sys_lv2dbg.todo("sys_dbg_register_ppu_exception_handler(callback=*0x%x, ctrl_flags=0x%x)", callback, ctrl_flags);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -120,170 +115,163 @@ error_code sys_dbg_unregister_ppu_exception_handler()
|
|||
|
||||
error_code sys_dbg_signal_to_ppu_exception_handler(u64 flags)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_signal_to_ppu_exception_handler()");
|
||||
sys_lv2dbg.todo("sys_dbg_signal_to_ppu_exception_handler(flags=0x%x)", flags);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_mutex_information(u32 id, vm::ptr<sys_dbg_mutex_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_mutex_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_mutex_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_cond_information(u32 id, vm::ptr<sys_dbg_cond_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_cond_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_cond_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_rwlock_information(u32 id, vm::ptr<sys_dbg_rwlock_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_rwlock_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_rwlock_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_event_queue_information(u32 id, vm::ptr<sys_dbg_event_queue_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_event_queue_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_event_queue_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_semaphore_information(u32 id, vm::ptr<sys_dbg_semaphore_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_semaphore_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_semaphore_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_lwmutex_information(u32 id, vm::ptr<sys_dbg_lwmutex_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_lwmutex_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_lwmutex_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_lwcond_information(u32 id, vm::ptr<sys_dbg_lwcond_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_lwcond_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_lwcond_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_event_flag_information(u32 id, vm::ptr<sys_dbg_event_flag_information_t> info)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_event_flag_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_event_flag_information(id=0x%x, info=*0x%x)", id, info);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_ppu_thread_ids(vm::ptr<u64> ids, vm::ptr<u64> ids_num, vm::ptr<u64> all_ids_num)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_ppu_thread_ids()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_ppu_thread_ids(ids=*0x%x, ids_num=*0x%x, all_ids_num=*0x%x)", ids, ids_num, all_ids_num);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_spu_thread_group_ids(vm::ptr<u32> ids, vm::ptr<u64> ids_num, vm::ptr<u64> all_ids_num)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_ids()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_ids(ids=*0x%x, ids_num=*0x%x, all_ids_num=*0x%x)", ids, ids_num, all_ids_num);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_spu_thread_ids(u32 group_id, vm::ptr<u32> ids, vm::ptr<u64> ids_num, vm::ptr<u64> all_ids_num)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_ids()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_ids(group_id=0x%x, ids=*0x%x, ids_num=*0x%x, all_ids_num=*0x%x)", group_id, ids, ids_num, all_ids_num);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_ppu_thread_name(u64 id, vm::ptr<char> name)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_ppu_thread_name()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_ppu_thread_name(id=0x%x, name=*0x%x)", id, name);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_spu_thread_name(u32 id, vm::ptr<char> name)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_name()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_name(id=0x%x, name=*0x%x)", id, name);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_spu_thread_group_name(u32 id, vm::ptr<char> name)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_name()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_name(id=0x%x, name=*0x%x)", id, name);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_get_ppu_thread_status(u64 id, vm::ptr<u32> status)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_ppu_thread_status()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_ppu_thread_status(id=0x%x, status=*0x%x)", id, status);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_spu_thread_group_status(u32 id, vm::ptr<u32> status)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_status()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_status(id=0x%x, status=*0x%x)", id, status);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_enable_floating_point_enabled_exception(u64 id, u64 flags, u64 opt1, u64 opt2)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_enable_floating_point_enabled_exception()");
|
||||
sys_lv2dbg.todo("sys_dbg_enable_floating_point_enabled_exception(id=0x%x, flags=0x%x, opt1=0x%x, opt2=0x%x)", id, flags, opt1, opt2);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_disable_floating_point_enabled_exception(u64 id, u64 flags, u64 opt1, u64 opt2)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_disable_floating_point_enabled_exception()");
|
||||
sys_lv2dbg.todo("sys_dbg_disable_floating_point_enabled_exception(id=0x%x, flags=0x%x, opt1=0x%x, opt2=0x%x)", id, flags, opt1, opt2);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_vm_get_page_information(u32 addr, u32 num, vm::ptr<sys_vm_page_information_t> pageinfo)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_vm_get_page_information()");
|
||||
sys_lv2dbg.todo("sys_dbg_vm_get_page_information(addr=0x%x, num=0x%x, pageinfo=*0x%x)", addr, num, pageinfo);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_set_address_to_dabr(u64 addr, u64 ctrl_flag)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_set_address_to_dabr()");
|
||||
sys_lv2dbg.todo("sys_dbg_set_address_to_dabr(addr=0x%x, spu_context=0x%x)", addr, ctrl_flag);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_get_address_from_dabr(vm::ptr<u64> addr, vm::ptr<u64> ctrl_flag)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_address_from_dabr()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_address_from_dabr(addr=*0x%x, spu_context=*0x%x)", addr, ctrl_flag);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_signal_to_coredump_handler(u64 data1, u64 data2, u64 data3)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_signal_to_coredump_handler()");
|
||||
sys_lv2dbg.todo("sys_dbg_signal_to_coredump_handler(data1=0x%x, data2=0x%x, data3=0x%x)", data1, data2, data3);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_mat_set_condition(u32 addr, u64 cond)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_mat_set_condition()");
|
||||
sys_lv2dbg.todo("sys_dbg_mat_set_condition(addr=0x%x, cond=0x%x)", addr, cond);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_mat_get_condition(u32 addr, vm::ptr<u64> condp)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_mat_get_condition()");
|
||||
sys_lv2dbg.todo("sys_dbg_mat_get_condition(addr=0x%x, condp=*0x%x)", addr, condp);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
||||
error_code sys_dbg_get_coredump_params(vm::ptr<s32> param)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_get_coredump_params()");
|
||||
sys_lv2dbg.todo("sys_dbg_get_coredump_params(param=*0x%x)", param);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_dbg_set_mask_to_ppu_exception_handler(u64 mask, u64 flags)
|
||||
{
|
||||
sys_lv2dbg.todo("sys_dbg_set_mask_to_ppu_exception_handler()");
|
||||
sys_lv2dbg.todo("sys_dbg_set_mask_to_ppu_exception_handler(mask=0x%x, flags=0x%x)", mask, flags);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -4,11 +4,6 @@
|
|||
|
||||
LOG_CHANNEL(libnet);
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
s32 sys_net_accept(s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
|
||||
{
|
||||
libnet.todo("accept(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);
|
||||
|
@ -44,19 +39,22 @@ s32 sys_net_gethostbyname()
|
|||
|
||||
s32 sys_net_getpeername(s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("getpeername(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_net_getsockname(s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("getsockname(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_net_getsockopt(s32 s, s32 level, s32 optname, vm::ptr<void> optval, vm::ptr<u32> optlen)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("getsockopt(s=%d, level=%d, optname=%d, optval=*0x%x, optlen=*0x%x)", s, level, optname, optval, optlen);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -141,7 +139,8 @@ s32 sys_net_recvfrom(s32 s, vm::ptr<void> buf, u32 len, s32 flags, vm::ptr<sys_n
|
|||
|
||||
s32 sys_net_recvmsg(s32 s, vm::ptr<sys_net_msghdr> msg, s32 flags)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("recvmsg(s=%d, msg=*0x%x, flags=0x%x)", s, msg, flags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -154,7 +153,8 @@ s32 sys_net_send(s32 s, vm::cptr<void> buf, u32 len, s32 flags)
|
|||
|
||||
s32 sys_net_sendmsg(s32 s, vm::cptr<sys_net_msghdr> msg, s32 flags)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("sendmsg(s=%d, msg=*0x%x, flags=0x%x)", s, msg, flags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -195,7 +195,8 @@ s32 sys_net_socketclose(s32 s)
|
|||
|
||||
s32 sys_net_socketpoll(vm::ptr<sys_net_pollfd> fds, s32 nfds, s32 ms)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("socketpoll(fds=*0x%x, nfds=%d, ms=%d)", fds, nfds, ms);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -270,7 +271,8 @@ s32 sys_net_get_sockinfo(s32 s, vm::ptr<sys_net_sockinfo_t> p, s32 n)
|
|||
|
||||
s32 sys_net_close_dump(s32 id, vm::ptr<s32> pflags)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("sys_net_close_dump(id=%d, pflags=*0x%x)", id, pflags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -308,7 +310,8 @@ s32 sys_net_show_route()
|
|||
|
||||
s32 sys_net_read_dump(s32 id, vm::ptr<void> buf, s32 len, vm::ptr<s32> pflags)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("sys_net_read_dump(id=%d, buf=*0x%x, len=%d, pflags=*0x%x)", id, buf, len, pflags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -344,7 +347,8 @@ s32 sys_net_get_sockinfo_ex()
|
|||
|
||||
s32 sys_net_open_dump(s32 len, s32 flags)
|
||||
{
|
||||
UNIMPLEMENTED_FUNC(libnet);
|
||||
libnet.todo("sys_net_open_dump(len=%d, flags=0x%x)", len, flags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -38,11 +38,13 @@ void ppubreak(ppu_thread& ppu)
|
|||
}
|
||||
}
|
||||
|
||||
#define PPU_WRITE(type, addr, value) vm::write<type>(addr, value, &ppu);
|
||||
#define PPU_WRITE_8(addr, value) vm::write8(addr, value, &ppu);
|
||||
#define PPU_WRITE_16(addr, value) vm::write16(addr, value, &ppu);
|
||||
#define PPU_WRITE_32(addr, value) vm::write32(addr, value, &ppu);
|
||||
#define PPU_WRITE_64(addr, value) vm::write64(addr, value, &ppu);
|
||||
#else
|
||||
#define PPU_WRITE(type, addr, value) vm::write<type>(addr, value);
|
||||
#define PPU_WRITE_8(addr, value) vm::write8(addr, value);
|
||||
#define PPU_WRITE_16(addr, value) vm::write16(addr, value);
|
||||
#define PPU_WRITE_32(addr, value) vm::write32(addr, value);
|
||||
|
@ -4573,7 +4575,7 @@ auto STVX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull;
|
||||
vm::_ref<v128>(vm::cast(addr)) = ppu.vr[op.vs];
|
||||
PPU_WRITE(v128, vm::cast(addr), ppu.vr[op.vs]);
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -5074,7 +5076,7 @@ auto STVXL()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull;
|
||||
vm::_ref<v128>(vm::cast(addr)) = ppu.vr[op.vs];
|
||||
PPU_WRITE(v128, vm::cast(addr), ppu.vr[op.vs]);
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -5363,7 +5365,7 @@ auto STDBRX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
|
||||
vm::_ref<le_t<u64>>(vm::cast(addr)) = ppu.gpr[op.rs];
|
||||
PPU_WRITE(le_t<u64>, vm::cast(addr), ppu.gpr[op.rs]);
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -5402,7 +5404,7 @@ auto STWBRX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
|
||||
vm::_ref<le_t<u32>>(vm::cast(addr)) = static_cast<u32>(ppu.gpr[op.rs]);
|
||||
PPU_WRITE(le_t<u32>, vm::cast(addr), static_cast<u32>(ppu.gpr[op.rs]));
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -5415,7 +5417,7 @@ auto STFSX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
|
||||
vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]);
|
||||
PPU_WRITE(f32, vm::cast(addr), static_cast<float>(ppu.fpr[op.frs]));
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -5446,7 +5448,7 @@ auto STFSUX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb];
|
||||
vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]);
|
||||
PPU_WRITE(f32, vm::cast(addr), static_cast<float>(ppu.fpr[op.frs]));
|
||||
ppu.gpr[op.ra] = addr;
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
|
@ -5496,7 +5498,7 @@ auto STFDX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
|
||||
vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs];
|
||||
PPU_WRITE(f64, vm::cast(addr), ppu.fpr[op.frs]);
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -5509,7 +5511,7 @@ auto STFDUX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb];
|
||||
vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs];
|
||||
PPU_WRITE(f64, vm::cast(addr), ppu.fpr[op.frs]);
|
||||
ppu.gpr[op.ra] = addr;
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
|
@ -5664,7 +5666,7 @@ auto STHBRX()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
|
||||
vm::_ref<le_t<u16>>(vm::cast(addr)) = static_cast<u16>(ppu.gpr[op.rs]);
|
||||
PPU_WRITE(le_t<u16>, vm::cast(addr), static_cast<u16>(ppu.gpr[op.rs]));
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -6054,7 +6056,7 @@ auto STFS()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16;
|
||||
vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]);
|
||||
PPU_WRITE(f32, vm::cast(addr), static_cast<float>(ppu.fpr[op.frs]));
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -6067,7 +6069,7 @@ auto STFSU()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = ppu.gpr[op.ra] + op.simm16;
|
||||
vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]);
|
||||
PPU_WRITE(f32, vm::cast(addr), static_cast<float>(ppu.fpr[op.frs]));
|
||||
ppu.gpr[op.ra] = addr;
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
|
@ -6081,7 +6083,7 @@ auto STFD()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16;
|
||||
vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs];
|
||||
PPU_WRITE(f64, vm::cast(addr), ppu.fpr[op.frs]);
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
}
|
||||
|
@ -6094,7 +6096,7 @@ auto STFDU()
|
|||
|
||||
static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) {
|
||||
const u64 addr = ppu.gpr[op.ra] + op.simm16;
|
||||
vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs];
|
||||
PPU_WRITE(f64, vm::cast(addr), ppu.fpr[op.frs]);
|
||||
ppu.gpr[op.ra] = addr;
|
||||
};
|
||||
RETURN_(ppu, op);
|
||||
@ -3380,7 +3380,10 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
|
|||
fmt::throw_exception("PPU %s: Unaligned address: 0x%08x", sizeof(T) == 4 ? "STWCX" : "STDCX", addr);
|
||||
}
|
||||
|
||||
auto& data = vm::_ref<atomic_be_t<u64>>(addr & -8);
|
||||
// Notify breakpoint handler
|
||||
vm::write<void>(addr, T{0}, &ppu);
|
||||
|
||||
auto& data = const_cast<atomic_be_t<u64>&>(vm::_ref<atomic_be_t<u64>>(addr & -8));
|
||||
auto& res = vm::reservation_acquire(addr);
|
||||
const u64 rtime = ppu.rtime;
|
||||
|
@ -4665,7 +4665,7 @@ void PPUTranslator::MTFSF(ppu_opcode_t op)

for (u32 i = 16; i < 20; i++)
{
if (i != 1 && i != 2 && (op.flm & (128 >> (i / 4))) != 0)
if ((op.flm & (128 >> (i / 4))) != 0)
{
SetFPSCRBit(i, Trunc(m_ir->CreateLShr(value, i ^ 31), GetType<bool>()), false);
}
@ -3879,14 +3879,14 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
{
if (addr - spurs_addr <= 0x80)
{
mov_rdata(vm::_ref<spu_rdata_t>(addr), to_write);
mov_rdata(*vm::_ptr<spu_rdata_t>(addr), to_write);
res += 64;
return true;
}
}
else if (!g_use_rtm)
{
vm::_ref<atomic_t<u32>>(addr) += 0;
*vm::_ptr<atomic_t<u32>>(addr) += 0;
}

if (g_use_rtm) [[likely]]
@ -373,7 +373,7 @@ const std::array<std::pair<ppu_intrp_func_t, std::string_view>, 1024> g_ppu_sysc

uns_func, uns_func, uns_func, uns_func, uns_func, //255-259 UNS

NULL_FUNC(sys_spu_image_open_by_fd), //260 (0x104)
BIND_SYSC(sys_spu_image_open_by_fd), //260 (0x104)

uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //261-269 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //270-279 UNS
@ -502,7 +502,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id

if (!cond || !mutex)
{
return CELL_ESRCH;
return { CELL_ESRCH, fmt::format("lwmutex_id: 0x%x, lwcond_id: 0x%x", lwmutex_id, lwcond_id) };
}

if (ppu.state & cpu_flag::again)
@ -183,7 +183,12 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)

if (!mutex)
{
return CELL_ESRCH;
if (lwmutex_id >> 24 == lv2_lwmutex::id_base >> 24)
{
return { CELL_ESRCH, lwmutex_id };
}

return { CELL_ESRCH, "Invalid ID" };
}

if (mutex.ret)
@ -313,7 +318,12 @@ error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id)

if (!mutex)
{
return CELL_ESRCH;
if (lwmutex_id >> 24 == lv2_lwmutex::id_base >> 24)
{
return { CELL_ESRCH, lwmutex_id };
}

return { CELL_ESRCH, "Invalid ID" };
}

if (!mutex.ret)
@ -274,7 +274,7 @@ std::optional<s32> lv2_socket_p2p::sendto(s32 flags, const std::vector<u8>& buf,
}

ensure(opt_sn_addr);
ensure(socket); // ensures it has been bound
ensure(native_socket); // ensures it has been bound
ensure(buf.size() <= static_cast<usz>(65535 - VPORT_P2P_HEADER_SIZE)); // catch games using full payload for future fragmentation implementation if necessary
const u16 p2p_port = reinterpret_cast<const sys_net_sockaddr_in*>(&*opt_sn_addr)->sin_port;
const u16 p2p_vport = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&*opt_sn_addr)->sin_vport;
@ -181,7 +181,7 @@ error_code sys_rsx_memory_allocate(cpu_thread& cpu, vm::ptr<u32> mem_handle, vm:
|
|||
|
||||
if (u32 addr = rsx::get_current_renderer()->driver_info)
|
||||
{
|
||||
vm::_ref<RsxDriverInfo>(addr).memory_size = size;
|
||||
vm::_ptr<RsxDriverInfo>(addr)->memory_size = size;
|
||||
}
|
||||
|
||||
*mem_addr = rsx::constants::local_mem_base;
|
||||
|
@ -265,7 +265,7 @@ error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm
|
|||
*lpar_driver_info = dma_address + 0x100000;
|
||||
*lpar_reports = dma_address + 0x200000;
|
||||
|
||||
auto &reports = vm::_ref<RsxReports>(vm::cast(*lpar_reports));
|
||||
auto &reports = *vm::_ptr<RsxReports>(vm::cast(*lpar_reports));
|
||||
std::memset(&reports, 0, sizeof(RsxReports));
|
||||
|
||||
for (usz i = 0; i < std::size(reports.notify); ++i)
|
||||
|
@ -273,10 +273,10 @@ error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm
|
|||
|
||||
for (usz i = 0; i < std::size(reports.semaphore); i += 4)
|
||||
{
|
||||
reports.semaphore[i + 0].val.raw() = 0x1337C0D3;
|
||||
reports.semaphore[i + 1].val.raw() = 0x1337BABE;
|
||||
reports.semaphore[i + 2].val.raw() = 0x1337BEEF;
|
||||
reports.semaphore[i + 3].val.raw() = 0x1337F001;
|
||||
reports.semaphore[i + 0] = 0x1337C0D3;
|
||||
reports.semaphore[i + 1] = 0x1337BABE;
|
||||
reports.semaphore[i + 2] = 0x1337BEEF;
|
||||
reports.semaphore[i + 3] = 0x1337F001;
|
||||
}
|
||||
|
||||
for (usz i = 0; i < std::size(reports.report); ++i)
|
||||
|
@ -286,7 +286,7 @@ error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm
|
|||
reports.report[i].pad = -1;
|
||||
}
|
||||
|
||||
auto &driverInfo = vm::_ref<RsxDriverInfo>(vm::cast(*lpar_driver_info));
|
||||
auto &driverInfo = *vm::_ptr<RsxDriverInfo>(vm::cast(*lpar_driver_info));
|
||||
|
||||
std::memset(&driverInfo, 0, sizeof(RsxDriverInfo));
|
||||
|
||||
|
@ -303,7 +303,7 @@ error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm
|
|||
|
||||
render->driver_info = vm::cast(*lpar_driver_info);
|
||||
|
||||
auto &dmaControl = vm::_ref<RsxDmaControl>(vm::cast(*lpar_dma_control));
|
||||
auto &dmaControl = *vm::_ptr<RsxDmaControl>(vm::cast(*lpar_dma_control));
|
||||
dmaControl.get = 0;
|
||||
dmaControl.put = 0;
|
||||
dmaControl.ref = 0; // Set later to -1 by cellGcmSys
|
||||
|
@ -527,7 +527,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
|
|||
return { CELL_EINVAL, "context_id is 0x%x", context_id };
|
||||
}
|
||||
|
||||
auto &driverInfo = vm::_ref<RsxDriverInfo>(render->driver_info);
|
||||
auto &driverInfo = *vm::_ptr<RsxDriverInfo>(render->driver_info);
|
||||
switch (package_id)
|
||||
{
|
||||
case 0x001: // FIFO
|
||||
|
@ -862,7 +862,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
|
|||
|
||||
// seems gcmSysWaitLabel uses this offset, so let's set it to 0 every flip
|
||||
// NOTE: Realhw resets 16 bytes of this semaphore for some reason
|
||||
vm::_ref<atomic_t<u128>>(render->label_addr + 0x10).store(u128{});
|
||||
vm::_ptr<atomic_t<u128>>(render->label_addr + 0x10)->store(u128{});
|
||||
|
||||
render->send_event(0, SYS_RSX_EVENT_FLIP_BASE << 1, 0);
|
||||
break;
|
||||
@ -87,10 +87,7 @@ struct RsxDmaControl
be_t<u32> unk1;
};

struct RsxSemaphore
{
atomic_be_t<u32> val;
};
using RsxSemaphore = be_t<u32>;

struct alignas(16) RsxNotify
{
@ -66,13 +66,14 @@ void fmt_class_string<spu_stop_syscall>::format(std::string& out, u64 arg)
|
|||
});
|
||||
}
|
||||
|
||||
void sys_spu_image::load(const fs::file& stream)
|
||||
bool sys_spu_image::load(const fs::file& stream)
|
||||
{
|
||||
const spu_exec_object obj{stream, 0, elf_opt::no_sections + elf_opt::no_data};
|
||||
|
||||
if (obj != elf_error::ok)
|
||||
{
|
||||
fmt::throw_exception("Failed to load SPU image: %s", obj.get_error());
|
||||
sys_spu.error("Failed to load SPU image: %s", obj.get_error());
|
||||
return false;
|
||||
}
|
||||
|
||||
for (const auto& shdr : obj.shdrs)
|
||||
|
@ -94,7 +95,7 @@ void sys_spu_image::load(const fs::file& stream)
|
|||
const s32 nsegs = sys_spu_image::get_nsegs(obj.progs);
|
||||
|
||||
const u32 mem_size = nsegs * sizeof(sys_spu_segment) + ::size32(stream);
|
||||
const vm::ptr<sys_spu_segment> segs = vm::cast(vm::alloc(mem_size, vm::main));
|
||||
const vm::ptr<sys_spu_segment> segs = vm::cast(vm::reserve_map(vm::user64k, 0, 0x10000000)->alloc(mem_size));
|
||||
|
||||
//const u32 entry = obj.header.e_entry;
|
||||
|
||||
|
@ -116,6 +117,7 @@ void sys_spu_image::load(const fs::file& stream)
|
|||
this->segs = vm::null;
|
||||
|
||||
vm::page_protect(segs.addr(), utils::align(mem_size, 4096), 0, 0, vm::page_writable);
|
||||
return true;
|
||||
}
|
||||
|
||||
void sys_spu_image::free() const
|
||||
|
@ -388,19 +390,43 @@ struct spu_limits_t
|
|||
|
||||
SAVESTATE_INIT_POS(47);
|
||||
|
||||
bool check(const limits_data& init) const
|
||||
bool check_valid(const limits_data& init) const
|
||||
{
|
||||
const u32 physical_spus_count = init.physical;
|
||||
const u32 controllable_spu_count = init.controllable;
|
||||
|
||||
const u32 spu_limit = init.spu_limit != umax ? init.spu_limit : max_spu;
|
||||
const u32 raw_limit = init.raw_limit != umax ? init.raw_limit : max_raw;
|
||||
|
||||
if (spu_limit + raw_limit > 6 || physical_spus_count > spu_limit || controllable_spu_count > spu_limit)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool check_busy(const limits_data& init) const
|
||||
{
|
||||
u32 physical_spus_count = init.physical;
|
||||
u32 raw_spu_count = init.raw_spu;
|
||||
u32 controllable_spu_count = init.controllable;
|
||||
u32 system_coop = init.controllable != 0 && init.physical != 0 ? 1 : 0;
|
||||
|
||||
const u32 spu_limit = init.spu_limit != umax ? init.spu_limit : max_spu;
|
||||
const u32 raw_limit = init.raw_limit != umax ? init.raw_limit : max_raw;
|
||||
|
||||
idm::select<lv2_spu_group>([&](u32, lv2_spu_group& group)
|
||||
{
|
||||
if (group.has_scheduler_context)
|
||||
if (group.type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM)
|
||||
{
|
||||
controllable_spu_count = std::max(controllable_spu_count, group.max_num);
|
||||
system_coop++;
|
||||
controllable_spu_count = std::max<u32>(controllable_spu_count, 1);
|
||||
physical_spus_count += group.max_num - 1;
|
||||
}
|
||||
else if (group.has_scheduler_context)
|
||||
{
|
||||
controllable_spu_count = std::max<u32>(controllable_spu_count, group.max_num);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -410,11 +436,18 @@ struct spu_limits_t
|
|||
|
||||
raw_spu_count += spu_thread::g_raw_spu_ctr;
|
||||
|
||||
if (spu_limit + raw_limit > 6 || raw_spu_count > raw_limit || physical_spus_count >= spu_limit || physical_spus_count + controllable_spu_count > spu_limit)
|
||||
// physical_spus_count >= spu_limit returns EBUSY, not EINVAL!
|
||||
if (spu_limit + raw_limit > 6 || raw_spu_count > raw_limit || physical_spus_count >= spu_limit || physical_spus_count > spu_limit || controllable_spu_count > spu_limit)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (system_coop > 1)
|
||||
{
|
||||
// Cannot have more than one SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM group at a time
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
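The split above separates parameter validation (check_valid) from resource availability (check_busy), and the later hunk in sys_spu_thread_group_create applies them in that order, mapping failures to CELL_EINVAL and CELL_EBUSY respectively. A small, hedged sketch of that two-stage pattern in standalone C++ follows; the types and numbers are illustrative assumptions, not RPCS3 code.

#include <cstdio>

enum class limit_result { ok, invalid, busy };

struct limits
{
	int in_use = 4;
	int max_total = 6;

	// Stage 1: reject requests that could never be satisfied.
	bool check_valid(int requested) const { return requested >= 0 && requested <= max_total; }

	// Stage 2: reject requests that exceed what is currently free.
	bool check_busy(int requested) const { return in_use + requested <= max_total; }
};

limit_result acquire(const limits& l, int requested)
{
	if (!l.check_valid(requested))
		return limit_result::invalid; // maps to CELL_EINVAL in the hunk below
	if (!l.check_busy(requested))
		return limit_result::busy;    // maps to CELL_EBUSY
	return limit_result::ok;
}

int main()
{
	limits l{};
	std::printf("%d %d %d\n",
		static_cast<int>(acquire(l, 1)),  // ok
		static_cast<int>(acquire(l, 3)),  // busy
		static_cast<int>(acquire(l, 7))); // invalid
}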
|
||||
|
@ -437,7 +470,7 @@ error_code sys_spu_initialize(ppu_thread& ppu, u32 max_usable_spu, u32 max_raw_s
|
|||
|
||||
std::lock_guard lock(limits.mutex);
|
||||
|
||||
if (!limits.check(limits_data{.spu_limit = max_usable_spu - max_raw_spu, .raw_limit = max_raw_spu}))
|
||||
if (!limits.check_busy(limits_data{.spu_limit = max_usable_spu - max_raw_spu, .raw_limit = max_raw_spu}))
|
||||
{
|
||||
return CELL_EBUSY;
|
||||
}
|
||||
|
@ -486,15 +519,52 @@ error_code sys_spu_image_open(ppu_thread& ppu, vm::ptr<sys_spu_image> img, vm::c
|
|||
|
||||
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
|
||||
|
||||
const fs::file elf_file = decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic));
|
||||
const fs::file elf_file = decrypt_self(file, reinterpret_cast<const u8*>(&klic));
|
||||
|
||||
if (!elf_file)
|
||||
if (!elf_file || !img->load(elf_file))
|
||||
{
|
||||
sys_spu.error("sys_spu_image_open(): file %s is illegal for SPU image!", path);
|
||||
return {CELL_ENOEXEC, path};
|
||||
}
|
||||
|
||||
img->load(elf_file);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code sys_spu_image_open_by_fd(ppu_thread& ppu, vm::ptr<sys_spu_image> img, s32 fd, s64 offset)
|
||||
{
|
||||
ppu.state += cpu_flag::wait;
|
||||
|
||||
sys_spu.warning("sys_spu_image_open_by_fd(img=*0x%x, fd=%d, offset=0x%x)", img, fd, offset);
|
||||
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
return CELL_EBADF;
|
||||
}
|
||||
|
||||
if (offset < 0)
|
||||
{
|
||||
return CELL_ENOEXEC;
|
||||
}
|
||||
|
||||
std::lock_guard lock(file->mp->mutex);
|
||||
|
||||
if (!file->file)
|
||||
{
|
||||
return CELL_EBADF;
|
||||
}
|
||||
|
||||
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
|
||||
|
||||
const fs::file elf_file = decrypt_self(lv2_file::make_view(file, offset), reinterpret_cast<const u8*>(&klic));
|
||||
|
||||
if (!img->load(elf_file))
|
||||
{
|
||||
sys_spu.error("sys_spu_image_open(): file %s is illegal for SPU image!", file->name.data());
|
||||
return {CELL_ENOEXEC, file->name.data()};
|
||||
}
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -526,7 +596,7 @@ error_code _sys_spu_image_close(ppu_thread& ppu, vm::ptr<sys_spu_image> img)
|
|||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
ensure(vm::dealloc(handle->segs.addr(), vm::main));
|
||||
ensure(vm::dealloc(handle->segs.addr(), vm::user64k));
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -845,23 +915,24 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num
|
|||
switch (type)
|
||||
{
|
||||
case 0x0:
|
||||
case 0x4:
|
||||
case 0x18:
|
||||
case SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER:
|
||||
case SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT:
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
case 0x20:
|
||||
case 0x22:
|
||||
case 0x24:
|
||||
case 0x26:
|
||||
case SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM:
|
||||
case (SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM | 0x2):
|
||||
case (SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM | 0x4):
|
||||
case (SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM | 0x6):
|
||||
{
|
||||
if (type == 0x22 || type == 0x26)
|
||||
{
|
||||
needs_root = true;
|
||||
}
|
||||
|
||||
min_threads = 2; // That's what appears from reversing
|
||||
// For a single thread that is being shared with system (the cooperative victim)
|
||||
min_threads = 2;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -901,7 +972,7 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num
|
|||
|
||||
if (is_system_coop)
|
||||
{
|
||||
// Constant size, unknown what it means
|
||||
// For a single thread that is being shared with system (the cooperative victim)
|
||||
mem_size = SPU_LS_SIZE;
|
||||
}
|
||||
else if (type & SYS_SPU_THREAD_GROUP_TYPE_NON_CONTEXT)
|
||||
|
@ -952,7 +1023,29 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num
|
|||
|
||||
std::unique_lock lock(limits.mutex);
|
||||
|
||||
if (!limits.check(use_scheduler ? limits_data{.controllable = num} : limits_data{.physical = num}))
|
||||
limits_data group_limits{};
|
||||
|
||||
if (is_system_coop)
|
||||
{
|
||||
group_limits.controllable = 1;
|
||||
group_limits.physical = num - 1;
|
||||
}
|
||||
else if (use_scheduler)
|
||||
{
|
||||
group_limits.controllable = num;
|
||||
}
|
||||
else
|
||||
{
|
||||
group_limits.physical = num;
|
||||
}
|
||||
|
||||
if (!limits.check_valid(group_limits))
|
||||
{
|
||||
ct->free(mem_size);
|
||||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
if (!limits.check_busy(group_limits))
|
||||
{
|
||||
ct->free(mem_size);
|
||||
return CELL_EBUSY;
|
||||
|
@ -1135,6 +1228,11 @@ error_code sys_spu_thread_group_suspend(ppu_thread& ppu, u32 id)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM)
|
||||
{
|
||||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::lock_guard lock(group->mutex);
|
||||
|
||||
CellError error;
|
||||
|
@ -1218,6 +1316,11 @@ error_code sys_spu_thread_group_resume(ppu_thread& ppu, u32 id)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM)
|
||||
{
|
||||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
struct notify_on_exit
|
||||
{
|
||||
usz index = umax;
|
||||
|
@ -1565,6 +1668,11 @@ error_code sys_spu_thread_group_set_priority(ppu_thread& ppu, u32 id, s32 priori
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM)
|
||||
{
|
||||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
group->prio.atomic_op([&](std::common_type_t<decltype(lv2_spu_group::prio)>& prio)
|
||||
{
|
||||
prio.prio = priority;
|
||||
|
@ -1592,6 +1700,11 @@ error_code sys_spu_thread_group_get_priority(ppu_thread& ppu, u32 id, vm::ptr<s3
|
|||
{
|
||||
*priority = 0;
|
||||
}
|
||||
else if (group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM)
|
||||
{
|
||||
// Regardless of the value being set in group creation
|
||||
*priority = 15;
|
||||
}
|
||||
else
|
||||
{
|
||||
*priority = group->prio.load().prio;
|
||||
|
@ -1751,7 +1864,7 @@ error_code sys_spu_thread_write_spu_mb(ppu_thread& ppu, u32 id, u32 value)
|
|||
{
|
||||
ppu.state += cpu_flag::wait;
|
||||
|
||||
sys_spu.warning("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value);
|
||||
sys_spu.trace("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value);
|
||||
|
||||
const auto [thread, group] = lv2_spu_group::get_thread(id);
|
||||
|
||||
|
@ -2275,7 +2388,7 @@ error_code sys_raw_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<void> at
|
|||
|
||||
std::lock_guard lock(limits.mutex);
|
||||
|
||||
if (!limits.check(limits_data{.raw_spu = 1}))
|
||||
if (!limits.check_busy(limits_data{.raw_spu = 1}))
|
||||
{
|
||||
return CELL_EAGAIN;
|
||||
}
|
||||
|
@ -2331,7 +2444,7 @@ error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<voi
|
|||
|
||||
std::lock_guard lock(limits.mutex);
|
||||
|
||||
if (!limits.check(limits_data{.raw_spu = 1}))
|
||||
if (!limits.check_busy(limits_data{.raw_spu = 1}))
|
||||
{
|
||||
return CELL_EAGAIN;
|
||||
}
|
||||
|
|
|
@ -225,7 +225,7 @@ struct sys_spu_image
|
|||
return num_segs;
|
||||
}
|
||||
|
||||
void load(const fs::file& stream);
|
||||
bool load(const fs::file& stream);
|
||||
void free() const;
|
||||
static void deploy(u8* loc, std::span<const sys_spu_segment> segs, bool is_verbose = true);
|
||||
};
|
||||
|
@ -354,6 +354,7 @@ class ppu_thread;
|
|||
error_code sys_spu_initialize(ppu_thread&, u32 max_usable_spu, u32 max_raw_spu);
|
||||
error_code _sys_spu_image_get_information(ppu_thread&, vm::ptr<sys_spu_image> img, vm::ptr<u32> entry_point, vm::ptr<s32> nsegs);
|
||||
error_code sys_spu_image_open(ppu_thread&, vm::ptr<sys_spu_image> img, vm::cptr<char> path);
|
||||
error_code sys_spu_image_open_by_fd(ppu_thread&, vm::ptr<sys_spu_image> img, s32 fd, s64 offset);
|
||||
error_code _sys_spu_image_import(ppu_thread&, vm::ptr<sys_spu_image> img, u32 src, u32 size, u32 arg4);
|
||||
error_code _sys_spu_image_close(ppu_thread&, vm::ptr<sys_spu_image> img);
|
||||
error_code _sys_spu_image_get_segments(ppu_thread&, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_segment> segments, s32 nseg);
|
||||
|
|
|
@ -553,7 +553,7 @@ error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2, vm::ptr<u64> out_s
case 0x17002:
{
// TODO
vm::_ref<u64>(a5) = a4; // Write back size of buffer
vm::write<u64>(a5, a4); // Write back size of buffer
break;
}
// Get EID size
@ -307,16 +307,11 @@ void usb_device_emulated::control_transfer(u8 bmRequestType, u8 bRequest, u16 wV
}
}

// Temporarily
#ifndef _MSC_VER
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

void usb_device_emulated::interrupt_transfer(u32 buf_size, u8* buf, u32 endpoint, UsbTransfer* transfer)
void usb_device_emulated::interrupt_transfer(u32 /*buf_size*/, u8* /*buf*/, u32 /*endpoint*/, UsbTransfer* /*transfer*/)
{
}

void usb_device_emulated::isochronous_transfer(UsbTransfer* transfer)
void usb_device_emulated::isochronous_transfer(UsbTransfer* /*transfer*/)
{
}
@ -8,14 +8,14 @@

#include "util/to_endian.hpp"

class ppu_thread;

#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
#include "rpcs3qt/breakpoint_handler.h"
#include "util/logs.hpp"

LOG_CHANNEL(debugbp_log, "DebugBP");

class ppu_thread;

void ppubreak(ppu_thread& ppu);
#endif

@ -284,9 +284,10 @@ namespace vm
|
|||
}
|
||||
|
||||
// Convert specified PS3 address to a reference of specified (possibly converted to BE) type
|
||||
template <typename T, typename U> inline to_be_t<T>& _ref(const U& addr)
|
||||
// Const lvalue: prevent abused writes
|
||||
template <typename T, typename U> inline const to_be_t<T>& _ref(const U& addr)
|
||||
{
|
||||
return *static_cast<to_be_t<T>*>(base(addr));
|
||||
return *static_cast<const to_be_t<T>*>(base(addr));
|
||||
}
|
||||
|
||||
// Access memory bypassing memory protection
|
||||
|
@@ -302,42 +303,43 @@ namespace vm
 	}

 #ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
-	inline void write16(u32 addr, be_t<u16> value, ppu_thread* ppu = nullptr)
+	template <typename T, typename U = T>
+	inline void write(u32 addr, U value, ppu_thread* ppu = nullptr)
 #else
-	inline void write16(u32 addr, be_t<u16> value)
+	template <typename T, typename U = T>
+	inline void write(u32 addr, U value, ppu_thread* = nullptr)
 #endif
 	{
-		_ref<u16>(addr) = value;
+		using dest_t = std::conditional_t<std::is_void_v<T>, U, T>;
+
+		if constexpr (!std::is_void_v<T>)
+		{
+			*_ptr<dest_t>(addr) = value;
+		}

 #ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
 		if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
 		{
-			debugbp_log.success("BPMW: breakpoint writing(16) 0x%x at 0x%x", value, addr);
+			debugbp_log.success("BPMW: breakpoint writing(%d) 0x%x at 0x%x",
+				sizeof(dest_t) * CHAR_BIT, value, addr);
 			ppubreak(*ppu);
 		}
 #endif
 	}

+	inline void write16(u32 addr, be_t<u16> value, ppu_thread* ppu = nullptr)
+	{
+		write<be_t<u16>>(addr, value, ppu);
+	}
+
 	inline const be_t<u32>& read32(u32 addr)
 	{
 		return _ref<u32>(addr);
 	}

-#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
 	inline void write32(u32 addr, be_t<u32> value, ppu_thread* ppu = nullptr)
-#else
-	inline void write32(u32 addr, be_t<u32> value)
-#endif
 	{
-		_ref<u32>(addr) = value;
-
-#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
-		if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
-		{
-			debugbp_log.success("BPMW: breakpoint writing(32) 0x%x at 0x%x", value, addr);
-			ppubreak(*ppu);
-		}
-#endif
+		write<be_t<u32>>(addr, value, ppu);
 	}

 	inline const be_t<u64>& read64(u32 addr)
@@ -345,21 +347,9 @@ namespace vm
 		return _ref<u64>(addr);
 	}

-#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
 	inline void write64(u32 addr, be_t<u64> value, ppu_thread* ppu = nullptr)
-#else
-	inline void write64(u32 addr, be_t<u64> value)
-#endif
 	{
-		_ref<u64>(addr) = value;
-
-#ifdef RPCS3_HAS_MEMORY_BREAKPOINTS
-		if (ppu && g_breakpoint_handler.HasBreakpoint(addr, breakpoint_types::bp_write))
-		{
-			debugbp_log.success("BPMW: breakpoint writing(64) 0x%x at 0x%x", value, addr);
-			ppubreak(*ppu);
-		}
-#endif
+		write<be_t<u64>>(addr, value, ppu);
 	}

 	void init();
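A quick usage sketch of the unified vm::write<T> template introduced above; the call sites and values here are illustrative, not taken from this diff, and assume the be_t/_ptr helpers shown in the hunk:

	// Same path the legacy helpers now take:
	vm::write<u64>(a5, a4);                   // 64-bit big-endian store, breakpoint-aware
	vm::write<be_t<u32>>(addr, value, &ppu);  // what the write32() wrapper forwards to
	vm::write16(addr, value);                 // thin wrapper over write<be_t<u16>>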
@@ -6,6 +6,14 @@
 #include "util/v128.hpp"
 #include "util/simd.hpp"

+#if defined(ARCH_ARM64)
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#endif
+#undef FORCE_INLINE
+#include "Emu/CPU/sse2neon.h"
+#endif
+
 #if !defined(_MSC_VER)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wold-style-cast"
@ -404,57 +412,70 @@ namespace
|
|||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
NEVER_INLINE std::tuple<T, T, u32> upload_untouched_skip_restart(std::span<to_be_t<const T>> src, std::span<T> dst, T restart_index)
|
||||
{
|
||||
T min_index = index_limit<T>();
|
||||
T max_index = 0;
|
||||
u32 written = 0;
|
||||
u32 length = ::size32(src);
|
||||
|
||||
for (u32 i = written; i < length; ++i)
|
||||
template <typename T>
|
||||
NEVER_INLINE std::tuple<T, T, u32> upload_untouched_skip_restart(std::span<to_be_t<const T>> src, std::span<T> dst, T restart_index)
|
||||
{
|
||||
T index = src[i];
|
||||
if (index != restart_index)
|
||||
T min_index = index_limit<T>();
|
||||
T max_index = 0;
|
||||
u32 written = 0;
|
||||
u32 length = ::size32(src);
|
||||
|
||||
for (u32 i = written; i < length; ++i)
|
||||
{
|
||||
dst[written++] = min_max(min_index, max_index, index);
|
||||
T index = src[i];
|
||||
if (index != restart_index)
|
||||
{
|
||||
dst[written++] = min_max(min_index, max_index, index);
|
||||
}
|
||||
}
|
||||
|
||||
return std::make_tuple(min_index, max_index, written);
|
||||
}
|
||||
|
||||
return std::make_tuple(min_index, max_index, written);
|
||||
}
|
||||
template<typename T, typename U = remove_be_t<T>>
|
||||
requires std::is_same_v<U, u32> || std::is_same_v<U, u16>
|
||||
std::tuple<T, T, u32> upload_untouched(std::span<to_be_t<const T>> src, std::span<T> dst, rsx::primitive_type draw_mode, bool is_primitive_restart_enabled, u32 primitive_restart_index)
|
||||
{
|
||||
if constexpr (std::is_same_v<T, u16>)
|
||||
{
|
||||
if (primitive_restart_index > 0xffff)
|
||||
{
|
||||
// Will never trip index restart, unpload untouched
|
||||
is_primitive_restart_enabled = false;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
std::tuple<T, T, u32> upload_untouched(std::span<to_be_t<const T>> src, std::span<T> dst, rsx::primitive_type draw_mode, bool is_primitive_restart_enabled, u32 primitive_restart_index)
|
||||
{
|
||||
if (!is_primitive_restart_enabled)
|
||||
{
|
||||
return untouched_impl::upload_untouched(src, dst);
|
||||
}
|
||||
else if constexpr (std::is_same_v<T, u16>)
|
||||
{
|
||||
if (primitive_restart_index > 0xffff)
|
||||
if (!is_primitive_restart_enabled)
|
||||
{
|
||||
return untouched_impl::upload_untouched(src, dst);
|
||||
}
|
||||
else if (is_primitive_disjointed(draw_mode))
|
||||
|
||||
if (is_primitive_disjointed(draw_mode))
|
||||
{
|
||||
return upload_untouched_skip_restart(src, dst, static_cast<u16>(primitive_restart_index));
|
||||
return upload_untouched_skip_restart(src, dst, static_cast<U>(primitive_restart_index));
|
||||
}
|
||||
else
|
||||
|
||||
return primitive_restart_impl::upload_untouched(src, dst, static_cast<U>(primitive_restart_index));
|
||||
}
|
||||
|
||||
void iota16(u16* dst, u32 count)
|
||||
{
|
||||
unsigned i = 0;
|
||||
#if defined(ARCH_X64) || defined(ARCH_ARM64)
|
||||
const unsigned step = 8; // We do 8 entries per step
|
||||
const __m128i vec_step = _mm_set1_epi16(8); // Constant to increment the raw values
|
||||
__m128i values = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);
|
||||
__m128i* vec_ptr = reinterpret_cast<__m128i*>(dst);
|
||||
|
||||
for (; (i + step) <= count; i += step, vec_ptr++)
|
||||
{
|
||||
return primitive_restart_impl::upload_untouched(src, dst, static_cast<u16>(primitive_restart_index));
|
||||
_mm_stream_si128(vec_ptr, values);
|
||||
_mm_add_epi16(values, vec_step);
|
||||
}
|
||||
#endif
|
||||
for (; i < count; ++i)
|
||||
dst[i] = i;
|
||||
}
|
||||
else if (is_primitive_disjointed(draw_mode))
|
||||
{
|
||||
return upload_untouched_skip_restart(src, dst, primitive_restart_index);
|
||||
}
|
||||
else
|
||||
{
|
||||
return primitive_restart_impl::upload_untouched(src, dst, primitive_restart_index);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
std::tuple<T, T, u32> expand_indexed_triangle_fan(std::span<to_be_t<const T>> src, std::span<T> dst, bool is_primitive_restart_enabled, u32 primitive_restart_index)
|
||||
|
@@ -624,8 +645,7 @@ void write_index_array_for_non_indexed_non_native_primitive_to_buffer(char* dst,
 	switch (draw_mode)
 	{
 	case rsx::primitive_type::line_loop:
-		for (unsigned i = 0; i < count; ++i)
-			typedDst[i] = i;
+		iota16(typedDst, count);
 		typedDst[count] = 0;
 		return;
 	case rsx::primitive_type::triangle_fan:
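For reference, a standalone sketch of an iota16-style fill like the one this hunk starts using. It is not the project's exact code; note that the SSE increment has to be assigned back to the accumulator for the vector path to advance, and a scalar loop covers the tail and non-SIMD builds:

	#include <emmintrin.h> // SSE2
	#include <cstdint>

	// Fill dst[0..count) with 0,1,2,... eight lanes per step.
	static void iota16_sketch(uint16_t* dst, uint32_t count)
	{
		uint32_t i = 0;
		const __m128i step = _mm_set1_epi16(8);
		__m128i values = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);
		for (; i + 8 <= count; i += 8)
		{
			_mm_storeu_si128(reinterpret_cast<__m128i*>(dst + i), values);
			values = _mm_add_epi16(values, step); // the result must be assigned back
		}
		for (; i < count; ++i)
			dst[i] = static_cast<uint16_t>(i);
	}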
@@ -15,34 +15,46 @@ class data_heap
 {
 protected:
 	/**
-	* Does alloc cross get position ?
+	* Internal implementation of allocation test
+	* Does alloc cross get position?
 	*/
+	bool can_alloc_impl(usz aligned_put_pos, usz aligned_alloc_size) const
+	{
+		const usz alloc_end = aligned_put_pos + aligned_alloc_size;
+		if (alloc_end < m_size) [[ likely ]]
+		{
+			// Range before get
+			if (alloc_end < m_get_pos)
+				return true;
+
+			// Range after get
+			if (aligned_put_pos > m_get_pos)
+				return true;
+
+			return false;
+		}
+
+		// ..]....[..get..
+		if (aligned_put_pos < m_get_pos)
+			return false;
+
+		// ..get..]...[...
+		// Actually all resources extending beyond heap space starts at 0
+		if (aligned_alloc_size > m_get_pos)
+			return false;
+
+		return true;
+	}
+
+	/**
+	* Does alloc cross get position?
+	*/
 	template<int Alignment>
 	bool can_alloc(usz size) const
 	{
-		usz alloc_size = utils::align(size, Alignment);
-		usz aligned_put_pos = utils::align(m_put_pos, Alignment);
-		if (aligned_put_pos + alloc_size < m_size)
-		{
-			// range before get
-			if (aligned_put_pos + alloc_size < m_get_pos)
-				return true;
-			// range after get
-			if (aligned_put_pos > m_get_pos)
-				return true;
-			return false;
-		}
-		else
-		{
-			// ..]....[..get..
-			if (aligned_put_pos < m_get_pos)
-				return false;
-			// ..get..]...[...
-			// Actually all resources extending beyond heap space starts at 0
-			if (alloc_size > m_get_pos)
-				return false;
-			return true;
-		}
+		const usz alloc_size = utils::align(size, Alignment);
+		const usz aligned_put_pos = utils::align(m_put_pos, Alignment);
+		return can_alloc_impl(aligned_put_pos, alloc_size);
 	}

 	// Grow the buffer to hold at least size bytes
|
|||
}
|
||||
|
||||
usz m_size;
|
||||
usz m_put_pos; // Start of free space
|
||||
usz m_min_guard_size; //If an allocation touches the guard region, reset the heap to avoid going over budget
|
||||
usz m_current_allocated_size;
|
||||
usz m_largest_allocated_pool;
|
||||
usz m_put_pos; // Start of free space
|
||||
usz m_get_pos; // End of free space
|
||||
usz m_min_guard_size; // If an allocation touches the guard region, reset the heap to avoid going over budget
|
||||
|
||||
char* m_name;
|
||||
public:
|
||||
|
@@ -65,8 +76,6 @@ public:
 	data_heap(const data_heap&) = delete;
 	data_heap(data_heap&&) = delete;

-	usz m_get_pos; // End of free space
-
 	void init(usz heap_size, const char* buffer_name = "unnamed", usz min_guard_size=0x10000)
 	{
 		m_name = const_cast<char*>(buffer_name);
@@ -75,10 +84,8 @@ public:
 		m_put_pos = 0;
 		m_get_pos = heap_size - 1;

-		//allocation stats
+		// Allocation stats
 		m_min_guard_size = min_guard_size;
-		m_current_allocated_size = 0;
-		m_largest_allocated_pool = 0;
 	}

 	template<int Alignment>
@ -89,24 +96,46 @@ public:
|
|||
|
||||
if (!can_alloc<Alignment>(size) && !grow(alloc_size))
|
||||
{
|
||||
fmt::throw_exception("[%s] Working buffer not big enough, buffer_length=%d allocated=%d requested=%d guard=%d largest_pool=%d",
|
||||
m_name, m_size, m_current_allocated_size, size, m_min_guard_size, m_largest_allocated_pool);
|
||||
fmt::throw_exception("[%s] Working buffer not big enough, buffer_length=%d requested=%d guard=%d",
|
||||
m_name, m_size, size, m_min_guard_size);
|
||||
}
|
||||
|
||||
const usz block_length = (aligned_put_pos - m_put_pos) + alloc_size;
|
||||
m_current_allocated_size += block_length;
|
||||
m_largest_allocated_pool = std::max(m_largest_allocated_pool, block_length);
|
||||
|
||||
if (aligned_put_pos + alloc_size < m_size)
|
||||
const usz alloc_end = aligned_put_pos + alloc_size;
|
||||
if (alloc_end < m_size)
|
||||
{
|
||||
m_put_pos = aligned_put_pos + alloc_size;
|
||||
m_put_pos = alloc_end;
|
||||
return aligned_put_pos;
|
||||
}
|
||||
else
|
||||
|
||||
m_put_pos = alloc_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* For use in cases where we take a fixed amount each time
|
||||
*/
|
||||
template<int Alignment, usz Size = Alignment>
|
||||
usz static_alloc()
|
||||
{
|
||||
static_assert((Size & (Alignment - 1)) == 0);
|
||||
ensure((m_put_pos & (Alignment - 1)) == 0);
|
||||
|
||||
if (!can_alloc_impl(m_put_pos, Size) && !grow(Size))
|
||||
{
|
||||
m_put_pos = alloc_size;
|
||||
return 0;
|
||||
fmt::throw_exception("[%s] Working buffer not big enough, buffer_length=%d requested=%d guard=%d",
|
||||
m_name, m_size, Size, m_min_guard_size);
|
||||
}
|
||||
|
||||
const usz alloc_end = m_put_pos + Size;
|
||||
if (alloc_end < m_size)
|
||||
{
|
||||
const auto ret_pos = m_put_pos;
|
||||
m_put_pos = alloc_end;
|
||||
return ret_pos;
|
||||
}
|
||||
|
||||
m_put_pos = Size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
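A usage fragment for the static_alloc<Alignment, Size> helper added above, contrasted with the existing alloc<Alignment>(size); the buffer members named here appear later in this diff and the call shapes are illustrative rather than copied from it:

	// Fixed-size, fixed-alignment sub-allocations, e.g. a 256-byte uniform block:
	const usz env_offset    = m_vertex_env_ring_info.static_alloc<256>();          // Size defaults to Alignment
	const usz params_offset = m_fragment_texture_params_ring_info.static_alloc<256, 768>();

	// Variable-size allocations keep using alloc<Alignment>(size):
	const usz const_offset  = m_transform_constants_ring_info.alloc<1>(utils::align(bytes, alignment));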
@ -117,30 +146,20 @@ public:
|
|||
return (m_put_pos > 0) ? m_put_pos - 1 : m_size - 1;
|
||||
}
|
||||
|
||||
virtual bool is_critical() const
|
||||
inline void set_get_pos(usz value)
|
||||
{
|
||||
const usz guard_length = std::max(m_min_guard_size, m_largest_allocated_pool);
|
||||
return (m_current_allocated_size + guard_length) >= m_size;
|
||||
m_get_pos = value;
|
||||
}
|
||||
|
||||
void reset_allocation_stats()
|
||||
{
|
||||
m_current_allocated_size = 0;
|
||||
m_largest_allocated_pool = 0;
|
||||
m_get_pos = get_current_put_pos_minus_one();
|
||||
}
|
||||
|
||||
// Updates the current_allocated_size metrics
|
||||
void notify()
|
||||
inline void notify()
|
||||
{
|
||||
if (m_get_pos == umax)
|
||||
m_current_allocated_size = 0;
|
||||
else if (m_get_pos < m_put_pos)
|
||||
m_current_allocated_size = (m_put_pos - m_get_pos - 1);
|
||||
else if (m_get_pos > m_put_pos)
|
||||
m_current_allocated_size = (m_put_pos + (m_size - m_get_pos - 1));
|
||||
else
|
||||
fmt::throw_exception("m_put_pos == m_get_pos!");
|
||||
// @unused
|
||||
}
|
||||
|
||||
usz size() const
|
||||
|
|
|
@@ -174,7 +174,7 @@ namespace rsx
 		{
 			// Switch to heap storage
 			_data = static_cast<Ty*>(std::malloc(sizeof(Ty) * size));
-			std::memcpy(_data, _local_storage, size_bytes());
+			std::memcpy(static_cast<void*>(_data), _local_storage, size_bytes());
 		}
 		else
 		{
@@ -390,14 +390,22 @@ namespace rsx
 			}

 			bool ret = false;
-			for (auto ptr = _data, last = _data + _size - 1; ptr < last; ptr++)
+			for (auto ptr = _data, last = _data + _size - 1; ptr <= last; ptr++)
 			{
 				if (predicate(*ptr))
 				{
+					ret = true;
+
+					if (ptr == last)
+					{
+						// Popping the last entry from list. Just set the new size and exit
+						_size--;
+						break;
+					}
+
 					// Move item to the end of the list and shrink by 1
 					std::memcpy(ptr, last, sizeof(Ty));
 					last = _data + (--_size);
-					ret = true;
 				}
 			}

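A self-contained sketch of the swap-with-last erase idiom this hunk refines, assuming trivially copyable elements; it is written for illustration and additionally re-tests the element moved into the vacated slot, which the project code does not need to guarantee:

	#include <cstring>
	#include <cstddef>

	// Unordered erase-if over a trivially copyable buffer: a matching element is
	// overwritten by the current last element, so nothing after it has to shift.
	template <typename Ty, typename Pred>
	std::size_t unordered_erase_if(Ty* data, std::size_t& size, Pred predicate)
	{
		std::size_t removed = 0;
		if (!size)
			return removed;

		for (Ty *ptr = data, *last = data + size - 1; ptr <= last; ++ptr)
		{
			if (!predicate(*ptr))
				continue;

			++removed;
			if (ptr == last)
			{
				--size; // the match was the final element
				break;
			}

			std::memcpy(ptr, last, sizeof(Ty));
			last = data + (--size);
			--ptr; // re-examine the element that was just moved into this slot
		}
		return removed;
	}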
@@ -9,11 +9,11 @@ namespace rsx
 	using unordered_map = std::unordered_map<T, U>;
 }
 #else
-#include "3rdparty/robin_hood/include/robin_hood.h"
+#include "3rdparty/unordered_dense/include/unordered_dense.h"

 namespace rsx
 {
 	template<typename T, typename U>
-	using unordered_map = ::robin_hood::unordered_map<T, U>;
+	using unordered_map = ankerl::unordered_dense::map<T, U>;
 }
 #endif
@ -4,7 +4,6 @@
|
|||
#include "Emu/RSX/Common/BufferUtils.h"
|
||||
#include "Emu/RSX/Common/buffer_stream.hpp"
|
||||
#include "Emu/RSX/Common/io_buffer.h"
|
||||
#include "Emu/RSX/Common/simple_array.hpp"
|
||||
#include "Emu/RSX/NV47/HW/context_accessors.define.h"
|
||||
#include "Emu/RSX/Program/GLSLCommon.h"
|
||||
#include "Emu/RSX/rsx_methods.h"
|
||||
|
@ -759,7 +758,8 @@ namespace rsx
|
|||
ensure(draw_call.is_trivial_instanced_draw);
|
||||
|
||||
// Temp indirection table. Used to track "running" updates.
|
||||
rsx::simple_array<u32> instancing_indirection_table;
|
||||
auto& instancing_indirection_table = m_scratch_buffers.u32buf;
|
||||
|
||||
// indirection table size
|
||||
const auto full_reupload = !prog || prog->has_indexed_constants;
|
||||
const auto reloc_table = full_reupload ? decltype(prog->constant_ids){} : prog->constant_ids;
|
||||
|
@ -767,7 +767,8 @@ namespace rsx
|
|||
instancing_indirection_table.resize(redirection_table_size);
|
||||
|
||||
// Temp constants data
|
||||
rsx::simple_array<u128> constants_data;
|
||||
auto& constants_data = m_scratch_buffers.u128buf;
|
||||
constants_data.clear();
|
||||
constants_data.reserve(redirection_table_size * draw_call.pass_count());
|
||||
|
||||
// Allocate indirection buffer on GPU stream
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
#include <util/types.hpp>
|
||||
|
||||
#include "Emu/RSX/Common/simple_array.hpp"
|
||||
#include "Emu/RSX/Core/RSXVertexTypes.h"
|
||||
#include "Emu/RSX/NV47/FW/draw_call.hpp"
|
||||
#include "Emu/RSX/Program/ProgramStateCache.h"
|
||||
|
@ -28,6 +29,12 @@ namespace rsx
|
|||
std::array<push_buffer_vertex_info, 16> m_vertex_push_buffers;
|
||||
rsx::simple_array<u32> m_element_push_buffer;
|
||||
|
||||
struct
|
||||
{
|
||||
rsx::simple_array<u32> u32buf;
|
||||
rsx::simple_array<u128> u128buf;
|
||||
} mutable m_scratch_buffers;
|
||||
|
||||
public:
|
||||
draw_command_processor() = default;
|
||||
|
||||
|
|
|
@ -44,6 +44,8 @@ namespace rsx
|
|||
}
|
||||
};
|
||||
|
||||
draw_command_barrier_mask |= (1u << type);
|
||||
|
||||
if (type == primitive_restart_barrier)
|
||||
{
|
||||
// Rasterization flow barrier
|
||||
|
@ -97,48 +99,32 @@ namespace rsx
|
|||
return false;
|
||||
}
|
||||
|
||||
// For instancing all draw calls must be identical
|
||||
const auto& ref = draw_command_ranges.front();
|
||||
for (const auto& range : draw_command_ranges)
|
||||
{
|
||||
if (range.first != ref.first || range.count != ref.count)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (draw_command_barriers.empty())
|
||||
{
|
||||
// Raise alarm here for investigation, we may be missing a corner case.
|
||||
rsx_log.error("Instanced draw detected, but no command barriers found!");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Barriers must exist, but can only involve updating transform constants (for now)
|
||||
for (const auto& barrier : draw_command_barriers)
|
||||
{
|
||||
if (barrier.type != rsx::transform_constant_load_modifier_barrier &&
|
||||
barrier.type != rsx::transform_constant_update_barrier)
|
||||
{
|
||||
ensure(barrier.draw_id < ::size32(draw_command_ranges));
|
||||
if (draw_command_ranges[barrier.draw_id].count == 0)
|
||||
{
|
||||
// Dangling command barriers are ignored. We're also at the end of the command, so abort.
|
||||
break;
|
||||
}
|
||||
const u32 compatible_barrier_mask =
|
||||
(1u << rsx::transform_constant_load_modifier_barrier) |
|
||||
(1u << rsx::transform_constant_update_barrier);
|
||||
|
||||
// Fail. Only transform constant instancing is supported at the moment.
|
||||
return false;
|
||||
}
|
||||
if (draw_command_barrier_mask & ~compatible_barrier_mask)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
// For instancing all draw calls must be identical
|
||||
// FIXME: This requirement can be easily lifted by chunking contiguous chunks.
|
||||
const auto& ref = draw_command_ranges.front();
|
||||
return !draw_command_ranges.any(FN(x.first != ref.first || x.count != ref.count));
|
||||
}
|
||||
|
||||
void draw_clause::reset(primitive_type type)
|
||||
{
|
||||
current_range_index = ~0u;
|
||||
last_execution_barrier_index = 0;
|
||||
draw_command_barrier_mask = 0;
|
||||
|
||||
command = draw_command::none;
|
||||
primitive = type;
|
||||
|
@ -152,14 +138,56 @@ namespace rsx
|
|||
is_disjoint_primitive = is_primitive_disjointed(primitive);
|
||||
}
|
||||
|
||||
const simple_array<draw_range_t>& draw_clause::get_subranges() const
|
||||
{
|
||||
ensure(!is_single_draw());
|
||||
|
||||
const auto range = get_range();
|
||||
const auto limit = range.first + range.count;
|
||||
const auto _pass_count = pass_count();
|
||||
|
||||
auto &ret = subranges_store;
|
||||
ret.clear();
|
||||
ret.reserve(_pass_count);
|
||||
|
||||
u32 previous_barrier = range.first;
|
||||
u32 vertex_counter = 0;
|
||||
|
||||
for (auto it = current_barrier_it;
|
||||
it != draw_command_barriers.end() && it->draw_id == current_range_index;
|
||||
it++)
|
||||
{
|
||||
const auto& barrier = *it;
|
||||
if (barrier.type != primitive_restart_barrier)
|
||||
continue;
|
||||
|
||||
if (barrier.address <= range.first)
|
||||
continue;
|
||||
|
||||
if (barrier.address >= limit)
|
||||
break;
|
||||
|
||||
const u32 count = barrier.address - previous_barrier;
|
||||
ret.push_back({ 0, vertex_counter, count });
|
||||
previous_barrier = barrier.address;
|
||||
vertex_counter += count;
|
||||
}
|
||||
|
||||
ensure(!ret.empty());
|
||||
ensure(previous_barrier < limit);
|
||||
ret.push_back({ 0, vertex_counter, limit - previous_barrier });
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
u32 draw_clause::execute_pipeline_dependencies(context* ctx, instanced_draw_config_t* instance_config) const
|
||||
{
|
||||
u32 result = 0u;
|
||||
for (;
|
||||
current_barrier_it != draw_command_barriers.end() && current_barrier_it->draw_id == current_range_index;
|
||||
current_barrier_it++)
|
||||
for (auto it = current_barrier_it;
|
||||
it != draw_command_barriers.end() && it->draw_id == current_range_index;
|
||||
it++)
|
||||
{
|
||||
const auto& barrier = *current_barrier_it;
|
||||
const auto& barrier = *it;
|
||||
switch (barrier.type)
|
||||
{
|
||||
case primitive_restart_barrier:
|
||||
|
|
|
@ -29,9 +29,15 @@ namespace rsx
|
|||
// Location of last execution barrier
|
||||
u32 last_execution_barrier_index{};
|
||||
|
||||
// Mask of all active barriers
|
||||
u32 draw_command_barrier_mask = 0;
|
||||
|
||||
// Draw-time iterator to the draw_command_barriers struct
|
||||
mutable simple_array<barrier_t>::iterator current_barrier_it;
|
||||
|
||||
// Subranges memory cache
|
||||
mutable rsx::simple_array<draw_range_t> subranges_store;
|
||||
|
||||
// Helper functions
|
||||
// Add a new draw command
|
||||
void append_draw_command(const draw_range_t& range)
|
||||
|
@ -246,6 +252,12 @@ namespace rsx
|
|||
return false;
|
||||
}
|
||||
|
||||
// Advance barrier iterator so it always points to the current draw
|
||||
for (;
|
||||
current_barrier_it != draw_command_barriers.end() &&
|
||||
current_barrier_it->draw_id < current_range_index;
|
||||
++current_barrier_it);
|
||||
|
||||
if (draw_command_ranges[current_range_index].count == 0)
|
||||
{
|
||||
// Dangling execution barrier
|
||||
|
@ -282,48 +294,19 @@ namespace rsx
|
|||
*/
|
||||
u32 execute_pipeline_dependencies(struct context* ctx, instanced_draw_config_t* instance_config = nullptr) const;
|
||||
|
||||
/**
|
||||
* Returns the first-count data for the current subdraw
|
||||
*/
|
||||
const draw_range_t& get_range() const
|
||||
{
|
||||
ensure(current_range_index < draw_command_ranges.size());
|
||||
return draw_command_ranges[current_range_index];
|
||||
}
|
||||
|
||||
simple_array<draw_range_t> get_subranges() const
|
||||
{
|
||||
ensure(!is_single_draw());
|
||||
|
||||
const auto range = get_range();
|
||||
const auto limit = range.first + range.count;
|
||||
|
||||
simple_array<draw_range_t> ret;
|
||||
u32 previous_barrier = range.first;
|
||||
u32 vertex_counter = 0;
|
||||
|
||||
for (const auto& barrier : draw_command_barriers)
|
||||
{
|
||||
if (barrier.draw_id != current_range_index)
|
||||
continue;
|
||||
|
||||
if (barrier.type != primitive_restart_barrier)
|
||||
continue;
|
||||
|
||||
if (barrier.address <= range.first)
|
||||
continue;
|
||||
|
||||
if (barrier.address >= limit)
|
||||
break;
|
||||
|
||||
const u32 count = barrier.address - previous_barrier;
|
||||
ret.push_back({ 0, vertex_counter, count });
|
||||
previous_barrier = barrier.address;
|
||||
vertex_counter += count;
|
||||
}
|
||||
|
||||
ensure(!ret.empty());
|
||||
ensure(previous_barrier < limit);
|
||||
ret.push_back({ 0, vertex_counter, limit - previous_barrier });
|
||||
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* Returns a compiled list of all subdraws.
|
||||
* NOTE: This is a non-trivial operation as it takes disjoint primitive boundaries into account.
|
||||
*/
|
||||
const simple_array<draw_range_t>& get_subranges() const;
|
||||
};
|
||||
}
|
||||
|
|
|
@@ -15,7 +15,7 @@ namespace rsx
 		RSX(ctx)->sync();

 		// Write ref+get (get will be written again with the same value at command end)
-		auto& dma = vm::_ref<RsxDmaControl>(RSX(ctx)->dma_address);
+		auto& dma = *vm::_ptr<RsxDmaControl>(RSX(ctx)->dma_address);
 		dma.get.release(RSX(ctx)->fifo_ctrl->get_pos());
 		dma.ref.store(arg);
 	}
@@ -28,7 +28,7 @@ namespace rsx
 		// Syncronization point, may be associated with memory changes without actually changing addresses
 		RSX(ctx)->m_graphics_state |= rsx::pipeline_state::fragment_program_needs_rehash;

-		const auto& sema = vm::_ref<RsxSemaphore>(addr).val;
+		const auto& sema = vm::_ref<RsxSemaphore>(addr);

 		if (sema == arg)
 		{
@ -566,7 +566,7 @@ namespace rsx
|
|||
default:
|
||||
rsx_log.error("NV4097_GET_REPORT: Bad type %d", type);
|
||||
|
||||
vm::_ref<atomic_t<CellGcmReportData>>(address_ptr).atomic_op([&](CellGcmReportData& data)
|
||||
vm::_ptr<atomic_t<CellGcmReportData>>(address_ptr)->atomic_op([&](CellGcmReportData& data)
|
||||
{
|
||||
data.timer = RSX(ctx)->timestamp();
|
||||
data.padding = 0;
|
||||
|
@ -651,7 +651,7 @@ namespace rsx
|
|||
|
||||
ensure(addr != umax);
|
||||
|
||||
vm::_ref<atomic_t<RsxNotify>>(addr).store(
|
||||
vm::_ptr<atomic_t<RsxNotify>>(addr)->store(
|
||||
{
|
||||
RSX(ctx)->timestamp(),
|
||||
0
|
||||
|
|
|
@ -21,7 +21,7 @@ namespace rsx
|
|||
// First, queue the GPU work. If it flushes the queue for us, the following routines will be faster.
|
||||
const bool handled = RSX(ctx)->get_backend_config().supports_host_gpu_labels && RSX(ctx)->release_GCM_label(address, data);
|
||||
|
||||
if (vm::_ref<RsxSemaphore>(address).val == data)
|
||||
if (vm::_ref<RsxSemaphore>(address) == data)
|
||||
{
|
||||
// It's a no-op to write the same value (although there is a delay in real-hw so it's more accurate to allow GPU label in this case)
|
||||
return;
|
||||
|
@ -57,7 +57,7 @@ namespace rsx
|
|||
}
|
||||
}
|
||||
|
||||
vm::_ref<RsxSemaphore>(address).val = data;
|
||||
vm::write<atomic_t<RsxSemaphore>>(address, data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -76,6 +76,8 @@ vec4 _fetch_constant(const in uint base_offset)
|
|||
// uint override
|
||||
return _fetch_constant(int(base_offset));
|
||||
}
|
||||
#elif defined(VULKAN)
|
||||
#define _fetch_constant(x) vc[x + xform_constants_offset]
|
||||
#else
|
||||
#define _fetch_constant(x) vc[x]
|
||||
#endif
|
||||
|
|
|
@ -1212,7 +1212,7 @@ namespace rsx
|
|||
if (const u64 get_put = new_get_put.exchange(u64{umax});
|
||||
get_put != umax)
|
||||
{
|
||||
vm::_ref<atomic_be_t<u64>>(dma_address + ::offset32(&RsxDmaControl::put)).release(get_put);
|
||||
vm::_ptr<atomic_be_t<u64>>(dma_address + ::offset32(&RsxDmaControl::put))->release(get_put);
|
||||
fifo_ctrl->set_get(static_cast<u32>(get_put));
|
||||
fifo_ctrl->abort();
|
||||
fifo_ret_addr = RSX_CALL_STACK_EMPTY;
|
||||
|
@ -2457,7 +2457,7 @@ namespace rsx
|
|||
}
|
||||
|
||||
rsx::reservation_lock<true> lock(sink, 16);
|
||||
vm::_ref<atomic_t<CellGcmReportData>>(sink).store({timestamp(), value, 0});
|
||||
vm::_ptr<atomic_t<CellGcmReportData>>(sink)->store({timestamp(), value, 0});
|
||||
}
|
||||
|
||||
u32 thread::copy_zcull_stats(u32 memory_range_start, u32 memory_range, u32 destination)
|
||||
|
|
|
@ -50,7 +50,7 @@ namespace vk
|
|||
|
||||
idx++;
|
||||
|
||||
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
|
||||
bindings[idx].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
|
||||
bindings[idx].descriptorCount = 1;
|
||||
bindings[idx].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
|
||||
bindings[idx].binding = binding_table.vertex_constant_buffers_bind_slot;
|
||||
|
@ -101,7 +101,8 @@ namespace vk
|
|||
return bindings;
|
||||
}
|
||||
|
||||
std::tuple<VkPipelineLayout, VkDescriptorSetLayout> get_common_pipeline_layout(VkDevice dev)
|
||||
std::tuple<VkPipelineLayout, VkDescriptorSetLayout, rsx::simple_array<VkDescriptorSetLayoutBinding>>
|
||||
get_common_pipeline_layout(VkDevice dev)
|
||||
{
|
||||
const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
|
||||
auto bindings = get_common_binding_table();
|
||||
|
@ -135,13 +136,13 @@ namespace vk
|
|||
|
||||
std::array<VkPushConstantRange, 1> push_constants;
|
||||
push_constants[0].offset = 0;
|
||||
push_constants[0].size = 16;
|
||||
push_constants[0].size = 20;
|
||||
push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
|
||||
|
||||
if (vk::emulate_conditional_rendering())
|
||||
{
|
||||
// Conditional render toggle
|
||||
push_constants[0].size = 20;
|
||||
push_constants[0].size = 24;
|
||||
}
|
||||
|
||||
const auto set_layout = vk::descriptors::create_layout(bindings);
|
||||
|
@@ -155,6 +156,25 @@ namespace vk

 		VkPipelineLayout result;
 		CHECK_RESULT(vkCreatePipelineLayout(dev, &layout_info, nullptr, &result));
-		return std::make_tuple(result, set_layout);
+		return std::make_tuple(result, set_layout, bindings);
 	}
+
+	rsx::simple_array<VkDescriptorPoolSize> get_descriptor_pool_sizes(const rsx::simple_array<VkDescriptorSetLayoutBinding>& bindings)
+	{
+		// Compile descriptor pool sizes
+		const u32 num_ubo = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ? y.descriptorCount : 0)));
+		const u32 num_texel_buffers = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ? y.descriptorCount : 0)));
+		const u32 num_combined_image_sampler = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ? y.descriptorCount : 0)));
+		const u32 num_ssbo = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ? y.descriptorCount : 0)));
+
+		ensure(num_ubo > 0 && num_texel_buffers > 0 && num_combined_image_sampler > 0 && num_ssbo > 0);
+
+		return
+		{
+			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , num_ubo },
+			{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , num_texel_buffers },
+			{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , num_combined_image_sampler },
+			{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_ssbo }
+		};
+	}
 }
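The pool sizes above are derived by folding over the binding table with the project's FN(...) lambda shorthand; a plain-lambda sketch of one such reduction, assuming simple_array::reduce behaves like std::accumulate:

	// Count how many uniform-buffer descriptors the layout declares.
	const u32 num_ubo = bindings.reduce(0u, [](u32 acc, const VkDescriptorSetLayoutBinding& b)
	{
		return acc + (b.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ? b.descriptorCount : 0u);
	});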
@ -7,8 +7,11 @@ namespace vk
|
|||
{
|
||||
// Grab standard layout for decompiled RSX programs. Also used by the interpreter.
|
||||
// FIXME: This generates a bloated monstrosity that needs to die.
|
||||
std::tuple<VkPipelineLayout, VkDescriptorSetLayout> get_common_pipeline_layout(VkDevice dev);
|
||||
std::tuple<VkPipelineLayout, VkDescriptorSetLayout, rsx::simple_array<VkDescriptorSetLayoutBinding>> get_common_pipeline_layout(VkDevice dev);
|
||||
|
||||
// Returns the standard binding layout without texture slots. Those have special handling depending on the consumer.
|
||||
rsx::simple_array<VkDescriptorSetLayoutBinding> get_common_binding_table();
|
||||
|
||||
// Returns an array of pool sizes that can be used to generate a proper descriptor pool
|
||||
rsx::simple_array<VkDescriptorPoolSize> get_descriptor_pool_sizes(const rsx::simple_array<VkDescriptorSetLayoutBinding>& bindings);
|
||||
}
|
||||
|
|
rpcs3/Emu/RSX/VK/VKDataHeapManager.cpp (new file, 66 lines)
@ -0,0 +1,66 @@
+#include "stdafx.h"
+#include "VKDataHeapManager.h"
+
+#include "vkutils/data_heap.h"
+#include <unordered_set>
+
+namespace vk::data_heap_manager
+{
+	std::unordered_set<vk::data_heap*> g_managed_heaps;
+
+	void register_ring_buffer(vk::data_heap& heap)
+	{
+		g_managed_heaps.insert(&heap);
+	}
+
+	void register_ring_buffers(std::initializer_list<std::reference_wrapper<vk::data_heap>> heaps)
+	{
+		for (auto&& heap : heaps)
+		{
+			register_ring_buffer(heap);
+		}
+	}
+
+	managed_heap_snapshot_t get_heap_snapshot()
+	{
+		managed_heap_snapshot_t result{};
+		for (auto& heap : g_managed_heaps)
+		{
+			result[heap] = heap->get_current_put_pos_minus_one();
+		}
+		return result;
+	}
+
+	void restore_snapshot(const managed_heap_snapshot_t& snapshot)
+	{
+		for (auto& heap : g_managed_heaps)
+		{
+			const auto found = snapshot.find(heap);
+			if (found == snapshot.end())
+			{
+				continue;
+			}
+
+			heap->set_get_pos(found->second);
+			heap->notify();
+		}
+	}
+
+	void reset_heap_allocations()
+	{
+		for (auto& heap : g_managed_heaps)
+		{
+			heap->reset_allocation_stats();
+		}
+	}
+
+	void reset()
+	{
+		for (auto& heap : g_managed_heaps)
+		{
+			heap->destroy();
+		}
+
+		g_managed_heaps.clear();
+	}
+}
rpcs3/Emu/RSX/VK/VKDataHeapManager.h (new file, 33 lines)
@ -0,0 +1,33 @@
+#pragma once
+
+#include <util/types.hpp>
+
+#include <unordered_map>
+
+namespace vk
+{
+	class data_heap;
+
+	namespace data_heap_manager
+	{
+		using managed_heap_snapshot_t = std::unordered_map<const vk::data_heap*, s64>;
+
+		// Submit ring buffer for management
+		void register_ring_buffer(vk::data_heap& heap);
+
+		// Bulk registration
+		void register_ring_buffers(std::initializer_list<std::reference_wrapper<vk::data_heap>> heaps);
+
+		// Capture managed ring buffers snapshot at current time
+		managed_heap_snapshot_t get_heap_snapshot();
+
+		// Synchronize heap with snapshot
+		void restore_snapshot(const managed_heap_snapshot_t& snapshot);
+
+		// Reset all managed heap allocations
+		void reset_heap_allocations();
+
+		// Cleanup
+		void reset();
+	}
+}
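A usage sketch for the new data_heap_manager, mirroring how the registration calls appear later in this diff; the frame-handling around the snapshot calls is simplified and illustrative:

	// Registration at renderer init:
	vk::data_heap_manager::register_ring_buffers({ std::ref(m_attrib_ring_info), std::ref(m_index_buffer_ring_info) });

	// When a frame context is captured, remember where each ring's put pointer was:
	auto snapshot = vk::data_heap_manager::get_heap_snapshot();

	// Once that frame's GPU work is known to be complete, release the ring space it held:
	vk::data_heap_manager::restore_snapshot(snapshot);

	// On device teardown:
	vk::data_heap_manager::reset();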
@ -6,6 +6,7 @@
|
|||
#include "VKGSRender.h"
|
||||
#include "vkutils/buffer_object.h"
|
||||
#include "vkutils/chip_class.h"
|
||||
#include <vulkan/vulkan_core.h>
|
||||
|
||||
namespace vk
|
||||
{
|
||||
|
@ -269,7 +270,6 @@ void VKGSRender::load_texture_env()
|
|||
{
|
||||
if (tex.enabled())
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
|
||||
*sampler_state = m_texture_cache.upload_texture(*m_current_command_buffer, tex, m_rtts);
|
||||
}
|
||||
else
|
||||
|
@ -428,7 +428,6 @@ void VKGSRender::load_texture_env()
|
|||
{
|
||||
if (rsx::method_registers.vertex_textures[i].enabled())
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
|
||||
*sampler_state = m_texture_cache.upload_texture(*m_current_command_buffer, tex, m_rtts);
|
||||
}
|
||||
else
|
||||
|
@ -837,7 +836,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
|
|||
if (m_vertex_layout_storage &&
|
||||
m_vertex_layout_storage->info.buffer != m_vertex_layout_ring_info.heap->value)
|
||||
{
|
||||
m_current_frame->buffer_views_to_clean.push_back(std::move(m_vertex_layout_storage));
|
||||
vk::get_resource_manager()->dispose(m_vertex_layout_storage);
|
||||
}
|
||||
|
||||
vk::clear_status_interrupt(vk::heap_changed);
|
||||
|
@ -918,7 +917,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
|
|||
info.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
|
||||
info.buffer = m_cond_render_buffer->value;
|
||||
|
||||
m_device->_vkCmdBeginConditionalRenderingEXT(*m_current_command_buffer, &info);
|
||||
_vkCmdBeginConditionalRenderingEXT(*m_current_command_buffer, &info);
|
||||
m_current_command_buffer->flags |= vk::command_buffer::cb_has_conditional_render;
|
||||
}
|
||||
}
|
||||
|
@ -937,6 +936,12 @@ void VKGSRender::emit_geometry(u32 sub_index)
|
|||
{
|
||||
vkCmdDraw(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0);
|
||||
}
|
||||
else if (m_device->get_multidraw_support())
|
||||
{
|
||||
const auto subranges = draw_call.get_subranges();
|
||||
auto ptr = reinterpret_cast<const VkMultiDrawInfoEXT*>(& subranges.front().first);
|
||||
_vkCmdDrawMultiEXT(*m_current_command_buffer, ::size32(subranges), ptr, 1, 0, sizeof(rsx::draw_range_t));
|
||||
}
|
||||
else
|
||||
{
|
||||
u32 vertex_offset = 0;
|
||||
|
@ -963,6 +968,26 @@ void VKGSRender::emit_geometry(u32 sub_index)
|
|||
{
|
||||
vkCmdDrawIndexed(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0, 0);
|
||||
}
|
||||
else if (m_device->get_multidraw_support())
|
||||
{
|
||||
const auto subranges = draw_call.get_subranges();
|
||||
const auto subranges_count = ::size32(subranges);
|
||||
const auto allocation_size = subranges_count * 3;
|
||||
|
||||
m_multidraw_parameters_buffer.resize(allocation_size);
|
||||
auto _ptr = m_multidraw_parameters_buffer.data();
|
||||
u32 vertex_offset = 0;
|
||||
|
||||
for (const auto& range : subranges)
|
||||
{
|
||||
const auto count = get_index_count(draw_call.primitive, range.count);
|
||||
*_ptr++ = 0;
|
||||
*_ptr++ = vertex_offset;
|
||||
*_ptr++ = count;
|
||||
vertex_offset += count;
|
||||
}
|
||||
_vkCmdDrawMultiIndexedEXT(*m_current_command_buffer, subranges_count, reinterpret_cast<const VkMultiDrawIndexedInfoEXT*>(_ptr), 1, 0, sizeof(u32) * 3, nullptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
u32 vertex_offset = 0;
|
||||
|
@ -1107,9 +1132,6 @@ void VKGSRender::end()
|
|||
m_texture_cache.release_uncached_temporary_subresources();
|
||||
m_frame_stats.textures_upload_time += m_profiler.duration();
|
||||
|
||||
// Final heap check...
|
||||
check_heap_status(VK_HEAP_CHECK_VERTEX_STORAGE | VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE);
|
||||
|
||||
u32 sub_index = 0; // RSX subdraw ID
|
||||
m_current_draw.subdraw_id = 0; // Host subdraw ID. Invalid RSX subdraws do not increment this value
|
||||
|
||||
|
@ -1134,7 +1156,7 @@ void VKGSRender::end()
|
|||
|
||||
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
|
||||
{
|
||||
m_device->_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
|
||||
_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
|
||||
m_current_command_buffer->flags &= ~(vk::command_buffer::cb_has_conditional_render);
|
||||
}
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include "../Program/SPIRVCommon.h"
|
||||
|
||||
#include "util/asm.hpp"
|
||||
#include <vulkan/vulkan_core.h>
|
||||
|
||||
namespace vk
|
||||
{
|
||||
|
@ -471,6 +472,7 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
|
|||
|
||||
m_device = const_cast<vk::render_device*>(&m_swapchain->get_device());
|
||||
vk::set_current_renderer(m_swapchain->get_device());
|
||||
vk::init();
|
||||
|
||||
m_swapchain_dims.width = m_frame->client_width();
|
||||
m_swapchain_dims.height = m_frame->client_height();
|
||||
|
@ -491,7 +493,8 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
|
|||
m_secondary_cb_list.create(m_secondary_command_buffer_pool, vk::command_buffer::access_type_hint::all);
|
||||
|
||||
//Precalculated stuff
|
||||
std::tie(m_pipeline_layout, m_descriptor_layouts) = vk::get_common_pipeline_layout(*m_device);
|
||||
rsx::simple_array<VkDescriptorSetLayoutBinding> binding_layout;
|
||||
std::tie(m_pipeline_layout, m_descriptor_layouts, binding_layout) = vk::get_common_pipeline_layout(*m_device);
|
||||
|
||||
//Occlusion
|
||||
m_occlusion_query_manager = std::make_unique<vk::query_pool_manager>(*m_device, VK_QUERY_TYPE_OCCLUSION, OCCLUSION_MAX_POOL_SIZE);
|
||||
|
@ -507,18 +510,7 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
|
|||
|
||||
// Generate frame contexts
|
||||
const u32 max_draw_calls = m_device->get_descriptor_max_draw_calls();
|
||||
const auto& binding_table = m_device->get_pipeline_binding_table();
|
||||
const u32 num_fs_samplers = binding_table.vertex_textures_first_bind_slot - binding_table.textures_first_bind_slot;
|
||||
|
||||
rsx::simple_array<VkDescriptorPoolSize> descriptor_type_sizes =
|
||||
{
|
||||
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 },
|
||||
{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 },
|
||||
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , (num_fs_samplers + 4) },
|
||||
|
||||
// Conditional rendering predicate slot; refactor to allow skipping this when not needed
|
||||
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3 }
|
||||
};
|
||||
const auto descriptor_type_sizes = vk::get_descriptor_pool_sizes(binding_layout);
|
||||
m_descriptor_pool.create(*m_device, descriptor_type_sizes, max_draw_calls);
|
||||
|
||||
VkSemaphoreCreateInfo semaphore_info = {};
|
||||
|
@ -531,18 +523,39 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
|
|||
m_fragment_texture_params_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "fragment texture params buffer");
|
||||
m_vertex_layout_ring_info.create(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "vertex layout buffer", 0x10000, VK_TRUE);
|
||||
m_fragment_constants_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "fragment constants buffer");
|
||||
m_transform_constants_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_TRANSFORM_CONSTANTS_BUFFER_SIZE_M * 0x100000, "transform constants buffer");
|
||||
m_transform_constants_ring_info.create(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VK_TRANSFORM_CONSTANTS_BUFFER_SIZE_M * 0x100000, "transform constants buffer");
|
||||
m_index_buffer_ring_info.create(VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VK_INDEX_RING_BUFFER_SIZE_M * 0x100000, "index buffer");
|
||||
m_texture_upload_buffer_ring_info.create(VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_TEXTURE_UPLOAD_RING_BUFFER_SIZE_M * 0x100000, "texture upload buffer", 32 * 0x100000);
|
||||
m_raster_env_ring_info.create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_UBO_RING_BUFFER_SIZE_M * 0x100000, "raster env buffer");
|
||||
m_instancing_buffer_ring_info.create(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VK_TRANSFORM_CONSTANTS_BUFFER_SIZE_M * 0x100000, "instancing data buffer");
|
||||
|
||||
vk::data_heap_manager::register_ring_buffers
|
||||
({
|
||||
std::ref(m_attrib_ring_info),
|
||||
std::ref(m_fragment_env_ring_info),
|
||||
std::ref(m_vertex_env_ring_info),
|
||||
std::ref(m_fragment_texture_params_ring_info),
|
||||
std::ref(m_vertex_layout_ring_info),
|
||||
std::ref(m_fragment_constants_ring_info),
|
||||
std::ref(m_transform_constants_ring_info),
|
||||
std::ref(m_index_buffer_ring_info),
|
||||
std::ref(m_texture_upload_buffer_ring_info),
|
||||
std::ref(m_raster_env_ring_info),
|
||||
std::ref(m_instancing_buffer_ring_info)
|
||||
});
|
||||
|
||||
const auto shadermode = g_cfg.video.shadermode.get();
|
||||
|
||||
if (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only)
|
||||
{
|
||||
m_vertex_instructions_buffer.create(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, 64 * 0x100000, "vertex instructions buffer", 512 * 16);
|
||||
m_fragment_instructions_buffer.create(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, 64 * 0x100000, "fragment instructions buffer", 2048);
|
||||
|
||||
vk::data_heap_manager::register_ring_buffers
|
||||
({
|
||||
std::ref(m_vertex_instructions_buffer),
|
||||
std::ref(m_fragment_instructions_buffer)
|
||||
});
|
||||
}
|
||||
|
||||
// Initialize optional allocation information with placeholders
|
||||
|
@ -813,19 +826,7 @@ VKGSRender::~VKGSRender()
|
|||
m_upscaler.reset();
|
||||
|
||||
// Heaps
|
||||
m_attrib_ring_info.destroy();
|
||||
m_fragment_env_ring_info.destroy();
|
||||
m_vertex_env_ring_info.destroy();
|
||||
m_fragment_texture_params_ring_info.destroy();
|
||||
m_vertex_layout_ring_info.destroy();
|
||||
m_fragment_constants_ring_info.destroy();
|
||||
m_transform_constants_ring_info.destroy();
|
||||
m_index_buffer_ring_info.destroy();
|
||||
m_texture_upload_buffer_ring_info.destroy();
|
||||
m_vertex_instructions_buffer.destroy();
|
||||
m_fragment_instructions_buffer.destroy();
|
||||
m_raster_env_ring_info.destroy();
|
||||
m_instancing_buffer_ring_info.destroy();
|
||||
vk::data_heap_manager::reset();
|
||||
|
||||
// Fallback bindables
|
||||
null_buffer.reset();
|
||||
|
@ -835,19 +836,14 @@ VKGSRender::~VKGSRender()
|
|||
{
|
||||
// Return resources back to the owner
|
||||
m_current_frame = &frame_context_storage[m_current_queue_index];
|
||||
m_current_frame->swap_storage(m_aux_frame_context);
|
||||
m_current_frame->grab_resources(m_aux_frame_context);
|
||||
}
|
||||
|
||||
m_aux_frame_context.buffer_views_to_clean.clear();
|
||||
|
||||
// NOTE: aux_context uses descriptor pools borrowed from the main queues and any allocations will be automatically freed when pool is destroyed
|
||||
for (auto &ctx : frame_context_storage)
|
||||
{
|
||||
vkDestroySemaphore((*m_device), ctx.present_wait_semaphore, nullptr);
|
||||
vkDestroySemaphore((*m_device), ctx.acquire_signal_semaphore, nullptr);
|
||||
|
||||
ctx.buffer_views_to_clean.clear();
|
||||
}
|
||||
|
||||
// Textures
|
||||
|
@ -1147,114 +1143,6 @@ void VKGSRender::notify_tile_unbound(u32 tile)
|
|||
}
|
||||
}
|
||||
|
||||
void VKGSRender::check_heap_status(u32 flags)
|
||||
{
|
||||
ensure(flags);
|
||||
|
||||
bool heap_critical;
|
||||
if (flags == VK_HEAP_CHECK_ALL)
|
||||
{
|
||||
heap_critical = m_attrib_ring_info.is_critical() ||
|
||||
m_texture_upload_buffer_ring_info.is_critical() ||
|
||||
m_fragment_env_ring_info.is_critical() ||
|
||||
m_vertex_env_ring_info.is_critical() ||
|
||||
m_fragment_texture_params_ring_info.is_critical() ||
|
||||
m_vertex_layout_ring_info.is_critical() ||
|
||||
m_fragment_constants_ring_info.is_critical() ||
|
||||
m_transform_constants_ring_info.is_critical() ||
|
||||
m_index_buffer_ring_info.is_critical() ||
|
||||
m_raster_env_ring_info.is_critical() ||
|
||||
m_instancing_buffer_ring_info.is_critical();
|
||||
}
|
||||
else
|
||||
{
|
||||
heap_critical = false;
|
||||
u32 test = 1u << std::countr_zero(flags);
|
||||
|
||||
do
|
||||
{
|
||||
switch (flags & test)
|
||||
{
|
||||
case 0:
|
||||
break;
|
||||
case VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE:
|
||||
heap_critical = m_texture_upload_buffer_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_VERTEX_STORAGE:
|
||||
heap_critical = m_attrib_ring_info.is_critical() || m_index_buffer_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_VERTEX_ENV_STORAGE:
|
||||
heap_critical = m_vertex_env_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE:
|
||||
heap_critical = m_fragment_env_ring_info.is_critical() || m_raster_env_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_TEXTURE_ENV_STORAGE:
|
||||
heap_critical = m_fragment_texture_params_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE:
|
||||
heap_critical = m_vertex_layout_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_TRANSFORM_CONSTANTS_STORAGE:
|
||||
heap_critical = (current_vertex_program.ctrl & RSX_SHADER_CONTROL_INSTANCED_CONSTANTS)
|
||||
? m_instancing_buffer_ring_info.is_critical()
|
||||
: m_transform_constants_ring_info.is_critical();
|
||||
break;
|
||||
case VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE:
|
||||
heap_critical = m_fragment_constants_ring_info.is_critical();
|
||||
break;
|
||||
default:
|
||||
fmt::throw_exception("Unexpected heap flag set! (0x%X)", test);
|
||||
}
|
||||
|
||||
flags &= ~test;
|
||||
test <<= 1;
|
||||
}
|
||||
while (flags && !heap_critical);
|
||||
}
|
||||
|
||||
if (heap_critical)
|
||||
{
|
||||
m_profiler.start();
|
||||
|
||||
vk::frame_context_t *target_frame = nullptr;
|
||||
if (!m_queued_frames.empty())
|
||||
{
|
||||
if (m_current_frame != &m_aux_frame_context)
|
||||
{
|
||||
target_frame = m_queued_frames.front();
|
||||
}
|
||||
}
|
||||
|
||||
if (target_frame == nullptr)
|
||||
{
|
||||
flush_command_queue(true);
|
||||
m_vertex_cache->purge();
|
||||
|
||||
m_index_buffer_ring_info.reset_allocation_stats();
|
||||
m_fragment_env_ring_info.reset_allocation_stats();
|
||||
m_vertex_env_ring_info.reset_allocation_stats();
|
||||
m_fragment_texture_params_ring_info.reset_allocation_stats();
|
||||
m_vertex_layout_ring_info.reset_allocation_stats();
|
||||
m_fragment_constants_ring_info.reset_allocation_stats();
|
||||
m_transform_constants_ring_info.reset_allocation_stats();
|
||||
m_attrib_ring_info.reset_allocation_stats();
|
||||
m_texture_upload_buffer_ring_info.reset_allocation_stats();
|
||||
m_raster_env_ring_info.reset_allocation_stats();
|
||||
m_instancing_buffer_ring_info.reset_allocation_stats();
|
||||
m_current_frame->reset_heap_ptrs();
|
||||
m_last_heap_sync_time = rsx::get_shared_tag();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Flush the frame context
|
||||
frame_context_cleanup(target_frame);
|
||||
}
|
||||
|
||||
m_frame_stats.flip_time += m_profiler.duration();
|
||||
}
|
||||
}
|
||||
|
||||
void VKGSRender::check_present_status()
|
||||
{
|
||||
while (!m_queued_frames.empty())
|
||||
|
@ -2058,10 +1946,8 @@ void VKGSRender::load_program_env()
|
|||
|
||||
if (update_vertex_env)
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_VERTEX_ENV_STORAGE);
|
||||
|
||||
// Vertex state
|
||||
const auto mem = m_vertex_env_ring_info.alloc<256>(256);
|
||||
const auto mem = m_vertex_env_ring_info.static_alloc<256>();
|
||||
auto buf = static_cast<u8*>(m_vertex_env_ring_info.map(mem, 148));
|
||||
|
||||
m_draw_processor.fill_scale_offset_data(buf, false);
|
||||
|
@ -2107,7 +1993,7 @@ void VKGSRender::load_program_env()
|
|||
usz mem_offset = 0;
|
||||
auto alloc_storage = [&](usz size) -> std::pair<void*, usz>
|
||||
{
|
||||
const auto alignment = m_device->gpu().get_limits().minUniformBufferOffsetAlignment;
|
||||
const auto alignment = m_device->gpu().get_limits().minStorageBufferOffsetAlignment;
|
||||
mem_offset = m_transform_constants_ring_info.alloc<1>(utils::align(size, alignment));
|
||||
return std::make_pair(m_transform_constants_ring_info.map(mem_offset, size), size);
|
||||
};
|
||||
|
@ -2118,14 +2004,13 @@ void VKGSRender::load_program_env()
|
|||
if (!io_buf.empty())
|
||||
{
|
||||
m_transform_constants_ring_info.unmap();
|
||||
m_vertex_constants_buffer_info = { m_transform_constants_ring_info.heap->value, mem_offset, io_buf.size() };
|
||||
m_vertex_constants_buffer_info = { m_transform_constants_ring_info.heap->value, 0, VK_WHOLE_SIZE };
|
||||
m_xform_constants_dynamic_offset = mem_offset;
|
||||
}
|
||||
}
|
||||
|
||||
if (update_fragment_constants && !m_shader_interpreter.is_interpreter(m_program))
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE);
|
||||
|
||||
// Fragment constants
|
||||
if (fragment_constants_size)
|
||||
{
|
||||
|
@ -2146,9 +2031,7 @@ void VKGSRender::load_program_env()
|
|||
|
||||
if (update_fragment_env)
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE);
|
||||
|
||||
auto mem = m_fragment_env_ring_info.alloc<256>(256);
|
||||
auto mem = m_fragment_env_ring_info.static_alloc<256>();
|
||||
auto buf = m_fragment_env_ring_info.map(mem, 32);
|
||||
|
||||
m_draw_processor.fill_fragment_state_buffer(buf, current_fragment_program);
|
||||
|
@ -2158,9 +2041,7 @@ void VKGSRender::load_program_env()
|
|||
|
||||
if (update_fragment_texture_env)
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_TEXTURE_ENV_STORAGE);
|
||||
|
||||
auto mem = m_fragment_texture_params_ring_info.alloc<256>(768);
|
||||
auto mem = m_fragment_texture_params_ring_info.static_alloc<256, 768>();
|
||||
auto buf = m_fragment_texture_params_ring_info.map(mem, 768);
|
||||
|
||||
current_fragment_program.texture_params.write_to(buf, current_fp_metadata.referenced_textures_mask);
|
||||
|
@ -2170,9 +2051,7 @@ void VKGSRender::load_program_env()
|
|||
|
||||
if (update_raster_env)
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE);
|
||||
|
||||
auto mem = m_raster_env_ring_info.alloc<256>(256);
|
||||
auto mem = m_raster_env_ring_info.static_alloc<256>();
|
||||
auto buf = m_raster_env_ring_info.map(mem, 128);
|
||||
|
||||
std::memcpy(buf, rsx::method_registers.polygon_stipple_pattern(), 128);
|
||||
|
@ -2225,7 +2104,7 @@ void VKGSRender::load_program_env()
|
|||
const auto& binding_table = m_device->get_pipeline_binding_table();
|
||||
|
||||
m_program->bind_uniform(m_vertex_env_buffer_info, binding_table.vertex_params_bind_slot, m_current_frame->descriptor_set);
|
||||
m_program->bind_uniform(m_vertex_constants_buffer_info, binding_table.vertex_constant_buffers_bind_slot, m_current_frame->descriptor_set);
|
||||
m_program->bind_buffer(m_vertex_constants_buffer_info, binding_table.vertex_constant_buffers_bind_slot, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_current_frame->descriptor_set);
|
||||
m_program->bind_uniform(m_fragment_env_buffer_info, binding_table.fragment_state_bind_slot, m_current_frame->descriptor_set);
|
||||
m_program->bind_uniform(m_fragment_texture_params_buffer_info, binding_table.fragment_texture_params_bind_slot, m_current_frame->descriptor_set);
|
||||
m_program->bind_uniform(m_raster_env_buffer_info, binding_table.rasterizer_env_bind_slot, m_current_frame->descriptor_set);
|
||||
|
@ -2288,8 +2167,6 @@ void VKGSRender::upload_transform_constants(const rsx::io_buffer& buffer)
|
|||
|
||||
if (transform_constants_size)
|
||||
{
|
||||
check_heap_status(VK_HEAP_CHECK_TRANSFORM_CONSTANTS_STORAGE);
|
||||
|
||||
buffer.reserve(transform_constants_size);
|
||||
auto buf = buffer.data();
|
||||
|
||||
|
@ -2310,9 +2187,7 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_
|
|||
if (!m_vertex_layout_storage || !m_vertex_layout_storage->in_range(offset32, range32, base_offset))
|
||||
{
|
||||
ensure(m_texbuffer_view_size >= m_vertex_layout_stream_info.range);
|
||||
|
||||
if (m_vertex_layout_storage)
|
||||
m_current_frame->buffer_views_to_clean.push_back(std::move(m_vertex_layout_storage));
|
||||
vk::get_resource_manager()->dispose(m_vertex_layout_storage);
|
||||
|
||||
const usz alloc_addr = m_vertex_layout_stream_info.offset;
|
||||
const usz view_size = (alloc_addr + m_texbuffer_view_size) > m_vertex_layout_ring_info.size() ? m_vertex_layout_ring_info.size() - alloc_addr : m_texbuffer_view_size;
|
||||
|
@ -2320,21 +2195,31 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_
|
|||
base_offset = 0;
|
||||
}
|
||||
|
||||
u8 data_size = 16;
|
||||
u32 draw_info[5];
|
||||
const u32 vertex_layout_offset = (id * 16) + (base_offset / 8);
|
||||
const volatile u32 constant_id_offset = static_cast<volatile u32>(m_xform_constants_dynamic_offset) / 16u;
|
||||
|
||||
draw_info[0] = vertex_info.vertex_index_base;
|
||||
draw_info[1] = vertex_info.vertex_index_offset;
|
||||
draw_info[2] = id;
|
||||
draw_info[3] = (id * 16) + (base_offset / 8);
|
||||
u32 push_constants[6];
|
||||
u32 data_length = 20;
|
||||
|
||||
push_constants[0] = vertex_info.vertex_index_base;
|
||||
push_constants[1] = vertex_info.vertex_index_offset;
|
||||
push_constants[2] = id;
|
||||
push_constants[3] = vertex_layout_offset;
|
||||
push_constants[4] = constant_id_offset;
|
||||
|
||||
if (vk::emulate_conditional_rendering())
|
||||
{
|
||||
draw_info[4] = cond_render_ctrl.hw_cond_active ? 1 : 0;
|
||||
data_size = 20;
|
||||
push_constants[5] = cond_render_ctrl.hw_cond_active ? 1 : 0;
|
||||
data_length += 4;
|
||||
}
|
||||
|
||||
vkCmdPushConstants(*m_current_command_buffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, data_size, draw_info);
|
||||
vkCmdPushConstants(
|
||||
*m_current_command_buffer,
|
||||
m_pipeline_layout,
|
||||
VK_SHADER_STAGE_VERTEX_BIT,
|
||||
0,
|
||||
data_length,
|
||||
push_constants);
|
||||
|
||||
const usz data_offset = (id * 128) + m_vertex_layout_stream_info.offset;
|
||||
auto dst = m_vertex_layout_ring_info.map(data_offset, 128);
|
||||
|
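The push-constant block written above is five u32 values (20 bytes), plus a sixth when conditional rendering is emulated (24 bytes), which matches the sizes reserved by the pipeline layout earlier in this diff. A struct-shaped sketch of that layout; the field names are illustrative, not taken from the source:

	#include <cstdint>

	struct draw_push_constants_t
	{
		std::uint32_t vertex_index_base;
		std::uint32_t vertex_index_offset;
		std::uint32_t draw_id;
		std::uint32_t vertex_layout_offset;       // (id * 16) + (base_offset / 8)
		std::uint32_t xform_constants_offset;     // m_xform_constants_dynamic_offset / 16, in vec4 units
		std::uint32_t conditional_render_enabled; // only written when vk::emulate_conditional_rendering()
	};
	static_assert(sizeof(draw_push_constants_t) == 24, "full block including the conditional-render flag");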
@ -2351,7 +2236,7 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_
|
|||
m_vertex_layout_ring_info.unmap();
|
||||
}
|
||||
|
||||
void VKGSRender::patch_transform_constants(rsx::context* ctx, u32 index, u32 count)
|
||||
void VKGSRender::patch_transform_constants(rsx::context* /*ctx*/, u32 index, u32 count)
|
||||
{
|
||||
if (!m_program || !m_vertex_prog)
|
||||
{
|
||||
|
@ -2366,83 +2251,16 @@ void VKGSRender::patch_transform_constants(rsx::context* ctx, u32 index, u32 cou
|
|||
return;
|
||||
}
|
||||
|
||||
// Hot-patching transform constants mid-draw (instanced draw)
|
||||
std::pair<VkDeviceSize, VkDeviceSize> data_range;
|
||||
void* data_source = nullptr;
|
||||
|
||||
if (m_vertex_prog->has_indexed_constants)
|
||||
// Buffer updates mid-pass violate the spec and destroy performance on NVIDIA
|
||||
auto allocate_mem = [&](usz size) -> std::pair<void*, usz>
|
||||
{
|
||||
// We're working with a full range. We can do a direct patch in this case since no index translation is required.
|
||||
const auto byte_count = count * 16;
|
||||
const auto byte_offset = index * 16;
|
||||
|
||||
data_range = { m_vertex_constants_buffer_info.offset + byte_offset, byte_count };
|
||||
data_source = ®S(ctx)->transform_constants[index];
|
||||
}
|
||||
else if (auto xform_id = m_vertex_prog->translate_constants_range(index, count); xform_id >= 0)
|
||||
{
|
||||
const auto write_offset = xform_id * 16;
|
||||
const auto byte_count = count * 16;
|
||||
|
||||
data_range = { m_vertex_constants_buffer_info.offset + write_offset, byte_count };
|
||||
data_source = ®S(ctx)->transform_constants[index];
|
||||
}
|
||||
else
|
||||
{
|
||||
// Indexed. This is a bit trickier. Use scratchpad to avoid UAF
|
||||
auto allocate_mem = [&](usz size) -> std::pair<void*, usz>
|
||||
{
|
||||
m_scratch_mem.resize(size);
|
||||
return { m_scratch_mem.data(), size };
|
||||
};
|
||||
|
||||
rsx::io_buffer iobuf(allocate_mem);
|
||||
upload_transform_constants(iobuf);
|
||||
|
||||
ensure(iobuf.size() >= m_vertex_constants_buffer_info.range);
|
||||
data_range = { m_vertex_constants_buffer_info.offset, m_vertex_constants_buffer_info.range };
|
||||
data_source = iobuf.data();
|
||||
}
|
||||
|
||||
// Preserving an active renderpass across a transfer operation is illegal vulkan. However, splitting up the CB into thousands of renderpasses incurs an overhead.
|
||||
// We cheat here for specific cases where we already know the driver can let us get away with this.
|
||||
static const std::set<vk::driver_vendor> s_allowed_vendors =
|
||||
{
|
||||
vk::driver_vendor::AMD,
|
||||
vk::driver_vendor::RADV,
|
||||
vk::driver_vendor::LAVAPIPE,
|
||||
vk::driver_vendor::NVIDIA,
|
||||
vk::driver_vendor::NVK
|
||||
const usz alignment = m_device->gpu().get_limits().minStorageBufferOffsetAlignment;
|
||||
m_xform_constants_dynamic_offset = m_transform_constants_ring_info.alloc<1>(utils::align(size, alignment));
|
||||
return std::make_pair(m_transform_constants_ring_info.map(m_xform_constants_dynamic_offset, size), size);
|
||||
};
|
||||
|
||||
const auto driver_vendor = vk::get_driver_vendor();
|
||||
const bool preserve_renderpass = !g_cfg.video.strict_rendering_mode && s_allowed_vendors.contains(driver_vendor);
|
||||
|
||||
vk::insert_buffer_memory_barrier(
|
||||
*m_current_command_buffer,
|
||||
m_vertex_constants_buffer_info.buffer,
|
||||
data_range.first,
|
||||
data_range.second,
|
||||
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
|
||||
VK_ACCESS_UNIFORM_READ_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
|
||||
preserve_renderpass);
|
||||
|
||||
// FIXME: This is illegal during a renderpass
|
||||
vkCmdUpdateBuffer(
|
||||
*m_current_command_buffer,
|
||||
m_vertex_constants_buffer_info.buffer,
|
||||
data_range.first,
|
||||
data_range.second,
|
||||
data_source);
|
||||
|
||||
vk::insert_buffer_memory_barrier(
|
||||
*m_current_command_buffer,
|
||||
m_vertex_constants_buffer_info.buffer,
|
||||
data_range.first,
|
||||
data_range.second,
|
||||
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
|
||||
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_UNIFORM_READ_BIT,
|
||||
preserve_renderpass);
|
||||
rsx::io_buffer iobuf(allocate_mem);
|
||||
upload_transform_constants(iobuf);
|
||||
}
|
||||
|
||||
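With this change the renderer no longer hot-patches the bound constants buffer with vkCmdUpdateBuffer between barriers; it carves a fresh slice out of the transform-constants ring heap, aligned to the device's minStorageBufferOffsetAlignment, and records the slice start in m_xform_constants_dynamic_offset so the descriptor can later be bound with a dynamic offset. A self-contained sketch of the rounding that utils::align performs in that path (illustrative, not the RPCS3 implementation):

#include <cassert>
#include <cstdint>

// Round a size or offset up to the next multiple of a power-of-two alignment,
// e.g. VkPhysicalDeviceLimits::minStorageBufferOffsetAlignment.
constexpr std::uint64_t align_up(std::uint64_t value, std::uint64_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

int main()
{
    // A 468-byte constants upload with a typical 64-byte alignment requirement
    // becomes a 512-byte slice; the returned offset stays 64-byte aligned.
    assert(align_up(468, 64) == 512);
    assert(align_up(512, 64) == 512);
    return 0;
}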
void VKGSRender::init_buffers(rsx::framebuffer_creation_context context, bool)
|
||||
|
@ -2504,7 +2322,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
|
|||
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
|
||||
{
|
||||
ensure(m_render_pass_open);
|
||||
m_device->_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
|
||||
_vkCmdEndConditionalRenderingEXT(*m_current_command_buffer);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -2832,9 +2650,6 @@ bool VKGSRender::scaled_image_from_memory(const rsx::blit_src_info& src, const r
|
|||
if (swapchain_unavailable)
|
||||
return false;
|
||||
|
||||
// Verify enough memory exists before attempting to handle data transfer
|
||||
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
|
||||
|
||||
if (m_texture_cache.blit(src, dst, interpolate, m_rtts, *m_current_command_buffer))
|
||||
{
|
||||
m_samplers_dirty.store(true);
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
|
||||
#include "Emu/RSX/GSRender.h"
|
||||
#include "Emu/RSX/Host/RSXDMAWriter.h"
|
||||
#include <functional>
|
||||
#include <initializer_list>
|
||||
|
||||
using namespace vk::vmm_allocation_pool_; // clang workaround.
|
||||
using namespace vk::upscaling_flags_; // ditto
|
||||
|
@ -32,21 +34,6 @@ namespace vk
|
|||
class VKGSRender : public GSRender, public ::rsx::reports::ZCULL_control
|
||||
{
|
||||
private:
|
||||
enum
|
||||
{
|
||||
VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE = 0x1,
|
||||
VK_HEAP_CHECK_VERTEX_STORAGE = 0x2,
|
||||
VK_HEAP_CHECK_VERTEX_ENV_STORAGE = 0x4,
|
||||
VK_HEAP_CHECK_FRAGMENT_ENV_STORAGE = 0x8,
|
||||
VK_HEAP_CHECK_TEXTURE_ENV_STORAGE = 0x10,
|
||||
VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE = 0x20,
|
||||
VK_HEAP_CHECK_TRANSFORM_CONSTANTS_STORAGE = 0x40,
|
||||
VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE = 0x80,
|
||||
|
||||
VK_HEAP_CHECK_MAX_ENUM = VK_HEAP_CHECK_FRAGMENT_CONSTANTS_STORAGE,
|
||||
VK_HEAP_CHECK_ALL = 0xFF,
|
||||
};
|
||||
|
||||
enum frame_context_state : u32
|
||||
{
|
||||
dirty = 1
|
||||
|
@ -160,6 +147,9 @@ private:
|
|||
VkDescriptorBufferInfo m_vertex_instructions_buffer_info {};
|
||||
VkDescriptorBufferInfo m_fragment_instructions_buffer_info {};
|
||||
|
||||
rsx::simple_array<u32> m_multidraw_parameters_buffer;
|
||||
u64 m_xform_constants_dynamic_offset = 0; // We manage transform_constants dynamic offset manually to alleviate performance penalty of doing a hot-patch of constants.
|
||||
|
||||
std::array<vk::frame_context_t, VK_MAX_ASYNC_FRAMES> frame_context_storage;
|
||||
//Temp frame context to use if the real frame queue is overburdened. Only used for storage
|
||||
vk::frame_context_t m_aux_frame_context;
|
||||
|
@ -228,8 +218,6 @@ private:
|
|||
VkRenderPass get_render_pass();
|
||||
|
||||
void update_draw_state();
|
||||
|
||||
void check_heap_status(u32 flags = VK_HEAP_CHECK_ALL);
|
||||
void check_present_status();
|
||||
|
||||
VkDescriptorSet allocate_descriptor_set();
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
#include "vkutils/commands.h"
|
||||
#include "vkutils/descriptors.h"
|
||||
#include "VKDataHeapManager.h"
|
||||
#include "VKResourceManager.h"
|
||||
|
||||
#include "Emu/RSX/Common/simple_array.hpp"
|
||||
|
@ -181,24 +182,10 @@ namespace vk
|
|||
|
||||
rsx::flags32_t flags = 0;
|
||||
|
||||
std::vector<std::unique_ptr<vk::buffer_view>> buffer_views_to_clean;
|
||||
|
||||
u32 present_image = -1;
|
||||
command_buffer_chunk* swap_command_buffer = nullptr;
|
||||
|
||||
// Heap pointers
|
||||
s64 attrib_heap_ptr = 0;
|
||||
s64 vtx_env_heap_ptr = 0;
|
||||
s64 frag_env_heap_ptr = 0;
|
||||
s64 frag_const_heap_ptr = 0;
|
||||
s64 vtx_const_heap_ptr = 0;
|
||||
s64 vtx_layout_heap_ptr = 0;
|
||||
s64 frag_texparam_heap_ptr = 0;
|
||||
s64 index_heap_ptr = 0;
|
||||
s64 texture_upload_heap_ptr = 0;
|
||||
s64 rasterizer_env_heap_ptr = 0;
|
||||
s64 instancing_heap_ptr = 0;
|
||||
|
||||
data_heap_manager::managed_heap_snapshot_t heap_snapshot;
|
||||
u64 last_frame_sync_time = 0;
|
||||
|
||||
// Copy shareable information
|
||||
|
@ -208,49 +195,19 @@ namespace vk
|
|||
acquire_signal_semaphore = other.acquire_signal_semaphore;
|
||||
descriptor_set.swap(other.descriptor_set);
|
||||
flags = other.flags;
|
||||
|
||||
attrib_heap_ptr = other.attrib_heap_ptr;
|
||||
vtx_env_heap_ptr = other.vtx_env_heap_ptr;
|
||||
frag_env_heap_ptr = other.frag_env_heap_ptr;
|
||||
vtx_layout_heap_ptr = other.vtx_layout_heap_ptr;
|
||||
frag_texparam_heap_ptr = other.frag_texparam_heap_ptr;
|
||||
frag_const_heap_ptr = other.frag_const_heap_ptr;
|
||||
vtx_const_heap_ptr = other.vtx_const_heap_ptr;
|
||||
index_heap_ptr = other.index_heap_ptr;
|
||||
texture_upload_heap_ptr = other.texture_upload_heap_ptr;
|
||||
rasterizer_env_heap_ptr = other.rasterizer_env_heap_ptr;
|
||||
instancing_heap_ptr = other.instancing_heap_ptr;
|
||||
heap_snapshot = other.heap_snapshot;
|
||||
}
|
||||
|
||||
// Exchange storage (non-copyable)
|
||||
void swap_storage(frame_context_t& other)
|
||||
void tag_frame_end()
|
||||
{
|
||||
std::swap(buffer_views_to_clean, other.buffer_views_to_clean);
|
||||
}
|
||||
|
||||
void tag_frame_end(
|
||||
s64 attrib_loc, s64 vtxenv_loc, s64 fragenv_loc, s64 vtxlayout_loc,
|
||||
s64 fragtex_loc, s64 fragconst_loc, s64 vtxconst_loc, s64 index_loc,
|
||||
s64 texture_loc, s64 rasterizer_loc, s64 instancing_loc)
|
||||
{
|
||||
attrib_heap_ptr = attrib_loc;
|
||||
vtx_env_heap_ptr = vtxenv_loc;
|
||||
frag_env_heap_ptr = fragenv_loc;
|
||||
vtx_layout_heap_ptr = vtxlayout_loc;
|
||||
frag_texparam_heap_ptr = fragtex_loc;
|
||||
frag_const_heap_ptr = fragconst_loc;
|
||||
vtx_const_heap_ptr = vtxconst_loc;
|
||||
index_heap_ptr = index_loc;
|
||||
texture_upload_heap_ptr = texture_loc;
|
||||
rasterizer_env_heap_ptr = rasterizer_loc;
|
||||
instancing_heap_ptr = instancing_loc;
|
||||
|
||||
heap_snapshot = data_heap_manager::get_heap_snapshot();
|
||||
last_frame_sync_time = rsx::get_shared_tag();
|
||||
}
|
||||
|
||||
void reset_heap_ptrs()
|
||||
{
|
||||
last_frame_sync_time = 0;
|
||||
heap_snapshot.clear();
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -155,17 +155,7 @@ void VKGSRender::advance_queued_frames()
|
|||
vk::remove_unused_framebuffers();
|
||||
|
||||
m_vertex_cache->purge();
|
||||
m_current_frame->tag_frame_end(m_attrib_ring_info.get_current_put_pos_minus_one(),
|
||||
m_vertex_env_ring_info.get_current_put_pos_minus_one(),
|
||||
m_fragment_env_ring_info.get_current_put_pos_minus_one(),
|
||||
m_vertex_layout_ring_info.get_current_put_pos_minus_one(),
|
||||
m_fragment_texture_params_ring_info.get_current_put_pos_minus_one(),
|
||||
m_fragment_constants_ring_info.get_current_put_pos_minus_one(),
|
||||
m_transform_constants_ring_info.get_current_put_pos_minus_one(),
|
||||
m_index_buffer_ring_info.get_current_put_pos_minus_one(),
|
||||
m_texture_upload_buffer_ring_info.get_current_put_pos_minus_one(),
|
||||
m_raster_env_ring_info.get_current_put_pos_minus_one(),
|
||||
m_instancing_buffer_ring_info.get_current_put_pos_minus_one());
|
||||
m_current_frame->tag_frame_end();
|
||||
|
||||
m_queued_frames.push_back(m_current_frame);
|
||||
ensure(m_queued_frames.size() <= VK_MAX_ASYNC_FRAMES);
|
||||
|
@ -219,7 +209,6 @@ void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx)
|
|||
}
|
||||
|
||||
// Resource cleanup.
|
||||
// TODO: This is some outdated crap.
|
||||
{
|
||||
if (m_overlay_manager && m_overlay_manager->has_dirty())
|
||||
{
|
||||
|
@ -243,45 +232,12 @@ void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx)
|
|||
|
||||
vk::reset_global_resources();
|
||||
|
||||
ctx->buffer_views_to_clean.clear();
|
||||
|
||||
const auto shadermode = g_cfg.video.shadermode.get();
|
||||
|
||||
if (shadermode == shader_mode::async_with_interpreter || shadermode == shader_mode::interpreter_only)
|
||||
{
|
||||
// TODO: This is jank AF
|
||||
m_vertex_instructions_buffer.reset_allocation_stats();
|
||||
m_fragment_instructions_buffer.reset_allocation_stats();
|
||||
}
|
||||
|
||||
if (ctx->last_frame_sync_time > m_last_heap_sync_time)
|
||||
{
|
||||
m_last_heap_sync_time = ctx->last_frame_sync_time;
|
||||
|
||||
// Heap cleanup; deallocates memory consumed by the frame if it is still held
|
||||
m_attrib_ring_info.m_get_pos = ctx->attrib_heap_ptr;
|
||||
m_vertex_env_ring_info.m_get_pos = ctx->vtx_env_heap_ptr;
|
||||
m_fragment_env_ring_info.m_get_pos = ctx->frag_env_heap_ptr;
|
||||
m_fragment_constants_ring_info.m_get_pos = ctx->frag_const_heap_ptr;
|
||||
m_transform_constants_ring_info.m_get_pos = ctx->vtx_const_heap_ptr;
|
||||
m_vertex_layout_ring_info.m_get_pos = ctx->vtx_layout_heap_ptr;
|
||||
m_fragment_texture_params_ring_info.m_get_pos = ctx->frag_texparam_heap_ptr;
|
||||
m_index_buffer_ring_info.m_get_pos = ctx->index_heap_ptr;
|
||||
m_texture_upload_buffer_ring_info.m_get_pos = ctx->texture_upload_heap_ptr;
|
||||
m_raster_env_ring_info.m_get_pos = ctx->rasterizer_env_heap_ptr;
|
||||
m_instancing_buffer_ring_info.m_get_pos = ctx->instancing_heap_ptr;
|
||||
|
||||
m_attrib_ring_info.notify();
|
||||
m_vertex_env_ring_info.notify();
|
||||
m_fragment_env_ring_info.notify();
|
||||
m_fragment_constants_ring_info.notify();
|
||||
m_transform_constants_ring_info.notify();
|
||||
m_vertex_layout_ring_info.notify();
|
||||
m_fragment_texture_params_ring_info.notify();
|
||||
m_index_buffer_ring_info.notify();
|
||||
m_texture_upload_buffer_ring_info.notify();
|
||||
m_raster_env_ring_info.notify();
|
||||
m_instancing_buffer_ring_info.notify();
|
||||
vk::data_heap_manager::restore_snapshot(ctx->heap_snapshot);
|
||||
}
|
||||
}
|
||||
|
||||
|
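With this cleanup the per-frame bookkeeping shrinks from eleven individually copied ring-buffer put positions to a single opaque snapshot taken by the data heap manager when the frame is tagged, and restored once the frame's GPU work is known to be finished, which is what releases the memory that frame consumed. A toy, self-contained illustration of the snapshot/restore idea (all names are made up; the real API is vk::data_heap_manager):

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

// Toy model: each managed heap records its put position when the frame is tagged;
// restoring the snapshot after the frame retires moves the get position forward,
// releasing everything that frame allocated.
struct toy_heap { std::uint64_t put = 0, get = 0; };
using heap_snapshot_t = std::unordered_map<toy_heap*, std::uint64_t>;

heap_snapshot_t take_snapshot(const std::vector<toy_heap*>& heaps)
{
    heap_snapshot_t snapshot;
    for (auto* heap : heaps) snapshot[heap] = heap->put;
    return snapshot;
}

void restore_snapshot(const heap_snapshot_t& snapshot)
{
    for (const auto& [heap, put_pos] : snapshot) heap->get = put_pos;
}

int main()
{
    toy_heap attribs, constants;
    const std::vector<toy_heap*> heaps{ &attribs, &constants };

    attribs.put = 4096; constants.put = 512;    // allocations made during the frame
    const auto snapshot = take_snapshot(heaps); // tag_frame_end()

    attribs.put = 8192;                         // the next frame keeps allocating
    restore_snapshot(snapshot);                 // frame retired: reclaim its memory
    assert(attribs.get == 4096 && constants.get == 512);
    return 0;
}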
@ -450,7 +406,6 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
|
|||
}
|
||||
|
||||
// Swap aux storage and current frame; aux storage should always be ready for use
|
||||
m_current_frame->swap_storage(m_aux_frame_context);
|
||||
m_current_frame->grab_resources(m_aux_frame_context);
|
||||
}
|
||||
else if (m_current_frame->swap_command_buffer)
|
||||
|
|
44
rpcs3/Emu/RSX/VK/VKProcTable.h
Normal file
|
@ -0,0 +1,44 @@
// Wrangler for Vulkan functions.
// TODO: Eventually, we shall declare vulkan with NO_PROTOTYPES and wrap everything here for android multi-driver support.
// For now, we just use it for extensions since we're on VK_1_0

#define VK_DECL_EXTERN(func) extern PFN_##func _##func
#define VK_DECL_LOCAL(func) PFN_##func _##func

#if defined(DECLARE_VK_FUNCTION_HEADER)
#define VK_FUNC VK_DECL_EXTERN
#elif defined(DECLARE_VK_FUNCTION_BODY)
#define VK_FUNC VK_DECL_LOCAL
#elif !defined(VK_FUNC)
#error "VK_FUNC is not defined"
#endif

// EXT_conditional_rendering
VK_FUNC(vkCmdBeginConditionalRenderingEXT);
VK_FUNC(vkCmdEndConditionalRenderingEXT);

// EXT_debug_utils
VK_FUNC(vkSetDebugUtilsObjectNameEXT);
VK_FUNC(vkQueueInsertDebugUtilsLabelEXT);
VK_FUNC(vkCmdInsertDebugUtilsLabelEXT);

// KHR_synchronization2
VK_FUNC(vkCmdSetEvent2KHR);
VK_FUNC(vkCmdWaitEvents2KHR);
VK_FUNC(vkCmdPipelineBarrier2KHR);

// EXT_device_fault
VK_FUNC(vkGetDeviceFaultInfoEXT);

// EXT_multi_draw
VK_FUNC(vkCmdDrawMultiEXT);
VK_FUNC(vkCmdDrawMultiIndexedEXT);

// EXT_external_memory_host
VK_FUNC(vkGetMemoryHostPointerPropertiesEXT);

#undef VK_FUNC
#undef DECLARE_VK_FUNCTION_HEADER
#undef DECLARE_VK_FUNCTION_BODY
#undef VK_DECL_EXTERN
#undef VK_DECL_LOCAL
|
|
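VKProcTable.h is an X-macro style "function wrangler": including it after defining DECLARE_VK_FUNCTION_HEADER emits extern pointer declarations, including it after DECLARE_VK_FUNCTION_BODY emits the definitions, and including it with a caller-supplied VK_FUNC emits arbitrary per-entry code — the loader in VulkanAPI.cpp further down uses that to resolve each entry through vkGetDeviceProcAddr. A minimal, Vulkan-free sketch of the same idea (condensed into a list macro so it fits in one runnable file; every name here is invented):

#include <cstdio>

using proc_t = void (*)();

// Stand-in for the proc table: the list of entry points, emitted through FUNC().
#define PROC_TABLE(FUNC) \
    FUNC(draw_multi)     \
    FUNC(set_event)

// "Body" expansion: define one function pointer per entry.
#define DEFINE_PTR(name) proc_t _##name = nullptr;
PROC_TABLE(DEFINE_PTR)
#undef DEFINE_PTR

// Stand-in for vkGetDeviceProcAddr: resolve an entry point by its string name.
proc_t fake_get_proc_addr(const char* name)
{
    std::printf("resolving %s\n", name);
    return nullptr;
}

// "Loader" expansion: assign every pointer at init time, as VulkanAPI.cpp does.
void init_proc_table()
{
#define LOAD_PTR(name) _##name = fake_get_proc_addr(#name);
    PROC_TABLE(LOAD_PTR)
#undef LOAD_PTR
}

int main()
{
    init_proc_table();
    return 0;
}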
@ -4,6 +4,8 @@
|
|||
#include "VKRenderPass.h"
|
||||
#include "vkutils/image.h"
|
||||
|
||||
#include "Emu/RSX/Common/unordered_map.hpp"
|
||||
|
||||
namespace vk
|
||||
{
|
||||
struct active_renderpass_info_t
|
||||
|
@ -14,10 +16,10 @@ namespace vk
|
|||
|
||||
atomic_t<u64> g_cached_renderpass_key = 0;
|
||||
VkRenderPass g_cached_renderpass = VK_NULL_HANDLE;
|
||||
std::unordered_map<VkCommandBuffer, active_renderpass_info_t> g_current_renderpass;
|
||||
rsx::unordered_map<VkCommandBuffer, active_renderpass_info_t> g_current_renderpass;
|
||||
|
||||
shared_mutex g_renderpass_cache_mutex;
|
||||
std::unordered_map<u64, VkRenderPass> g_renderpass_cache;
|
||||
rsx::unordered_map<u64, VkRenderPass> g_renderpass_cache;
|
||||
|
||||
// Key structure
|
||||
// 0-7 color_format
|
||||
|
|
|
@ -277,7 +277,7 @@ namespace vk
|
|||
|
||||
static_parameters_width = 3;
|
||||
|
||||
build(false, true, false);
|
||||
build(false, true, true);
|
||||
}
|
||||
|
||||
void get_dynamic_state_entries(std::vector<VkDynamicState>& state_descriptors) override
|
||||
|
|
|
@ -330,21 +330,7 @@ namespace vk
|
|||
idx++;
|
||||
bindings.resize(idx);
|
||||
|
||||
// Compile descriptor pool sizes
|
||||
const u32 num_ubo = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ? y.descriptorCount : 0)));
|
||||
const u32 num_texel_buffers = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ? y.descriptorCount : 0)));
|
||||
const u32 num_combined_image_sampler = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ? y.descriptorCount : 0)));
|
||||
const u32 num_ssbo = bindings.reduce(0, FN(x + (y.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ? y.descriptorCount : 0)));
|
||||
|
||||
ensure(num_ubo > 0 && num_texel_buffers > 0 && num_combined_image_sampler > 0 && num_ssbo > 0);
|
||||
|
||||
m_descriptor_pool_sizes =
|
||||
{
|
||||
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , num_ubo },
|
||||
{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , num_texel_buffers },
|
||||
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , num_combined_image_sampler },
|
||||
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_ssbo }
|
||||
};
|
||||
m_descriptor_pool_sizes = get_descriptor_pool_sizes(bindings);
|
||||
|
||||
std::array<VkPushConstantRange, 1> push_constants;
|
||||
push_constants[0].offset = 0;
|
||||
|
|
|
@ -320,13 +320,13 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
|
|||
if (m_persistent_attribute_storage &&
|
||||
m_persistent_attribute_storage->info.buffer != m_attrib_ring_info.heap->value)
|
||||
{
|
||||
m_current_frame->buffer_views_to_clean.push_back(std::move(m_persistent_attribute_storage));
|
||||
vk::get_resource_manager()->dispose(m_persistent_attribute_storage);
|
||||
}
|
||||
|
||||
if (m_volatile_attribute_storage &&
|
||||
m_volatile_attribute_storage->info.buffer != m_attrib_ring_info.heap->value)
|
||||
{
|
||||
m_current_frame->buffer_views_to_clean.push_back(std::move(m_volatile_attribute_storage));
|
||||
vk::get_resource_manager()->dispose(m_volatile_attribute_storage);
|
||||
}
|
||||
|
||||
vk::clear_status_interrupt(vk::heap_changed);
|
||||
|
@ -337,9 +337,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
|
|||
if (!m_persistent_attribute_storage || !m_persistent_attribute_storage->in_range(persistent_range_base, required.first, persistent_range_base))
|
||||
{
|
||||
ensure(m_texbuffer_view_size >= required.first); // "Incompatible driver (MacOS?)"
|
||||
|
||||
if (m_persistent_attribute_storage)
|
||||
m_current_frame->buffer_views_to_clean.push_back(std::move(m_persistent_attribute_storage));
|
||||
vk::get_resource_manager()->dispose(m_persistent_attribute_storage);
|
||||
|
||||
//View 64M blocks at a time (different drivers will only allow a fixed viewable heap size, 64M should be safe)
|
||||
const usz view_size = (persistent_range_base + m_texbuffer_view_size) > m_attrib_ring_info.size() ? m_attrib_ring_info.size() - persistent_range_base : m_texbuffer_view_size;
|
||||
|
@ -353,9 +351,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
|
|||
if (!m_volatile_attribute_storage || !m_volatile_attribute_storage->in_range(volatile_range_base, required.second, volatile_range_base))
|
||||
{
|
||||
ensure(m_texbuffer_view_size >= required.second); // "Incompatible driver (MacOS?)"
|
||||
|
||||
if (m_volatile_attribute_storage)
|
||||
m_current_frame->buffer_views_to_clean.push_back(std::move(m_volatile_attribute_storage));
|
||||
vk::get_resource_manager()->dispose(m_volatile_attribute_storage);
|
||||
|
||||
const usz view_size = (volatile_range_base + m_texbuffer_view_size) > m_attrib_ring_info.size() ? m_attrib_ring_info.size() - volatile_range_base : m_texbuffer_view_size;
|
||||
m_volatile_attribute_storage = std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, VK_FORMAT_R8_UINT, volatile_range_base, view_size);
|
||||
|
|
|
@ -29,8 +29,9 @@ std::string VKVertexDecompilerThread::compareFunction(COMPARE f, const std::stri
|
|||
|
||||
void VKVertexDecompilerThread::insertHeader(std::stringstream &OS)
|
||||
{
|
||||
OS << "#version 450\n\n";
|
||||
OS << "#extension GL_ARB_separate_shader_objects : enable\n\n";
|
||||
OS <<
|
||||
"#version 450\n\n"
|
||||
"#extension GL_ARB_separate_shader_objects : enable\n\n";
|
||||
|
||||
OS <<
|
||||
"layout(std140, set = 0, binding = 0) uniform VertexContextBuffer\n"
|
||||
|
@ -59,7 +60,8 @@ void VKVertexDecompilerThread::insertHeader(std::stringstream &OS)
|
|||
" uint vertex_base_index;\n"
|
||||
" uint vertex_index_offset;\n"
|
||||
" uint draw_id;\n"
|
||||
" uint layout_ptr_offset;\n";
|
||||
" uint layout_ptr_offset;\n"
|
||||
" uint xform_constants_offset;\n";
|
||||
|
||||
if (m_device_props.emulate_conditional_rendering)
|
||||
{
|
||||
|
@ -115,15 +117,15 @@ void VKVertexDecompilerThread::insertConstants(std::stringstream & OS, const std
|
|||
{
|
||||
if (!(m_prog.ctrl & RSX_SHADER_CONTROL_INSTANCED_CONSTANTS))
|
||||
{
|
||||
OS << "layout(std140, set=0, binding=" << static_cast<int>(m_binding_table.vertex_constant_buffers_bind_slot) << ") uniform VertexConstantsBuffer\n";
|
||||
OS << "layout(std430, set=0, binding=" << static_cast<int>(m_binding_table.vertex_constant_buffers_bind_slot) << ") readonly buffer VertexConstantsBuffer\n";
|
||||
OS << "{\n";
|
||||
OS << " vec4 " << PI.name << ";\n";
|
||||
OS << " vec4 vc[];\n";
|
||||
OS << "};\n\n";
|
||||
|
||||
in.location = m_binding_table.vertex_constant_buffers_bind_slot;
|
||||
in.domain = glsl::glsl_vertex_program;
|
||||
in.name = "VertexConstantsBuffer";
|
||||
in.type = vk::glsl::input_type_uniform_buffer;
|
||||
in.type = vk::glsl::input_type_storage_buffer;
|
||||
|
||||
inputs.push_back(in);
|
||||
continue;
|
||||
|
|
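For the non-instanced path the decompiler now declares the vertex constants as a std430 readonly storage buffer with an unsized vc[] array instead of a fixed std140 uniform block; the unsized array plus a dynamic descriptor offset lets the renderer point the shader at whichever slice of the transform-constants heap the draw allocated, without std140 padding or UBO size limits. Roughly the interface block the generated shader ends up with (illustrative only; the real binding index comes from the pipeline binding table):

#include <cstdio>

// Illustrative GLSL, embedded as a string the same way the decompiler emits it.
// The binding index (2 here) is a placeholder for vertex_constant_buffers_bind_slot.
static const char* vertex_constants_decl =
    "layout(std430, set = 0, binding = 2) readonly buffer VertexConstantsBuffer\n"
    "{\n"
    "    vec4 vc[];\n"
    "};\n";

int main()
{
    std::puts(vertex_constants_decl);
    return 0;
}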
20
rpcs3/Emu/RSX/VK/VulkanAPI.cpp
Normal file
|
@ -0,0 +1,20 @@
#include "stdafx.h"
#include "VulkanAPI.h"

#include "vkutils/device.h"

#define DECLARE_VK_FUNCTION_BODY
#include "VKProcTable.h"

namespace vk
{
	const render_device* get_current_renderer();

	void init()
	{
		auto pdev = get_current_renderer();

#define VK_FUNC(func) _##func = reinterpret_cast<PFN_##func>(vkGetDeviceProcAddr(*pdev, #func))
#include "VKProcTable.h"
	}
}
|
|
@ -26,3 +26,11 @@
|
|||
#if VK_HEADER_VERSION < 287
|
||||
constexpr VkDriverId VK_DRIVER_ID_MESA_HONEYKRISP = static_cast<VkDriverId>(26);
|
||||
#endif
|
||||
|
||||
#define DECLARE_VK_FUNCTION_HEADER 1
|
||||
#include "VKProcTable.h"
|
||||
|
||||
namespace vk
|
||||
{
|
||||
void init();
|
||||
}
|
||||
|
|
|
@ -82,11 +82,11 @@ namespace vk
|
|||
CHECK_RESULT(vkCreateBuffer(m_device, &info, nullptr, &value));
|
||||
|
||||
auto& memory_map = dev.get_memory_mapping();
|
||||
ensure(memory_map._vkGetMemoryHostPointerPropertiesEXT);
|
||||
ensure(_vkGetMemoryHostPointerPropertiesEXT);
|
||||
|
||||
VkMemoryHostPointerPropertiesEXT memory_properties{};
|
||||
memory_properties.sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT;
|
||||
CHECK_RESULT(memory_map._vkGetMemoryHostPointerPropertiesEXT(dev, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, host_pointer, &memory_properties));
|
||||
CHECK_RESULT(_vkGetMemoryHostPointerPropertiesEXT(dev, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, host_pointer, &memory_properties));
|
||||
|
||||
VkMemoryRequirements memory_reqs;
|
||||
vkGetBufferMemoryRequirements(m_device, value, &memory_reqs);
|
||||
|
|
|
@ -42,7 +42,7 @@ namespace vk
|
|||
|
||||
queue_submit_t(const queue_submit_t& other)
|
||||
{
|
||||
std::memcpy(this, &other, sizeof(queue_submit_t));
|
||||
std::memcpy(static_cast<void*>(this), &other, sizeof(queue_submit_t));
|
||||
}
|
||||
|
||||
inline queue_submit_t& wait_on(VkSemaphore semaphore, VkPipelineStageFlags stage)
|
||||
|
|
|
@ -51,13 +51,6 @@ namespace vk
|
|||
|
||||
bool data_heap::grow(usz size)
|
||||
{
|
||||
if (shadow)
|
||||
{
|
||||
// Shadowed. Growing this can be messy as it requires double allocation (macOS only)
|
||||
rsx_log.error("[%s] Auto-grow of shadowed heaps is not currently supported. This error should typically only be seen on MacOS.", m_name);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Create new heap. All sizes are aligned up by 64M, up to 1GiB
|
||||
const usz size_limit = 1024 * 0x100000;
|
||||
usz aligned_new_size = utils::align(m_size + size, 64 * 0x100000);
|
||||
|
@ -88,7 +81,19 @@ namespace vk
|
|||
::data_heap::init(aligned_new_size, m_name, m_min_guard_size);
|
||||
|
||||
// Discard old heap and create a new one. Old heap will be garbage collected when no longer needed
|
||||
get_resource_manager()->dispose(heap);
|
||||
auto gc = get_resource_manager();
|
||||
if (shadow)
|
||||
{
|
||||
rsx_log.warning("Buffer usage %u is not heap-compatible using this driver, explicit staging buffer in use", usage);
|
||||
|
||||
gc->dispose(shadow);
|
||||
shadow = std::make_unique<buffer>(*g_render_device, aligned_new_size, memory_index, memory_flags, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0, VMM_ALLOCATION_POOL_SYSTEM);
|
||||
usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
|
||||
memory_flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
memory_index = memory_map.device_local;
|
||||
}
|
||||
|
||||
gc->dispose(heap);
|
||||
heap = std::make_unique<buffer>(*g_render_device, aligned_new_size, memory_index, memory_flags, usage, 0, VMM_ALLOCATION_POOL_SYSTEM);
|
||||
|
||||
if (notify_on_grow)
|
||||
|
@ -154,20 +159,6 @@ namespace vk
|
|||
return !dirty_ranges.empty();
|
||||
}
|
||||
|
||||
bool data_heap::is_critical() const
|
||||
{
|
||||
if (!::data_heap::is_critical())
|
||||
return false;
|
||||
|
||||
// By default, allow the size to grow up to 8x larger
|
||||
// This value is arbitrary, theoretically it is possible to allow infinite stretching to improve performance
|
||||
const usz soft_limit = initial_size * 8;
|
||||
if ((m_size + m_min_guard_size) < soft_limit)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
data_heap* get_upload_heap()
|
||||
{
|
||||
if (!g_upload_heap.heap)
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include "commands.h"
|
||||
|
||||
#include <memory>
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
|
||||
namespace vk
|
||||
|
@ -38,11 +39,19 @@ namespace vk
|
|||
void* map(usz offset, usz size);
|
||||
void unmap(bool force = false);
|
||||
|
||||
template<int Alignment, typename T = char>
|
||||
requires std::is_trivially_destructible_v<T>
|
||||
std::pair<usz, T*> alloc_and_map(usz count)
|
||||
{
|
||||
const auto size_bytes = count * sizeof(T);
|
||||
const auto addr = alloc<Alignment>(size_bytes);
|
||||
return { addr, reinterpret_cast<T*>(map(addr, size_bytes)) };
|
||||
}
|
||||
|
||||
void sync(const vk::command_buffer& cmd);
|
||||
|
||||
// Properties
|
||||
bool is_dirty() const;
|
||||
bool is_critical() const override;
|
||||
};
|
||||
|
||||
extern data_heap* get_upload_heap();
|
||||
|
|
|
@ -430,6 +430,17 @@ namespace vk
|
|||
}
|
||||
}
|
||||
|
||||
void descriptor_set::push(const descriptor_set_dynamic_offset_t& offset)
|
||||
{
|
||||
ensure(offset.location >= 0 && offset.location <= 16);
|
||||
while (m_dynamic_offsets.size() < (static_cast<u32>(offset.location) + 1u))
|
||||
{
|
||||
m_dynamic_offsets.push_back(0);
|
||||
}
|
||||
|
||||
m_dynamic_offsets[offset.location] = offset.value;
|
||||
}
|
||||
|
||||
void descriptor_set::bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout)
|
||||
{
|
||||
if ((m_push_type_mask & ~m_update_after_bind_mask) || (m_pending_writes.size() >= max_cache_size))
|
||||
|
@ -437,7 +448,7 @@ namespace vk
|
|||
flush();
|
||||
}
|
||||
|
||||
vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, 0, nullptr);
|
||||
vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, ::size32(m_dynamic_offsets), m_dynamic_offsets.data());
|
||||
}
|
||||
|
||||
void descriptor_set::flush()
|
||||
|
|
|
@ -27,6 +27,12 @@ namespace vk
|
|||
}
|
||||
};
|
||||
|
||||
struct descriptor_set_dynamic_offset_t
|
||||
{
|
||||
int location;
|
||||
u32 value;
|
||||
};
|
||||
|
||||
class descriptor_pool
|
||||
{
|
||||
public:
|
||||
|
@ -95,6 +101,7 @@ namespace vk
|
|||
void push(const VkDescriptorImageInfo& image_info, VkDescriptorType type, u32 binding);
|
||||
void push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding);
|
||||
void push(rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask = umax);
|
||||
void push(const descriptor_set_dynamic_offset_t& offset);
|
||||
|
||||
void bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);
|
||||
|
||||
|
@ -109,6 +116,7 @@ namespace vk
|
|||
rsx::simple_array<VkBufferView> m_buffer_view_pool;
|
||||
rsx::simple_array<VkDescriptorBufferInfo> m_buffer_info_pool;
|
||||
rsx::simple_array<VkDescriptorImageInfo> m_image_info_pool;
|
||||
rsx::simple_array<u32> m_dynamic_offsets;
|
||||
|
||||
#ifdef __clang__
|
||||
// Clang (pre 16.x) does not support LWG 2089, std::construct_at for POD types
|
||||
|
|
|
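descriptor_set now carries a flat array of dynamic offsets, grown on demand and indexed by the location supplied with each push; the whole array is handed to vkCmdBindDescriptorSets, where Vulkan consumes the values in the order of the set's dynamic buffer bindings. A small self-contained sketch of that bookkeeping (std::vector standing in for rsx::simple_array; not the actual class):

#include <cassert>
#include <cstdint>
#include <vector>

struct dynamic_offset_t { int location; std::uint32_t value; };

// Grow-on-demand slot assignment, mirroring the push() overload added above.
void push_offset(std::vector<std::uint32_t>& offsets, const dynamic_offset_t& offset)
{
    assert(offset.location >= 0 && offset.location <= 16);
    if (offsets.size() < static_cast<std::size_t>(offset.location) + 1)
    {
        offsets.resize(static_cast<std::size_t>(offset.location) + 1, 0); // unset slots bind at offset 0
    }
    offsets[static_cast<std::size_t>(offset.location)] = offset.value;
}

int main()
{
    std::vector<std::uint32_t> offsets;
    push_offset(offsets, { 0, 0x200 }); // e.g. the start of this draw's constants slice
    assert(offsets.size() == 1 && offsets[0] == 0x200);
    return 0;
}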
@ -2,6 +2,7 @@
|
|||
#include "instance.h"
|
||||
#include "util/logs.hpp"
|
||||
#include "Emu/system_config.h"
|
||||
#include <vulkan/vulkan_core.h>
|
||||
|
||||
namespace vk
|
||||
{
|
||||
|
@ -36,6 +37,7 @@ namespace vk
|
|||
VkPhysicalDeviceCustomBorderColorFeaturesEXT custom_border_color_info{};
|
||||
VkPhysicalDeviceBorderColorSwizzleFeaturesEXT border_color_swizzle_info{};
|
||||
VkPhysicalDeviceFaultFeaturesEXT device_fault_info{};
|
||||
VkPhysicalDeviceMultiDrawFeaturesEXT multidraw_info{};
|
||||
|
||||
if (device_extensions.is_supported(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME))
|
||||
{
|
||||
|
@ -86,6 +88,13 @@ namespace vk
|
|||
features2.pNext = &device_fault_info;
|
||||
}
|
||||
|
||||
if (device_extensions.is_supported(VK_EXT_MULTI_DRAW_EXTENSION_NAME))
|
||||
{
|
||||
multidraw_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT;
|
||||
multidraw_info.pNext = features2.pNext;
|
||||
features2.pNext = &multidraw_info;
|
||||
}
|
||||
|
||||
auto _vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR"));
|
||||
ensure(_vkGetPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddress failed to find entry point!"
|
||||
_vkGetPhysicalDeviceFeatures2KHR(dev, &features2);
|
||||
|
@ -98,6 +107,9 @@ namespace vk
|
|||
custom_border_color_support.swizzle_extension_supported = border_color_swizzle_info.sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT;
|
||||
custom_border_color_support.require_border_color_remap = !border_color_swizzle_info.borderColorSwizzleFromImage;
|
||||
|
||||
multidraw_support.supported = !!multidraw_info.multiDraw;
|
||||
multidraw_support.max_batch_size = 65536;
|
||||
|
||||
optional_features_support.barycentric_coords = !!shader_barycentric_info.fragmentShaderBarycentric;
|
||||
optional_features_support.framebuffer_loops = !!fbo_loops_info.attachmentFeedbackLoopLayout;
|
||||
optional_features_support.extended_device_fault = !!device_fault_info.deviceFault;
|
||||
|
@ -164,6 +176,7 @@ namespace vk
|
|||
properties2.pNext = nullptr;
|
||||
|
||||
VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props{};
|
||||
VkPhysicalDeviceMultiDrawPropertiesEXT multidraw_props{};
|
||||
|
||||
if (descriptor_indexing_support)
|
||||
{
|
||||
|
@ -172,6 +185,13 @@ namespace vk
|
|||
properties2.pNext = &descriptor_indexing_props;
|
||||
}
|
||||
|
||||
if (multidraw_support.supported)
|
||||
{
|
||||
multidraw_props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT;
|
||||
multidraw_props.pNext = properties2.pNext;
|
||||
properties2.pNext = &multidraw_props;
|
||||
}
|
||||
|
||||
if (device_extensions.is_supported(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME))
|
||||
{
|
||||
driver_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;
|
||||
|
@ -198,6 +218,17 @@ namespace vk
|
|||
descriptor_max_draw_calls = 8192;
|
||||
}
|
||||
}
|
||||
|
||||
if (multidraw_support.supported)
|
||||
{
|
||||
multidraw_support.max_batch_size = multidraw_props.maxMultiDrawCount;
|
||||
|
||||
if (!multidraw_props.maxMultiDrawCount)
|
||||
{
|
||||
rsx_log.error("Physical device reports 0 support maxMultiDraw count. Multidraw support will be disabled.");
|
||||
multidraw_support.supported = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
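The device layer now requests VK_EXT_multi_draw when available, enables the multiDraw feature, and records maxMultiDrawCount, disabling the path entirely if a driver reports 0. The renderer can then pack many consecutive sub-draws into a single vkCmdDrawMultiEXT call, splitting the list whenever it exceeds the reported limit. A self-contained sketch of that batching, with the Vulkan call abstracted behind a callback (nothing here touches Vulkan):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>

// Split a list of sub-draws into chunks no larger than the limit reported in
// VkPhysicalDeviceMultiDrawPropertiesEXT::maxMultiDrawCount. The 'submit'
// callback stands in for the actual vkCmdDrawMultiEXT call.
struct sub_draw { std::uint32_t first_vertex, vertex_count; };

void draw_multi_batched(const std::vector<sub_draw>& draws, std::uint32_t max_batch_size,
                        const std::function<void(const sub_draw*, std::uint32_t)>& submit)
{
    assert(max_batch_size > 0); // a reported limit of 0 means the feature is unusable
    for (std::size_t i = 0; i < draws.size(); i += max_batch_size)
    {
        const auto count = static_cast<std::uint32_t>(
            std::min<std::size_t>(max_batch_size, draws.size() - i));
        submit(draws.data() + i, count);
    }
}

int main()
{
    const std::vector<sub_draw> draws(70000, { 0u, 3u });
    std::uint32_t calls = 0;
    draw_multi_batched(draws, 65536, [&](const sub_draw*, std::uint32_t) { ++calls; });
    assert(calls == 2); // 65536 sub-draws in the first call, 4464 in the second
    return 0;
}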
@ -493,6 +524,11 @@ namespace vk
|
|||
requested_extensions.push_back(VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);
|
||||
}
|
||||
|
||||
if (pgpu->multidraw_support)
|
||||
{
|
||||
requested_extensions.push_back(VK_EXT_MULTI_DRAW_EXTENSION_NAME);
|
||||
}
|
||||
|
||||
if (pgpu->optional_features_support.conditional_rendering)
|
||||
{
|
||||
requested_extensions.push_back(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME);
|
||||
|
@ -723,6 +759,15 @@ namespace vk
|
|||
device.pNext = &custom_border_color_features;
|
||||
}
|
||||
|
||||
VkPhysicalDeviceMultiDrawFeaturesEXT multidraw_features{};
|
||||
if (pgpu->multidraw_support)
|
||||
{
|
||||
multidraw_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT;
|
||||
multidraw_features.multiDraw = VK_TRUE;
|
||||
multidraw_features.pNext = const_cast<void*>(device.pNext);
|
||||
device.pNext = &multidraw_features;
|
||||
}
|
||||
|
||||
VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT fbo_loop_features{};
|
||||
if (pgpu->optional_features_support.framebuffer_loops)
|
||||
{
|
||||
|
@ -791,41 +836,10 @@ namespace vk
|
|||
vkGetDeviceQueue(dev, present_queue_idx, 0, &m_present_queue);
|
||||
}
|
||||
|
||||
// Import optional function endpoints
|
||||
if (pgpu->optional_features_support.conditional_rendering)
|
||||
{
|
||||
_vkCmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdBeginConditionalRenderingEXT"));
|
||||
_vkCmdEndConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdEndConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdEndConditionalRenderingEXT"));
|
||||
}
|
||||
|
||||
if (pgpu->optional_features_support.debug_utils)
|
||||
{
|
||||
_vkSetDebugUtilsObjectNameEXT = reinterpret_cast<PFN_vkSetDebugUtilsObjectNameEXT>(vkGetDeviceProcAddr(dev, "vkSetDebugUtilsObjectNameEXT"));
|
||||
_vkQueueInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkQueueInsertDebugUtilsLabelEXT>(vkGetDeviceProcAddr(dev, "vkQueueInsertDebugUtilsLabelEXT"));
|
||||
_vkCmdInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkCmdInsertDebugUtilsLabelEXT>(vkGetDeviceProcAddr(dev, "vkCmdInsertDebugUtilsLabelEXT"));
|
||||
}
|
||||
|
||||
if (pgpu->optional_features_support.synchronization_2)
|
||||
{
|
||||
_vkCmdSetEvent2KHR = reinterpret_cast<PFN_vkCmdSetEvent2KHR>(vkGetDeviceProcAddr(dev, "vkCmdSetEvent2KHR"));
|
||||
_vkCmdWaitEvents2KHR = reinterpret_cast<PFN_vkCmdWaitEvents2KHR>(vkGetDeviceProcAddr(dev, "vkCmdWaitEvents2KHR"));
|
||||
_vkCmdPipelineBarrier2KHR = reinterpret_cast<PFN_vkCmdPipelineBarrier2KHR>(vkGetDeviceProcAddr(dev, "vkCmdPipelineBarrier2KHR"));
|
||||
}
|
||||
|
||||
if (pgpu->optional_features_support.extended_device_fault)
|
||||
{
|
||||
_vkGetDeviceFaultInfoEXT = reinterpret_cast<PFN_vkGetDeviceFaultInfoEXT>(vkGetDeviceProcAddr(dev, "vkGetDeviceFaultInfoEXT"));
|
||||
}
|
||||
|
||||
memory_map = vk::get_memory_mapping(pdev);
|
||||
m_formats_support = vk::get_optimal_tiling_supported_formats(pdev);
|
||||
m_pipeline_binding_table = vk::get_pipeline_binding_table(pdev);
|
||||
|
||||
if (pgpu->optional_features_support.external_memory_host)
|
||||
{
|
||||
memory_map._vkGetMemoryHostPointerPropertiesEXT = reinterpret_cast<PFN_vkGetMemoryHostPointerPropertiesEXT>(vkGetDeviceProcAddr(dev, "vkGetMemoryHostPointerPropertiesEXT"));
|
||||
}
|
||||
|
||||
if (g_cfg.video.disable_vulkan_mem_allocator)
|
||||
{
|
||||
m_allocator = std::make_unique<vk::mem_allocator_vk>(*this, pdev);
|
||||
|
|
|
@ -37,8 +37,6 @@ namespace vk
|
|||
u64 device_local_total_bytes;
|
||||
u64 host_visible_total_bytes;
|
||||
u64 device_bar_total_bytes;
|
||||
|
||||
PFN_vkGetMemoryHostPointerPropertiesEXT _vkGetMemoryHostPointerPropertiesEXT;
|
||||
};
|
||||
|
||||
struct descriptor_indexing_features
|
||||
|
@ -61,6 +59,14 @@ namespace vk
|
|||
operator bool() const { return supported; }
|
||||
};
|
||||
|
||||
struct multidraw_features
|
||||
{
|
||||
bool supported;
|
||||
u32 max_batch_size;
|
||||
|
||||
operator bool() const { return supported; }
|
||||
};
|
||||
|
||||
class physical_device
|
||||
{
|
||||
VkInstance parent = VK_NULL_HANDLE;
|
||||
|
@ -79,6 +85,8 @@ namespace vk
|
|||
|
||||
custom_border_color_features custom_border_color_support{};
|
||||
|
||||
multidraw_features multidraw_support{};
|
||||
|
||||
struct
|
||||
{
|
||||
bool barycentric_coords = false;
|
||||
|
@ -145,18 +153,6 @@ namespace vk
|
|||
const std::vector<const char*>& requested_extensions,
|
||||
const VkPhysicalDeviceFeatures& requested_features) const;
|
||||
|
||||
public:
|
||||
// Exported device endpoints
|
||||
PFN_vkCmdBeginConditionalRenderingEXT _vkCmdBeginConditionalRenderingEXT = nullptr;
|
||||
PFN_vkCmdEndConditionalRenderingEXT _vkCmdEndConditionalRenderingEXT = nullptr;
|
||||
PFN_vkSetDebugUtilsObjectNameEXT _vkSetDebugUtilsObjectNameEXT = nullptr;
|
||||
PFN_vkQueueInsertDebugUtilsLabelEXT _vkQueueInsertDebugUtilsLabelEXT = nullptr;
|
||||
PFN_vkCmdInsertDebugUtilsLabelEXT _vkCmdInsertDebugUtilsLabelEXT = nullptr;
|
||||
PFN_vkCmdSetEvent2KHR _vkCmdSetEvent2KHR = nullptr;
|
||||
PFN_vkCmdWaitEvents2KHR _vkCmdWaitEvents2KHR = nullptr;
|
||||
PFN_vkCmdPipelineBarrier2KHR _vkCmdPipelineBarrier2KHR = nullptr;
|
||||
PFN_vkGetDeviceFaultInfoEXT _vkGetDeviceFaultInfoEXT = nullptr;
|
||||
|
||||
public:
|
||||
render_device() = default;
|
||||
~render_device() = default;
|
||||
|
@ -175,6 +171,7 @@ namespace vk
|
|||
const pipeline_binding_table& get_pipeline_binding_table() const { return m_pipeline_binding_table; }
|
||||
const gpu_shader_types_support& get_shader_types_support() const { return pgpu->shader_types_support; }
|
||||
const custom_border_color_features& get_custom_border_color_support() const { return pgpu->custom_border_color_support; }
|
||||
const multidraw_features& get_multidraw_support() const { return pgpu->multidraw_support; }
|
||||
|
||||
bool get_shader_stencil_export_support() const { return pgpu->optional_features_support.shader_stencil_export; }
|
||||
bool get_depth_bounds_support() const { return pgpu->features.depthBounds != VK_FALSE; }
|
||||
|
|
|
@ -23,7 +23,7 @@ namespace vk
|
|||
graphics_pipeline_state()
|
||||
{
|
||||
// NOTE: Vk** structs have padding bytes
|
||||
memset(this, 0, sizeof(graphics_pipeline_state));
|
||||
std::memset(static_cast<void*>(this), 0, sizeof(graphics_pipeline_state));
|
||||
|
||||
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
|
||||
cs.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
|
||||
|
@ -43,7 +43,7 @@ namespace vk
|
|||
graphics_pipeline_state(const graphics_pipeline_state& other)
|
||||
{
|
||||
// NOTE: Vk** structs have padding bytes
|
||||
memcpy(this, &other, sizeof(graphics_pipeline_state));
|
||||
std::memcpy(static_cast<void*>(this), &other, sizeof(graphics_pipeline_state));
|
||||
|
||||
if (other.cs.pAttachments == other.att_state)
|
||||
{
|
||||
|
@ -59,7 +59,7 @@ namespace vk
|
|||
if (this != &other)
|
||||
{
|
||||
// NOTE: Vk** structs have padding bytes
|
||||
memcpy(this, &other, sizeof(graphics_pipeline_state));
|
||||
std::memcpy(static_cast<void*>(this), &other, sizeof(graphics_pipeline_state));
|
||||
|
||||
if (other.cs.pAttachments == other.att_state)
|
||||
{
|
||||
|
|
|
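The memset/memcpy calls over these Vk-struct aggregates now go through static_cast<void*>(this). GCC and Clang emit -Wclass-memaccess when raw memory routines write over an object whose class is not trivially copyable (here because of the user-provided copy constructor and assignment operator); casting the destination to void* is the conventional way of stating that the byte-wise copy, which is needed because the Vk structs contain padding that must also be copied, is intentional. A minimal reproduction of the pattern (names invented):

#include <cstring>

struct pipeline_state_like
{
    int data[4];

    pipeline_state_like()
    {
        // Without the static_cast<void*>, GCC warns with -Wclass-memaccess because the
        // class is not trivially copyable (it has a user-provided copy constructor).
        std::memset(static_cast<void*>(this), 0, sizeof(pipeline_state_like));
    }

    pipeline_state_like(const pipeline_state_like& other)
    {
        std::memcpy(static_cast<void*>(this), &other, sizeof(pipeline_state_like));
    }
};

int main()
{
    pipeline_state_like a;
    pipeline_state_like b(a);
    return b.data[0];
}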
@ -292,7 +292,7 @@ namespace vk
|
|||
name_info.objectHandle = reinterpret_cast<u64>(value);
|
||||
name_info.pObjectName = name.c_str();
|
||||
|
||||
g_render_device->_vkSetDebugUtilsObjectNameEXT(m_device, &name_info);
|
||||
_vkSetDebugUtilsObjectNameEXT(m_device, &name_info);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ namespace vk
|
|||
return "Extended fault info is not available. Extension 'VK_EXT_device_fault' is probably not supported by your driver.";
|
||||
}
|
||||
|
||||
ensure(g_render_device->_vkGetDeviceFaultInfoEXT);
|
||||
ensure(_vkGetDeviceFaultInfoEXT);
|
||||
|
||||
VkDeviceFaultCountsEXT fault_counts
|
||||
{
|
||||
|
@ -30,7 +30,7 @@ namespace vk
|
|||
std::string fault_description;
|
||||
|
||||
// Retrieve sizes
|
||||
g_render_device->_vkGetDeviceFaultInfoEXT(*g_render_device, &fault_counts, nullptr);
|
||||
_vkGetDeviceFaultInfoEXT(*g_render_device, &fault_counts, nullptr);
|
||||
|
||||
// Resize arrays and fill
|
||||
address_info.resize(fault_counts.addressInfoCount);
|
||||
|
@ -44,7 +44,7 @@ namespace vk
|
|||
.pVendorInfos = vendor_info.data(),
|
||||
.pVendorBinaryData = vendor_binary_data.data()
|
||||
};
|
||||
g_render_device->_vkGetDeviceFaultInfoEXT(*g_render_device, &fault_counts, &fault_info);
|
||||
_vkGetDeviceFaultInfoEXT(*g_render_device, &fault_counts, &fault_info);
|
||||
|
||||
fault_description = fault_info.description;
|
||||
std::string fault_message = fmt::format(
|
||||
|
@ -235,14 +235,9 @@ namespace vk
|
|||
return false;
|
||||
}
|
||||
|
||||
// Temporarily
|
||||
#ifndef _MSC_VER
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
#endif
|
||||
|
||||
VkBool32 BreakCallback(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType,
|
||||
u64 srcObject, usz location, s32 msgCode,
|
||||
const char* pLayerPrefix, const char* pMsg, void* pUserData)
|
||||
VkBool32 BreakCallback(VkFlags /*msgFlags*/, VkDebugReportObjectTypeEXT /*objType*/,
|
||||
u64 /*srcObject*/, usz /*location*/, s32 /*msgCode*/,
|
||||
const char* /*pLayerPrefix*/, const char* /*pMsg*/, void* /*pUserData*/)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
DebugBreak();
|
||||
|
|
|
@ -253,7 +253,7 @@ namespace vk
|
|||
|
||||
if (m_backend == sync_backend::events_v2)
|
||||
{
|
||||
m_device->_vkCmdPipelineBarrier2KHR(cmd, &dependency);
|
||||
_vkCmdPipelineBarrier2KHR(cmd, &dependency);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -284,7 +284,7 @@ namespace vk
|
|||
// The expectation is that this will be awaited using the gpu_wait function.
|
||||
if (m_backend == sync_backend::events_v2) [[ likely ]]
|
||||
{
|
||||
m_device->_vkCmdSetEvent2KHR(cmd, m_vk_event, &dependency);
|
||||
_vkCmdSetEvent2KHR(cmd, m_vk_event, &dependency);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -322,7 +322,7 @@ namespace vk
|
|||
.pMemoryBarriers = &mem_barrier
|
||||
};
|
||||
|
||||
m_device->_vkCmdSetEvent2KHR(cmd, m_vk_event, &empty_dependency);
|
||||
_vkCmdSetEvent2KHR(cmd, m_vk_event, &empty_dependency);
|
||||
}
|
||||
|
||||
void event::host_signal() const
|
||||
|
@ -342,7 +342,7 @@ namespace vk
|
|||
|
||||
if (m_backend == sync_backend::events_v2) [[ likely ]]
|
||||
{
|
||||
m_device->_vkCmdWaitEvents2KHR(cmd, 1, &m_vk_event, &dependency);
|
||||
_vkCmdWaitEvents2KHR(cmd, 1, &m_vk_event, &dependency);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff.