Merge pull request #198 from fjtrujy/fix_LockAPI

Fix lock api
This commit is contained in:
Diamond Rivero
2024-04-25 19:19:36 +08:00
committed by GitHub
4 changed files with 182 additions and 48 deletions

View File

@@ -33,9 +33,27 @@ GLUE_OBJS = __dummy_passwd.o __psp_heap_blockid.o __psp_free_heap.o _fork.o _wai
INIT_OBJS = __libpthreadglue_init.o __libcglue_init.o __libcglue_deinit.o _exit.o abort.o exit.o
# Objects implementing newlib's retargetable locking API, plus the
# statically allocated well-known locks (__lock___*) that newlib binds to
# and the startup/shutdown hooks that create/destroy their kernel mutexes.
LOCK_OBJS = \
__lock___sfp_recursive_mutex.o \
__lock___atexit_recursive_mutex.o \
__lock___at_quick_exit_mutex.o \
__lock___malloc_recursive_mutex.o \
__lock___env_recursive_mutex.o \
__lock___tz_mutex.o \
__lock___dd_hash_mutex.o \
__lock___arc4random_mutex.o \
__retarget_lock_init.o \
__retarget_lock_init_recursive.o \
__retarget_lock_close.o \
__retarget_lock_close_recursive.o \
__retarget_lock_acquire.o \
__retarget_lock_acquire_recursive.o \
__retarget_lock_try_acquire.o \
__retarget_lock_try_acquire_recursive.o \
__retarget_lock_release.o \
__retarget_lock_release_recursive.o \
__locks_init.o \
__locks_deinit.o
MUTEXMAN_OBJS = __sbrk_mutex.o __fdman_mutex.o __init_mutex.o __deinit_mutex.o

View File

@@ -1099,7 +1099,7 @@ int renameat(int olddirfd, const char *oldpath, int newdirfd, const char *newpat
{
	// TODO: Do better implementation following https://linux.die.net/man/2/renameat
	// For now behave like rename(); the olddirfd/newdirfd arguments are ignored,
	// so paths are resolved relative to the current working directory.
	return rename(oldpath, newpath);
}
#endif /* F_renameat */

View File

@@ -21,14 +21,91 @@
// Structure representing the lock
// Each lock is backed by a kernel lightweight mutex; it is created and
// destroyed through the __common_lock_* helpers below.
struct __lock {
SceLwMutexWorkarea mutex;
// NOTE(review): thread_id/count appear to be leftovers of manual recursion
// tracking; the current code relies on PSP_LW_MUTEX_ATTR_RECURSIVE and never
// reads them — confirm they can be removed once all residue is cleaned up.
int32_t thread_id;
int32_t count;
};
// Statically allocated lock objects for newlib's well-known internal locks
// (stdio sfp, atexit, at_quick_exit, malloc, env, tz, dd_hash, arc4random).
// Each definition is compiled into its own object file via its F___lock_*
// guard; their kernel mutexes are created in __locks_init() and destroyed
// in __locks_deinit().
#ifdef F___lock___sfp_recursive_mutex
struct __lock __lock___sfp_recursive_mutex;
#endif
#ifdef F___lock___atexit_recursive_mutex
struct __lock __lock___atexit_recursive_mutex;
#endif
#ifdef F___lock___at_quick_exit_mutex
struct __lock __lock___at_quick_exit_mutex;
#endif
#ifdef F___lock___malloc_recursive_mutex
struct __lock __lock___malloc_recursive_mutex;
#endif
#ifdef F___lock___env_recursive_mutex
struct __lock __lock___env_recursive_mutex;
#endif
#ifdef F___lock___tz_mutex
struct __lock __lock___tz_mutex;
#endif
#ifdef F___lock___dd_hash_mutex
struct __lock __lock___dd_hash_mutex;
#endif
#ifdef F___lock___arc4random_mutex
struct __lock __lock___arc4random_mutex;
#endif
/* Back a non-recursive lock with a FIFO-ordered kernel LwMutex. */
static inline void __common_lock_init(_LOCK_T lk)
{
	sceKernelCreateLwMutex(&lk->mutex, "lock API mutex", PSP_LW_MUTEX_ATTR_THFIFO, 0, 0);
}
/* Back a recursive lock with a kernel LwMutex that allows nested
 * acquisition by the owning thread. */
static inline void __common_lock_init_recursive(_LOCK_T lk)
{
	sceKernelCreateLwMutex(&lk->mutex, "lock API mutex Recursive", PSP_LW_MUTEX_ATTR_RECURSIVE, 0, 0);
}
/* Destroy the kernel LwMutex backing a non-recursive lock. */
static inline void __common_lock_close(_LOCK_T lk)
{
	sceKernelDeleteLwMutex(&lk->mutex);
}
/* Destroy the kernel LwMutex backing a recursive lock. */
static inline void __common_lock_close_recursive(_LOCK_T lk)
{
	sceKernelDeleteLwMutex(&lk->mutex);
}
#ifdef F___retarget_lock_init
/* newlib hook: allocate a lock object and create its non-recursive
 * kernel mutex, handing the result back through *lock.
 * The mutex must be created only AFTER the object is allocated — the
 * stale pre-refactor call that dereferenced the uninitialized *lock
 * first has been removed. */
void __retarget_lock_init(_LOCK_T *lock)
{
	_LOCK_T new_lock = (_LOCK_T)malloc(sizeof(struct __lock));
	__common_lock_init(new_lock);
	*lock = new_lock;
}
#endif
#ifdef F___retarget_lock_init_recursive
/* newlib hook: allocate a fresh lock object, create its recursive
 * kernel mutex, then publish it through *lock. */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	_LOCK_T created = malloc(sizeof *created);
	__common_lock_init_recursive(created);
	*lock = created;
}
#endif
#ifdef F___retarget_lock_close
/* newlib hook: destroy a dynamically created non-recursive lock and
 * release its storage. */
void __retarget_lock_close(_LOCK_T lk)
{
	__common_lock_close(lk);
	free(lk);
}
#endif
#ifdef F___retarget_lock_close_recursive
/* newlib hook: destroy a dynamically created recursive lock and
 * release its storage. */
void __retarget_lock_close_recursive(_LOCK_T lk)
{
	__common_lock_close_recursive(lk);
	free(lk);
}
#endif
@@ -39,10 +116,10 @@ void __retarget_lock_acquire(_LOCK_T lock)
}
#endif
#ifdef F___retarget_lock_acquire_recursive
/* newlib hook: take a recursive lock, blocking until it is available.
 * The mutex was created with PSP_LW_MUTEX_ATTR_RECURSIVE, so nested
 * acquisition by the owning thread is handled by the kernel.
 * (Merge residue removed: a stray __retarget_lock_release #ifdef and a
 * stale unlock call were interleaved here, leaving an unbalanced guard.) */
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	sceKernelLockLwMutex(&lock->mutex, 1, 0);
}
#endif
@@ -53,60 +130,88 @@ int __retarget_lock_try_acquire(_LOCK_T lock)
}
#endif
#ifdef F___retarget_lock_try_acquire_recursive
/* newlib hook: non-blocking acquire of a recursive lock.
 * Propagates the kernel's status (0 on success).
 * (Merge residue removed: a stray __retarget_lock_close #ifdef and a
 * stale sceKernelDeleteLwMutex call were interleaved here.) */
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
	return sceKernelTryLockLwMutex(&lock->mutex, 1);
}
#endif
#ifdef F___retarget_lock_release
/* newlib hook: release a non-recursive lock.
 * (Merge residue removed: the old manual-recursion implementation —
 * count/thread_id bookkeeping in init_recursive and acquire_recursive —
 * was interleaved here; recursion is now handled by the kernel via
 * PSP_LW_MUTEX_ATTR_RECURSIVE, and the new acquire_recursive is
 * defined under its own F___retarget_lock_acquire_recursive guard.) */
void __retarget_lock_release(_LOCK_T lock)
{
	sceKernelUnlockLwMutex(&lock->mutex, 1);
}
#endif
#ifdef F___retarget_lock_release_recursive
/* newlib hook: release one level of a recursive lock. The kernel
 * tracks the recursion depth (mutex created with
 * PSP_LW_MUTEX_ATTR_RECURSIVE), so no manual count/thread_id
 * bookkeeping is needed — the old interleaved code here could call
 * sceKernelUnlockLwMutex twice per release. */
void __retarget_lock_release_recursive(_LOCK_T lock)
{
	sceKernelUnlockLwMutex(&lock->mutex, 1);
}
#endif
#ifdef F___locks_init
/* The well-known newlib locks are statically allocated; each is defined
 * in its own object file (see the F___lock_* guards above). */
extern struct __lock __lock___malloc_recursive_mutex;
extern struct __lock __lock___atexit_recursive_mutex;
extern struct __lock __lock___at_quick_exit_mutex;
extern struct __lock __lock___sfp_recursive_mutex;
extern struct __lock __lock___env_recursive_mutex;
extern struct __lock __lock___tz_mutex;
extern struct __lock __lock___dd_hash_mutex;
extern struct __lock __lock___arc4random_mutex;

/* Create the kernel mutex backing each static newlib lock. Called once
 * at C runtime startup, before any code may take these locks.
 * (Merge residue removed: remnants of the old try_acquire_recursive
 * implementation were interleaved into this body, referencing an
 * undefined `lock` and leaving an unbalanced #ifdef.) */
void __locks_init()
{
	_LOCK_T lock_malloc = &__lock___malloc_recursive_mutex;
	_LOCK_T lock_atexit = &__lock___atexit_recursive_mutex;
	_LOCK_T lock_quick_exit = &__lock___at_quick_exit_mutex;
	_LOCK_T lock_sfp = &__lock___sfp_recursive_mutex;
	_LOCK_T lock_env = &__lock___env_recursive_mutex;
	_LOCK_T lock_tz = &__lock___tz_mutex;
	_LOCK_T lock_dd_hash = &__lock___dd_hash_mutex;
	_LOCK_T lock_arc4random = &__lock___arc4random_mutex;

	__common_lock_init_recursive(lock_malloc);
	__common_lock_init_recursive(lock_atexit);
	__common_lock_init(lock_quick_exit);
	__common_lock_init_recursive(lock_sfp);
	__common_lock_init_recursive(lock_env);
	__common_lock_init(lock_tz);
	__common_lock_init(lock_dd_hash);
	__common_lock_init(lock_arc4random);
}
#endif
#ifdef F___locks_deinit
extern struct __lock __lock___malloc_recursive_mutex;
extern struct __lock __lock___atexit_recursive_mutex;
extern struct __lock __lock___at_quick_exit_mutex;
extern struct __lock __lock___sfp_recursive_mutex;
extern struct __lock __lock___env_recursive_mutex;
extern struct __lock __lock___tz_mutex;
extern struct __lock __lock___dd_hash_mutex;
extern struct __lock __lock___arc4random_mutex;

/* Destroy the kernel mutexes backing newlib's static locks; mirrors
 * __locks_init() and is called once at C runtime shutdown.
 * (Merge residue removed: a stray __retarget_lock_close_recursive
 * #ifdef and a stale sceKernelDeleteLwMutex(&lock->mutex, ...) call —
 * referencing an undefined `lock` — were interleaved here.) */
void __locks_deinit()
{
	_LOCK_T lock_malloc = &__lock___malloc_recursive_mutex;
	_LOCK_T lock_atexit = &__lock___atexit_recursive_mutex;
	_LOCK_T lock_quick_exit = &__lock___at_quick_exit_mutex;
	_LOCK_T lock_sfp = &__lock___sfp_recursive_mutex;
	_LOCK_T lock_env = &__lock___env_recursive_mutex;
	_LOCK_T lock_tz = &__lock___tz_mutex;
	_LOCK_T lock_dd_hash = &__lock___dd_hash_mutex;
	_LOCK_T lock_arc4random = &__lock___arc4random_mutex;

	__common_lock_close_recursive(lock_malloc);
	__common_lock_close_recursive(lock_atexit);
	__common_lock_close(lock_quick_exit);
	__common_lock_close_recursive(lock_sfp);
	__common_lock_close_recursive(lock_env);
	__common_lock_close(lock_tz);
	__common_lock_close(lock_dd_hash);
	__common_lock_close(lock_arc4random);
}
#endif

View File

@@ -589,6 +589,17 @@ int sceKernelPollSema(SceUID semaid, int signal);
*/
int sceKernelReferSemaStatus(SceUID semaid, SceKernelSemaInfo *info);
/** Attributes for a lightweight mutex; pass zero or more (OR-ed) to
 * sceKernelCreateLwMutex(). */
enum PspLwMutexAttributes
{
/** Waiting threads are queued in FIFO order. */
PSP_LW_MUTEX_ATTR_THFIFO = 0x0000U,
/** Waiting threads are queued by thread priority. */
PSP_LW_MUTEX_ATTR_THPRI = 0x0100U,
/** A recursive lock is allowed by the thread that acquired the lightweight mutex. */
PSP_LW_MUTEX_ATTR_RECURSIVE = 0x0200U
};
/** Struct as workarea for lightweight mutex */
typedef struct {
/** Count */
@@ -610,13 +621,13 @@ typedef struct {
*
* @param workarea - The pointer to the workarea
* @param name - The name of the lightweight mutex
 * @param attr - The LwMutex attributes, zero or more of ::PspLwMutexAttributes.
 * @param initialCount - The initial value of the mutex
 * @param optionsPtr - Other options for the mutex
*
* @return 0 on success, otherwise one of ::PspKernelErrorCodes
*/
/* Duplicate conflicting prototypes (old `u32 attr` vs new `SceUInt32 attr`)
 * were left by the merge; keep the updated declaration only. */
int sceKernelCreateLwMutex(SceLwMutexWorkarea *workarea, const char *name, SceUInt32 attr, int initialCount, u32 *optionsPtr);
/**
* Delete a lightweight mutex