Clean up threading
* Rename everything.
* Strip J prefix.
* Change UpperCamelCase functions to lowerCamelCase.
* Remove global (!) semaphore count mutex on OSX.
* Remove semaphore count getter (unused, unsafe, depended on internal API functions on Windows, and used a hack on OSX).
* Add `Atomic<type>`.
* Make `Thread` handle thread names.
* Add support for C++11 multi-threading.
* Combine pthread and win32 sources.
* Remove `ThreadStarted` (unused, unneeded).
* Move some includes from the headers to the sources.
* Move all of `Event` into its header (allows inlining with no new includes).
* Make `Event` use `Semaphore` (except on Windows).
* Move some porting functions into `Thread`.
* Integrate logging with `Thread`.
* Add threading test.
This commit is contained in:
parent 6a1047d8c1
commit e4bff8be94
77 changed files with 1594 additions and 2046 deletions
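Most of the hunks below are mechanical call-site updates from the old JThread-era UpperCamelCase API to the new lowerCamelCase one. As orientation, here is a rough sketch of the new surface as it can be inferred from the call sites in this diff alone; the method names come from the hunks, but the signatures and return types are guesses, not the contents of the actual threading/ headers added by this commit:

    #include <string>

    // Illustrative only: names taken from call sites in this diff,
    // signatures and return types are assumptions.
    class Thread {
    public:
        Thread(const std::string &name = "");  // threads now carry their own name
        virtual ~Thread();

        bool start();           // was Start()
        bool stop();            // was Stop(); asks the thread to exit
        bool wait();            // was Wait(); joins the thread
        bool isRunning();       // was IsRunning()
        bool stopRequested();   // was StopRequested()

    protected:
        virtual void *run() = 0;  // was Thread(); the body executed on the new thread
    };

    class Mutex {
    public:
        void lock();    // was JMutex::Lock()
        void unlock();  // was JMutex::Unlock()
    };

    class Semaphore {
    public:
        void post();    // was JSemaphore::Post()
        void wait();    // was JSemaphore::Wait()
    };

MutexAutoLock (formerly JMutexAutoLock) is the matching RAII guard used further down in this diff.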
@@ -47,32 +47,31 @@ AsyncEngine::~AsyncEngine()
 	// Request all threads to stop
 	for (std::vector<AsyncWorkerThread *>::iterator it = workerThreads.begin();
 			it != workerThreads.end(); it++) {
-		(*it)->Stop();
+		(*it)->stop();
 	}
 
 	// Wake up all threads
 	for (std::vector<AsyncWorkerThread *>::iterator it = workerThreads.begin();
 			it != workerThreads.end(); it++) {
-		jobQueueCounter.Post();
+		jobQueueCounter.post();
 	}
 
 	// Wait for threads to finish
 	for (std::vector<AsyncWorkerThread *>::iterator it = workerThreads.begin();
 			it != workerThreads.end(); it++) {
-		(*it)->Wait();
+		(*it)->wait();
 	}
 
 	// Force kill all threads
 	for (std::vector<AsyncWorkerThread *>::iterator it = workerThreads.begin();
 			it != workerThreads.end(); it++) {
-		(*it)->Kill();
 		delete *it;
 	}
 
-	jobQueueMutex.Lock();
+	jobQueueMutex.lock();
 	jobQueue.clear();
-	jobQueueMutex.Unlock();
+	jobQueueMutex.unlock();
 	workerThreads.clear();
 }
@@ -92,16 +91,17 @@ void AsyncEngine::initialize(unsigned int numEngines)
 	initDone = true;
 
 	for (unsigned int i = 0; i < numEngines; i++) {
-		AsyncWorkerThread *toAdd = new AsyncWorkerThread(this, i);
+		AsyncWorkerThread *toAdd = new AsyncWorkerThread(this,
+			std::string("AsyncWorker-") + itos(i));
 		workerThreads.push_back(toAdd);
-		toAdd->Start();
+		toAdd->start();
 	}
 }
 
 /******************************************************************************/
 unsigned int AsyncEngine::queueAsyncJob(std::string func, std::string params)
 {
-	jobQueueMutex.Lock();
+	jobQueueMutex.lock();
 	LuaJobInfo toAdd;
 	toAdd.id = jobIdCounter++;
 	toAdd.serializedFunction = func;
@@ -109,9 +109,9 @@ unsigned int AsyncEngine::queueAsyncJob(std::string func, std::string params)
 
 	jobQueue.push_back(toAdd);
 
-	jobQueueCounter.Post();
+	jobQueueCounter.post();
 
-	jobQueueMutex.Unlock();
+	jobQueueMutex.unlock();
 
 	return toAdd.id;
 }
@@ -119,8 +119,8 @@ unsigned int AsyncEngine::queueAsyncJob(std::string func, std::string params)
 /******************************************************************************/
 LuaJobInfo AsyncEngine::getJob()
 {
-	jobQueueCounter.Wait();
-	jobQueueMutex.Lock();
+	jobQueueCounter.wait();
+	jobQueueMutex.lock();
 
 	LuaJobInfo retval;
 	retval.valid = false;
@@ -130,7 +130,7 @@ LuaJobInfo AsyncEngine::getJob()
 		jobQueue.pop_front();
 		retval.valid = true;
 	}
-	jobQueueMutex.Unlock();
+	jobQueueMutex.unlock();
 
 	return retval;
 }
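The pairing visible above — queueAsyncJob() calls post() after appending a job, while getJob() calls wait() before locking and popping one — is the classic counting-semaphore producer/consumer queue. A minimal self-contained sketch of that pattern, using standard C++11 primitives rather than the project's Semaphore and Mutex (all names here are invented for illustration):

    #include <condition_variable>
    #include <deque>
    #include <mutex>
    #include <string>

    // Hypothetical stand-in for the job queue in this file; illustration only.
    struct Job { std::string serialized_function; };

    class JobQueue {
    public:
        void push(Job job)                     // producer side, cf. queueAsyncJob()
        {
            std::lock_guard<std::mutex> lock(mutex_);
            jobs_.push_back(std::move(job));
            counter_.notify_one();             // plays the role of jobQueueCounter.post()
        }

        Job pop()                              // consumer side, cf. getJob()
        {
            std::unique_lock<std::mutex> lock(mutex_);
            counter_.wait(lock, [this] { return !jobs_.empty(); });  // cf. jobQueueCounter.wait()
            Job job = std::move(jobs_.front());
            jobs_.pop_front();
            return job;
        }

    private:
        std::mutex mutex_;
        std::condition_variable counter_;
        std::deque<Job> jobs_;
    };

One difference from this sketch: in the real code the destructor posts jobQueueCounter once per worker without queueing anything, so a woken worker can find the queue empty — which is why LuaJobInfo carries a valid flag and the worker loop re-checks it together with stopRequested().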
@@ -138,16 +138,16 @@ LuaJobInfo AsyncEngine::getJob()
 /******************************************************************************/
 void AsyncEngine::putJobResult(LuaJobInfo result)
 {
-	resultQueueMutex.Lock();
+	resultQueueMutex.lock();
 	resultQueue.push_back(result);
-	resultQueueMutex.Unlock();
+	resultQueueMutex.unlock();
 }
 
 /******************************************************************************/
 void AsyncEngine::step(lua_State *L, int errorhandler)
 {
 	lua_getglobal(L, "core");
-	resultQueueMutex.Lock();
+	resultQueueMutex.lock();
 	while (!resultQueue.empty()) {
 		LuaJobInfo jobDone = resultQueue.front();
 		resultQueue.pop_front();
@@ -166,14 +166,14 @@ void AsyncEngine::step(lua_State *L, int errorhandler)
 
 		PCALL_RESL(L, lua_pcall(L, 2, 0, errorhandler));
 	}
-	resultQueueMutex.Unlock();
+	resultQueueMutex.unlock();
 	lua_pop(L, 1); // Pop core
 }
 
 /******************************************************************************/
 void AsyncEngine::pushFinishedJobs(lua_State* L) {
 	// Result Table
-	resultQueueMutex.Lock();
+	MutexAutoLock l(resultQueueMutex);
 
 	unsigned int index = 1;
 	lua_createtable(L, resultQueue.size(), 0);
@@ -197,8 +197,6 @@ void AsyncEngine::pushFinishedJobs(lua_State* L) {
 
 		lua_rawseti(L, top, index++);
 	}
-
-	resultQueueMutex.Unlock();
 }
 
 /******************************************************************************/
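pushFinishedJobs() above replaces a manual Lock()/Unlock() pair with a scoped MutexAutoLock, so the mutex is released on every exit path without an explicit unlock. A minimal sketch of that RAII idiom, assuming the usual lock-in-constructor / unlock-in-destructor semantics (the stand-in names are invented for illustration):

    #include <mutex>   // std::mutex used as a stand-in for the project's Mutex

    // Simplified equivalent of MutexAutoLock: lock on construction, unlock on destruction.
    class ScopedLock {
    public:
        explicit ScopedLock(std::mutex &m) : mutex_(m) { mutex_.lock(); }
        ~ScopedLock() { mutex_.unlock(); }

        ScopedLock(const ScopedLock &) = delete;
        ScopedLock &operator=(const ScopedLock &) = delete;

    private:
        std::mutex &mutex_;
    };

    std::mutex result_queue_mutex;

    void push_finished_jobs_example()
    {
        ScopedLock lock(result_queue_mutex);  // cf. MutexAutoLock l(resultQueueMutex);
        // ... build the result table while the queue is protected ...
    }   // unlocked here, even on early return or exception

Note that the hunk names the guard (MutexAutoLock l(resultQueueMutex)); an unnamed temporary would be destroyed, and the mutex released, at the end of that same statement.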
@@ -214,10 +212,10 @@ void AsyncEngine::prepareEnvironment(lua_State* L, int top)
 
 /******************************************************************************/
 AsyncWorkerThread::AsyncWorkerThread(AsyncEngine* jobDispatcher,
-		unsigned int threadNum) :
+		const std::string &name) :
+	Thread(name),
 	ScriptApiBase(),
-	jobDispatcher(jobDispatcher),
-	threadnum(threadNum)
+	jobDispatcher(jobDispatcher)
 {
 	lua_State *L = getStack();
 
@@ -235,27 +233,17 @@ AsyncWorkerThread::AsyncWorkerThread(AsyncEngine* jobDispatcher,
 /******************************************************************************/
 AsyncWorkerThread::~AsyncWorkerThread()
 {
-	sanity_check(IsRunning() == false);
+	sanity_check(!isRunning());
 }
 
 /******************************************************************************/
-void* AsyncWorkerThread::Thread()
+void* AsyncWorkerThread::run()
 {
-	ThreadStarted();
-
-	// Register thread for error logging
-	char number[21];
-	snprintf(number, sizeof(number), "%u", threadnum);
-	log_register_thread(std::string("AsyncWorkerThread_") + number);
-
-	porting::setThreadName((std::string("AsyncWorkTh_") + number).c_str());
-
 	lua_State *L = getStack();
 
 	std::string script = getServer()->getBuiltinLuaPath() + DIR_DELIM + "init.lua";
 	if (!loadScript(script)) {
-		errorstream
-			<< "AsyncWorkerThread execution of async base environment failed!"
+		errorstream << "execution of async base environment failed!"
 			<< std::endl;
 		abort();
 	}
@@ -267,11 +255,11 @@ void* AsyncWorkerThread::Thread()
 	}
 
 	// Main loop
-	while (!StopRequested()) {
+	while (!stopRequested()) {
 		// Wait for job
 		LuaJobInfo toProcess = jobDispatcher->getJob();
 
-		if (toProcess.valid == false || StopRequested()) {
+		if (toProcess.valid == false || stopRequested()) {
 			continue;
 		}
 
@@ -310,8 +298,6 @@ void* AsyncWorkerThread::Thread()
 
 	lua_pop(L, 1); // Pop core
 
-	log_deregister_thread();
-
 	return 0;
 }
 
@@ -24,9 +24,9 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #include <deque>
 #include <map>
 
-#include "jthread/jthread.h"
-#include "jthread/jmutex.h"
-#include "jthread/jsemaphore.h"
+#include "threading/thread.h"
+#include "threading/mutex.h"
+#include "threading/semaphore.h"
 #include "debug.h"
 #include "lua.h"
 #include "cpp_api/s_base.h"
@@ -52,24 +52,15 @@ struct LuaJobInfo {
 };
 
 // Asynchronous working environment
-class AsyncWorkerThread : public JThread, public ScriptApiBase {
+class AsyncWorkerThread : public Thread, public ScriptApiBase {
 public:
-	/**
-	 * default constructor
-	 * @param pointer to job dispatcher
-	 */
-	AsyncWorkerThread(AsyncEngine* jobDispatcher, unsigned int threadNum);
-
+	AsyncWorkerThread(AsyncEngine* jobDispatcher, const std::string &name);
 	virtual ~AsyncWorkerThread();
 
-	void *Thread();
+	void *run();
 
 private:
 	AsyncEngine *jobDispatcher;
-
-	// Thread number. Used for debug output
-	unsigned int threadnum;
-
 };
 
 // Asynchornous thread and job management
@@ -148,13 +139,13 @@ private:
 	unsigned int jobIdCounter;
 
 	// Mutex to protect job queue
-	JMutex jobQueueMutex;
+	Mutex jobQueueMutex;
 
 	// Job queue
 	std::deque<LuaJobInfo> jobQueue;
 
 	// Mutex to protect result queue
-	JMutex resultQueueMutex;
+	Mutex resultQueueMutex;
 	// Result queue
 	std::deque<LuaJobInfo> resultQueue;
 
@@ -162,7 +153,7 @@ private:
 	std::vector<AsyncWorkerThread*> workerThreads;
 
 	// Counter semaphore for job dispatching
-	JSemaphore jobQueueCounter;
+	Semaphore jobQueueCounter;
 };
 
 #endif // CPP_API_ASYNC_EVENTS_HEADER
@@ -28,8 +28,8 @@ extern "C" {
 }
 
 #include "irrlichttypes.h"
-#include "jthread/jmutex.h"
-#include "jthread/jmutexautolock.h"
+#include "threading/mutex.h"
+#include "threading/mutex_auto_lock.h"
 #include "common/c_types.h"
 #include "common/c_internal.h"
 
@@ -108,7 +108,7 @@ protected:
 	void objectrefGetOrCreate(lua_State *L, ServerActiveObject *cobj);
 	void objectrefGet(lua_State *L, u16 id);
 
-	JMutex m_luastackmutex;
+	Mutex m_luastackmutex;
 	std::string m_last_run_mod;
 	// Stack index of Lua error handler
 	int m_errorhandler;
@@ -34,7 +34,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #include "debug.h" // assert()
 class LockChecker {
 public:
-	LockChecker(bool* variable) {
+	LockChecker(bool *variable) {
 		assert(*variable == false);
 
 		m_variable = variable;
@@ -44,7 +44,7 @@ public:
 		*m_variable = false;
 	}
 private:
-	bool* m_variable;
+	bool *m_variable;
 };
 
 #define SCRIPTAPI_LOCK_CHECK LockChecker(&(this->m_locked))
@@ -53,7 +53,7 @@ bool* m_variable;
 #endif
 
 #define SCRIPTAPI_PRECHECKHEADER \
-		JMutexAutoLock(this->m_luastackmutex); \
+		MutexAutoLock(this->m_luastackmutex); \
 		SCRIPTAPI_LOCK_CHECK; \
 		realityCheck(); \
 		lua_State *L = getStack(); \