Optimize CacheMap

This commit is contained in:
antao 2018-10-25 18:07:29 +08:00
parent 6699429a7a
commit 7e36c92281
4 changed files with 68 additions and 28 deletions

View File

@ -30,8 +30,8 @@
//threads_num:num of threads,1 by default
"threads_num": 16,
//enable_session:false by default
"enable_session": false,
"session_timeout": 0,
"enable_session": true,
"session_timeout": 1200,
//document_root: Root path of HTTP documents; default path is ./
"document_root": "./",
/* file_types:

View File

@ -84,26 +84,35 @@ class CacheMap
{
_wheels[i].resize(_bucketsNumPerWheel);
}
_timerId = _loop->runEvery(_tickInterval, [=]() {
_ticksCounter++;
size_t t = _ticksCounter;
size_t pow = 1;
for (size_t i = 0; i < _wheelsNum; i++)
{
if ((t % pow) == 0)
if (_tickInterval > 0 &&
_wheelsNum > 0 &&
_bucketsNumPerWheel > 0)
{
_timerId = _loop->runEvery(_tickInterval, [=]() {
_ticksCounter++;
size_t t = _ticksCounter;
size_t pow = 1;
for (size_t i = 0; i < _wheelsNum; i++)
{
CallbackBucket tmp;
if ((t % pow) == 0)
{
std::lock_guard<std::mutex> lock(bucketMutex_);
//use tmp val to make this critical area as short as possible.
_wheels[i].front().swap(tmp);
_wheels[i].pop_front();
_wheels[i].push_back(CallbackBucket());
CallbackBucket tmp;
{
std::lock_guard<std::mutex> lock(bucketMutex_);
//use tmp val to make this critical area as short as possible.
_wheels[i].front().swap(tmp);
_wheels[i].pop_front();
_wheels[i].push_back(CallbackBucket());
}
}
pow = pow * _bucketsNumPerWheel;
}
pow = pow * _bucketsNumPerWheel;
}
});
});
}
else
{
_noWheels = true;
}
};
~CacheMap()
{
@ -184,14 +193,14 @@ class CacheMap
int timeout = 0;
std::lock_guard<std::mutex> lock(mtx_);
if (_map.find(key) != _map.end())
auto iter = _map.find(key);
if (iter != _map.end())
{
timeout = _map[key].timeout;
timeout = iter->second.timeout;
if (timeout > 0)
eraseAfter(timeout, key);
return iter->second.value;
}
if (timeout > 0)
eraseAfter(timeout, key);
return _map[key].value;
}
bool find(const T1 &key)
@ -200,9 +209,10 @@ class CacheMap
bool flag = false;
std::lock_guard<std::mutex> lock(mtx_);
if (_map.find(key) != _map.end())
auto iter = _map.find(key);
if (iter != _map.end())
{
timeout = _map[key].timeout;
timeout = iter->second.timeout;
flag = true;
}
@ -235,6 +245,8 @@ class CacheMap
size_t _wheelsNum;
size_t _bucketsNumPerWheel;
bool _noWheels = false;
void insertEntry(size_t delay, CallbackEntryPtr entryPtr)
{
//protected by bucketMutex;
@ -270,6 +282,8 @@ class CacheMap
}
void eraseAfter(size_t delay, const T1 &key)
{
if (_noWheels)
return;
assert(_map.find(key) != _map.end());
CallbackEntryPtr entryPtr;

View File

@ -473,7 +473,30 @@ void HttpAppFrameworkImpl::run()
}
if (_useSession)
{
_sessionMapPtr = std::unique_ptr<CacheMap<std::string, SessionPtr>>(new CacheMap<std::string, SessionPtr>(&_loop));
if (_sessionTimeout > 0)
{
size_t wheelNum = 1;
size_t bucketNum = 0;
if (_sessionTimeout < 500)
{
bucketNum = _sessionTimeout + 1;
}
else
{
auto tmpTimeout = _sessionTimeout;
bucketNum = 100;
while (tmpTimeout > 100)
{
wheelNum++;
tmpTimeout = tmpTimeout / 100;
}
}
_sessionMapPtr = std::unique_ptr<CacheMap<std::string, SessionPtr>>(new CacheMap<std::string, SessionPtr>(&_loop, 1.0, wheelNum, bucketNum));
}
else if (_sessionTimeout == 0)
{
_sessionMapPtr = std::unique_ptr<CacheMap<std::string, SessionPtr>>(new CacheMap<std::string, SessionPtr>(&_loop, 0, 0, 0));
}
}
_responseCacheMap = std::unique_ptr<CacheMap<std::string, HttpResponsePtr>>(new CacheMap<std::string, HttpResponsePtr>(&_loop, 1.0, 4, 50)); //Max timeout up to about 70 days;
_loop.loop();

View File

@ -31,9 +31,12 @@ int main()
});
trantor::EventLoop mainLoop;
mainLoop.runAt(now.after(3).roundSecond().after(0.0013), [&]() {
(*main_cachePtr)["new"]="new";
if (main_cachePtr->find("1"))
{
LOG_DEBUG << "find item 1";
LOG_DEBUG << "find item 1:" << (*main_cachePtr)["1"];
(*main_cachePtr)["1"] = "22";
LOG_DEBUG << (*main_cachePtr)["1"];
}
else
{