systemd-resolved 关闭
关掉 systemd-resolved 的步骤:
sudo systemctl disable systemd-resolved
sudo systemctl stop systemd-resolved
先停用 systemd-resolved,并取消其开机自动启动。
接下来修改 NetworkManager 配置,让它能自动获取 DNS:
编辑文件 /etc/NetworkManager/NetworkManager.conf,在 [main] 段增加一行:
dns=default
修改后重启 NetworkManager 使配置生效:sudo systemctl restart NetworkManager
event.h
#ifndef EVENT_H
#define EVENT_H
#include <fcntl.h>
#include <atomic>
#include <time.h>
#include <semaphore.h>
#include <string>
using std::string;
using std::atomic_bool;
/// A Win32-style event object emulated on top of POSIX semaphores.
/// Named events use sem_open (cross-process visible); unnamed events use
/// sem_init (in-process only). Instances are created and destroyed solely
/// through the static factory functions, mirroring the Win32
/// CreateEvent / CloseHandle / WaitForSingleObject API.
class CEvent
{
private:
// Construction is private: use CreateEvent() / CloseHandle().
CEvent(long lpEventAttributes, bool bManualReset, bool bInitialState, const char* lpName);
~CEvent();
// Creates the underlying semaphore; returns false on failure.
bool Init();
// Converts a relative timeout in milliseconds into an absolute
// CLOCK_REALTIME deadline for sem_timedwait.
timespec sem_get_time_millsecs(long msecs);
public:
// Return codes of WaitForSingleObject, matching the Win32 constants.
enum
{
WAIT_OBJECT_0 = 0,
WAIT_TIMEOUT = 0x00000102L,
WAIT_FAILED = 0xFFFFFFFF
};
// lpEventAttributes carries the sem_open permission mode (e.g. S_IRWXU).
static CEvent* CreateEvent(long lpEventAttributes, bool bManualReset, bool bInitialState, const char* lpName);
// Convenience overload: permission mode defaults to S_IRWXU.
static CEvent* CreateEvent(bool bManualReset, bool bInitialState, const char* lpName);
// Destroys the event and nulls the caller's pointer.
static void CloseHandle(CEvent* &p_event);
// Waits up to ms milliseconds. NOTE(review): ms == 0 waits indefinitely
// here, which differs from Win32 (where 0 means poll) — confirm intended.
static unsigned long WaitForSingleObject(CEvent* p_event, long ms);
/// Signals the event (posts the semaphore).
static bool SetEvent(CEvent* p_event);
/// Resets the event to non-signaled, draining any pending posts.
static bool ResetEvent(CEvent* p_event);
private:
bool _b_manual_reset;             // true = manual-reset event
atomic_bool _b_initial_state;     // tracks the signaled state alongside the semaphore
sem_t* _p_named_sem = SEM_FAILED; // points at _sem (unnamed) or the sem_open handle (named)
sem_t _sem;                       // storage for the unnamed, in-process semaphore
string _sem_name;                 // empty for unnamed events
long _lpEventAttributes;          // permission bits forwarded to sem_open
int _sem_count = 0;               // scratch slot for sem_getvalue / initial count
};
#endif
event.cpp
#include "event.h"
#include "unistd.h"
/// Stores the creation parameters; the semaphore itself is created later
/// by Init(). A null lpName leaves _sem_name empty (unnamed event).
CEvent::CEvent(long lpEventAttributes, bool bManualReset, bool bInitialState, const char* lpName)
    : _b_manual_reset(bManualReset),
      _b_initial_state(bInitialState),
      _sem_name(lpName != nullptr ? lpName : ""),
      _lpEventAttributes(lpEventAttributes)
{
}
/// Releases the underlying semaphore.
/// Unnamed events destroy the in-place sem_t; named events close the
/// handle and unlink the name from the system.
CEvent::~CEvent()
{
    if (_p_named_sem == SEM_FAILED)
        return; // Init() never succeeded — nothing to release
    if (!_sem_name.empty())
    {
        // Named semaphore: close our handle, then remove the system-wide name.
        sem_close(_p_named_sem);
        sem_unlink(_sem_name.c_str());
        _sem_name.clear();
    }
    else
    {
        // Unnamed semaphore lives inside _sem.
        sem_destroy(&_sem);
        _p_named_sem = SEM_FAILED;
    }
}
/// Creates the semaphore backing this event.
/// Named (non-empty name): cross-process sem_open with O_CREAT.
/// Unnamed: process-local sem_init stored in _sem.
/// Returns false — leaving _p_named_sem == SEM_FAILED — on failure.
bool CEvent::Init()
{
    // A signaled event starts with one pending post.
    _sem_count = _b_initial_state ? 1 : 0;
    if (!_sem_name.empty())
    {
        _p_named_sem = sem_open(_sem_name.c_str(), O_CREAT, _lpEventAttributes, _sem_count);
        if (_p_named_sem != SEM_FAILED)
            return true;
        // Creation failed: remove any stale name, then forget it so the
        // destructor does not try to close/unlink again.
        sem_unlink(_sem_name.c_str());
        _sem_name.clear();
        return false;
    }
    if (sem_init(&_sem, 0, _sem_count) != 0)
    {
        _p_named_sem = SEM_FAILED;
        return false;
    }
    _p_named_sem = &_sem;
    return true;
}
/// Converts a relative timeout (milliseconds) into an absolute
/// CLOCK_REALTIME deadline, as required by sem_timedwait.
timespec CEvent::sem_get_time_millsecs(long msecs)
{
    constexpr long kNsecPerSec = 1000 * 1000 * 1000;
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    const long whole_secs = msecs / 1000;
    // Remaining milliseconds as nanoseconds, plus the current sub-second part;
    // may exceed one second, so carry the overflow into tv_sec.
    const long nsec = (msecs % 1000) * 1000 * 1000 + deadline.tv_nsec;
    deadline.tv_sec += whole_secs + nsec / kNsecPerSec;
    deadline.tv_nsec = nsec % kNsecPerSec;
    return deadline;
}
/// Factory mirroring Win32 CreateEvent: allocates the object and creates
/// its semaphore. Returns nullptr (and frees the object) if Init() fails.
CEvent* CEvent::CreateEvent(long lpEventAttributes, bool bManualReset, bool bInitialState, const char* lpName)
{
    CEvent* instance = new CEvent(lpEventAttributes, bManualReset, bInitialState, lpName);
    if (instance->Init())
        return instance;
    delete instance;
    return nullptr;
}
/// Convenience overload of CreateEvent with the sem_open permission mode
/// defaulted to S_IRWXU (owner read/write/execute).
/// Returns nullptr on failure, exactly like the full overload.
CEvent* CEvent::CreateEvent(bool bManualReset, bool bInitialState, const char* lpName)
{
    // Delegate so the allocate/Init/cleanup logic lives in one place
    // instead of being duplicated between the two overloads.
    return CreateEvent(S_IRWXU, bManualReset, bInitialState, lpName);
}
/// Win32-style CloseHandle: destroys the event and clears the caller's
/// pointer so it cannot dangle.
void CEvent::CloseHandle(CEvent* &p_event)
{
    delete p_event; // delete on nullptr is a no-op
    p_event = nullptr;
}
/// Waits for the event to become signaled.
/// Returns WAIT_OBJECT_0 on success, WAIT_TIMEOUT on timeout,
/// WAIT_FAILED on error or a null event.
/// NOTE(review): ms == 0 blocks indefinitely here, unlike Win32 where 0
/// means "poll and return immediately" — confirm this is intended.
/// NOTE(review): _b_initial_state is checked separately from the semaphore
/// state, so concurrent waiters/setters can race between the flag test and
/// the sem_wait below; looks acceptable for the single-waiter demo usage,
/// but verify before relying on it with multiple waiting threads.
unsigned long CEvent::WaitForSingleObject(CEvent* p_event, long ms)
{
    if (p_event == nullptr)
        return WAIT_FAILED;
    if (p_event->_b_initial_state)
    {
        // Event is already signaled: an auto-reset event consumes the
        // signal (clear flag, drain every pending post); a manual-reset
        // event stays signaled for subsequent waiters.
        if (!p_event->_b_manual_reset)
        {
            p_event->_b_initial_state = false;
            while (sem_getvalue(p_event->_p_named_sem, &p_event->_sem_count) == 0 && p_event->_sem_count > 0)
                sem_wait(p_event->_p_named_sem);
        }
        return WAIT_OBJECT_0;
    }
    if (ms == 0)
    {
        // Infinite wait (see NOTE above about the Win32 difference).
        int n_ret = sem_wait(p_event->_p_named_sem);
        if (n_ret != 0)
            return WAIT_FAILED;
    }
    else
    {
        int n_ret = 0;
        // Absolute deadline required by sem_timedwait.
        timespec ts = p_event->sem_get_time_millsecs(ms);
        // Retry when interrupted by a signal; any other -1 is a real error.
        while ((n_ret = sem_timedwait(p_event->_p_named_sem, &ts)) == -1 && errno == EINTR)
            continue;
        if (n_ret == -1)
        {
            if (errno == ETIMEDOUT)
                return WAIT_TIMEOUT;
            else
                return WAIT_FAILED;
        }
    }
    // A manual-reset event remains signaled after a successful wait
    // until ResetEvent is called.
    if (p_event->_b_manual_reset)
        p_event->_b_initial_state = true;
    return WAIT_OBJECT_0;
}
/// Signals the event by posting its semaphore, waking one waiter.
/// Returns false if p_event is null or sem_post fails.
bool CEvent::SetEvent(CEvent* p_event)
{
    if (p_event == nullptr)
        return false;
    return sem_post(p_event->_p_named_sem) == 0;
}
/// Forces the event back to the non-signaled state: clears the signaled
/// flag and drains every pending post from the semaphore.
bool CEvent::ResetEvent(CEvent* p_event)
{
    if (p_event == nullptr)
        return false;
    p_event->_b_initial_state = false;
    for (;;)
    {
        if (sem_getvalue(p_event->_p_named_sem, &p_event->_sem_count) != 0)
            break; // cannot inspect the count — stop draining
        if (p_event->_sem_count <= 0)
            break; // fully drained
        sem_wait(p_event->_p_named_sem); // consume one pending post
    }
    return true;
}
#ifdef EVENT_DEMO
/// g++ event.cpp -DEVENT_DEMO -o event-demo -lpthread
/// Demo worker: ticks once a second and signals the event after five
/// seconds. The original version looped forever and re-posted the
/// semaphore on every tick past the fifth, growing the count without
/// bound; returning after the first signal keeps the printed output
/// identical while signaling exactly once.
/// NOTE(review): main() detaches this thread and then frees the event,
/// so a use-after-free window remains; joining the thread would be safer.
void* thread_func(void* arg)
{
    CEvent *p = (CEvent*)arg;
    for (int i = 1; ; ++i)
    {
        sleep(1);
        if (i >= 5)
        {
            CEvent::SetEvent(p);
            printf("sleep %d s\n", i);
            return nullptr;
        }
        printf("sleep %d s\n", i);
    }
}
/// Demo driver: spawns a worker that signals the event after ~5 seconds,
/// then waits up to 10 seconds for the signal.
int main(int argc, char* argv[])
{
    printf("event-demo\n");
    // Manual-reset, initially non-signaled, named "sec1" (cross-process).
    CEvent *event = CEvent::CreateEvent(true, false, "sec1");
    // NOTE(review): CreateEvent can return nullptr; the calls below do
    // tolerate a null event, but pthread_create would still hand the
    // worker a null pointer — consider checking here.
    pthread_t tid;
    pthread_create(&tid, NULL, thread_func, event);
    // NOTE(review): the detached worker keeps using `event` after
    // CloseHandle frees it below — a use-after-free race; pthread_join
    // before CloseHandle would be safer.
    pthread_detach(tid);
    if(CEvent::WaitForSingleObject(event, 10*1000)==CEvent::WAIT_OBJECT_0)
    {
        printf("wait event ok\n");
        CEvent::ResetEvent(event);
    }
    CEvent::CloseHandle(event);
    return 0;
}
#endif
编译命令
g++ event.cpp -DEVENT_DEMO -o event-demo -lpthread
运行结果:
./event-demo
event-demo
sleep 1 s
sleep 2 s
sleep 3 s
sleep 4 s
sleep 5 s
wait event ok
libevent 多线程发送数据 bufferevent_write
update:2021-10-14
需要增加:
evthread_use_windows_threads();
evthread_use_pthreads();
evthread_make_base_notifiable(base);
struct bufferevent*bev = bufferevent_socket_new(base, -1,
BEV_OPT_CLOSE_ON_FREE | BEV_OPT_THREADSAFE );
增加 BEV_OPT_THREADSAFE 可能会更 可靠。
-levent -levent_pthreads
在使用了下面的代码时,会需要线程库
evthread_use_pthreads();
base = event_base_new();
evthread_make_base_notifiable(base);
libevent针对win32平台定义了evthread_use_windows_threads,
libevent针对Linux thread库 定义了evthread_use_pthreads
使用libevent时为了保证线程安全,提供了evthread_use_pthreads函数
它的内部是会分配内存的,但是没有对应的函数来释放evthread_use_pthreads分配的内存,那么在如下的场景使用evthread_use_pthreads就会造成内存泄露:
libevent被编译为静态库然后被链接进了一个动态库A,我们在使用dlopen来加载静态库A,在使用时库A的内部是调用了evthread_use_pthreads的,会被分配内存出来,然后使用dlclose卸载掉库A,但是这里evthread_use_pthreads分配的内存并没有被释放掉!!!!然后又用dlopen来加载库A,这时其内部对evthread_use_pthreads的调用会不会分配新的内存,答案是会的,因为之前是整个把A卸载掉了。
如果不卸载掉A库,反复调用evthread_use_pthreads是不会造成泄露的,因为由全局变量在判别是否已经初始化了,但是卸载掉库后全局变量也就不存在了,再次加载全局变量依旧被初始化为0,evthread_use_pthreads会分配内存
当然,我们大多时候只会加载一次库
systemd journal之于systemd犹如syslog之于init,其日志文件保存在 /var/log/journal 目录下。随着时间的流逝,该目录下会积累大量日志文件,占用不少的磁盘空间。如果硬盘容量较小或可用空间紧张,可以考虑清理过期日志释放占用的空间。
本文介绍清理systemd日志的方法。
清理systemd日志
返回目录
清理之前,可查看一下systemd日志所占用的磁盘空间。既可以用常用的 du 命令:
sudo du -sh /var/log/journal/
但更推荐使用systemd日志管理专用命令 journalctl:
journalctl --disk-usage
知道了日志占用的磁盘空间,接下来便可以清理过期日志。开始之前,建议 rotate 当前日志(rotate是日志操作中的一个术语,其归档旧日志,后续日志写入新创建的日志文件中):
sudo journalctl --rotate
journalctl提供了三种清理systemd日志的方式。第一种是清理指定时间之前的日志:
sudo journalctl --vacuum-time=7d
sudo journalctl --vacuum-time=2h
sudo journalctl --vacuum-time=10s
第二种是限制日志占用的空间大小:
sudo journalctl --vacuum-size=1G
sudo journalctl --vacuum-size=100M
第三种是保留日志文件个数:
sudo journalctl --vacuum-files=5
不知道 journalctl 管理日志功能之前,本人用过 find 配合 exec (或者管道加xargs)的土办法清理过期日志:
find /var/log/journal -mtime +7 -exec rm -rf {} \;
一劳永逸的办法
返回目录
上文介绍的清理systemd日志方法适合一次性手动管理,重复做就没意思了。一劳永逸的办法是配置systemd journal,让其自动管理日志,不占用过多磁盘空间。
方法是编辑 /etc/systemd/journald.conf 文件,对其中的参数进行设置。例如限制日志最大占用1G空间:
[Journal]
SystemMaxUse=1G
保存配置文件后记得重新加载:sudo systemctl restart systemd-journald
Referenced from:https://tlanyan.me/clear-systemd-journal-logs/