feat: logx support logs rotation based on size limitation. (#1652) (#2167)

* feat: logx support logs rotation based on size limitation. (#1652)

implementation of #1652

Totally compatible with the old logx.LogConf. No effect if users do not change their options.


@ -1,5 +1,12 @@
package logx
type LogRotationRuleType int
const (
LogRotationRuleTypeDaily LogRotationRuleType = iota
LogRotationRuleTypeSizeLimit
)
// A LogConf is a logging config.
type LogConf struct {
ServiceName string `json:",optional"`
@ -11,4 +18,16 @@ type LogConf struct {
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
// MaxBackups represents how many backup log files will be kept. 0 means all files will be kept forever.
// Only takes effect when RotationRuleType is `LogRotationRuleTypeSizeLimit`.
// NOTE: `KeepDays` takes higher priority. Even if `MaxBackups` is set to 0, log files will
// still be removed once the `KeepDays` limit is reached.
MaxBackups int `json:",default=0"`
// MaxSize represents how much space the log file being written can take up. 0 means no limit. The unit is `MB`.
// Only takes effect when RotationRuleType is `LogRotationRuleTypeSizeLimit`.
MaxSize int `json:",default=0"`
// RotationRuleType represents the type of log rotation rule. Defaults to LogRotationRuleTypeDaily.
// 0: LogRotationRuleTypeDaily
// 1: LogRotationRuleTypeSizeLimit
RotationRuleType LogRotationRuleType `json:",default=0,options=[0,1]"`
}
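For orientation, a rough sketch of using the new fields from application code (the field values here are illustrative; leaving MaxBackups, MaxSize and RotationRuleType at their zero values keeps the previous daily rotation behaviour):

```go
package main

import "github.com/zeromicro/go-zero/core/logx"

func main() {
	// Hypothetical values; the defaults (MaxBackups=0, MaxSize=0, RotationRuleType=0)
	// keep the old daily rotation behaviour unchanged.
	c := logx.LogConf{
		ServiceName:      "order-api",
		Mode:             "file",
		Path:             "logs",
		KeepDays:         7,
		MaxBackups:       10,  // keep at most 10 rotated files (KeepDays still applies)
		MaxSize:          100, // rotate once the active file would exceed 100 MB
		RotationRuleType: logx.LogRotationRuleTypeSizeLimit,
	}
	logx.MustSetup(c)
	defer logx.Close()

	logx.Info("size-limited rotation enabled")
}
```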

@ -41,6 +41,9 @@ type (
gzipEnabled bool
logStackCooldownMills int
keepDays int
maxBackups int
maxSize int
rotationRule LogRotationRuleType
}
// LogField is a key-value pair that will be added to the log entry.
@ -294,13 +297,43 @@ func WithGzip() LogOption {
}
}
// WithMaxBackups customizes how many log file backups will be kept.
func WithMaxBackups(count int) LogOption {
return func(opts *logOptions) {
opts.maxBackups = count
}
}
// WithMaxSize customizes how much space the log file being written can take up.
func WithMaxSize(size int) LogOption {
return func(opts *logOptions) {
opts.maxSize = size
}
}
// WithLogRotationRuleType customizes which log rotation rule to use.
func WithLogRotationRuleType(r LogRotationRuleType) LogOption {
return func(opts *logOptions) {
opts.rotationRule = r
}
}
func createOutput(path string) (io.WriteCloser, error) {
if len(path) == 0 {
return nil, ErrLogPathNotSet
}
return NewLogger(path, DefaultRotateRule(path, backupFileDelimiter, options.keepDays,
options.gzipEnabled), options.gzipEnabled)
switch options.rotationRule {
case LogRotationRuleTypeDaily:
return NewLogger(path, DefaultRotateRule(path, backupFileDelimiter, options.keepDays,
options.gzipEnabled), options.gzipEnabled)
case LogRotationRuleTypeSizeLimit:
return NewLogger(path, NewSizeLimitRotateRule(path, backupFileDelimiter, options.keepDays,
options.maxSize, options.maxBackups, options.gzipEnabled), options.gzipEnabled)
default:
return NewLogger(path, DefaultRotateRule(path, backupFileDelimiter, options.keepDays,
options.gzipEnabled), options.gzipEnabled)
}
}
func errorAnySync(v interface{}) {

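To make the option plumbing above concrete, here is a minimal sketch of how the new functional options fold into logOptions before createOutput inspects them. The explicit loop is only an illustration (logx applies the options internally), and it compiles only inside package logx since logOptions is unexported:

```go
var opts logOptions
for _, apply := range []LogOption{
	WithKeepDays(7),
	WithGzip(),
	WithMaxSize(100),
	WithMaxBackups(10),
	WithLogRotationRuleType(LogRotationRuleTypeSizeLimit),
} {
	apply(&opts)
}
// With rotationRule set to LogRotationRuleTypeSizeLimit, createOutput takes the
// NewSizeLimitRotateRule branch instead of DefaultRotateRule.
```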
@ -8,15 +8,18 @@
```go
type LogConf struct {
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=[console,file,volume]"`
Encoding string `json:",default=json,options=[json,plain]"`
TimeFormat string `json:",optional"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=[info,error,severe]"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=[console,file,volume]"`
Encoding string `json:",default=json,options=[json,plain]"`
TimeFormat string `json:",optional"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=[info,error,severe]"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
MaxBackups int `json:",default=0"`
MaxSize int `json:",default=0"`
RotationRuleType LogRotationRuleType `json:",default=0,options=[0,1]"`
}
```
@ -37,6 +40,12 @@ type LogConf struct {
- `Compress`: whether to compress log files; only works in `file` mode
- `KeepDays`: how many days the log files are kept; after the given number of days, outdated files are deleted automatically. Has no effect in `console` mode
- `StackCooldownMillis`: how many milliseconds to wait before writing a stacktrace again. Used to avoid flooding the log with stacktraces
- `MaxBackups`: how many backup log files are kept. 0 means all backups are kept. Only takes effect when `RotationRuleType` is set to `LogRotationRuleTypeSizeLimit`. Note: `KeepDays` takes higher priority; even if `MaxBackups` is set to 0, backup files are still removed once the `KeepDays` limit is reached.
- `MaxSize`: how much space the log file being written may take up, in `MB`. 0 means no limit. Only takes effect when `RotationRuleType` is set to `LogRotationRuleTypeSizeLimit`.
- `RotationRuleType`: the type of log rotation rule. Defaults to `LogRotationRuleTypeDaily` (rotate by day, integer value 0).
  - `LogRotationRuleTypeDaily` (integer value 0): rotate by day.
  - `LogRotationRuleTypeSizeLimit` (integer value 1): rotate by log file size.
## Logging methods

@ -8,15 +8,18 @@ English | [简体中文](readme-cn.md)
```go
type LogConf struct {
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=[console,file,volume]"`
Encoding string `json:",default=json,options=[json,plain]"`
TimeFormat string `json:",optional"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=[info,error,severe]"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=[console,file,volume]"`
Encoding string `json:",default=json,options=[json,plain]"`
TimeFormat string `json:",optional"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=[info,error,severe]"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
MaxBackups int `json:",default=0"`
MaxSize int `json:",default=0"`
RotationRuleType LogRotationRuleType `json:",default=0,options=[0,1]"`
}
```
@ -37,6 +40,11 @@ type LogConf struct {
- `Compress`: whether or not to compress log files, only works with `file` mode.
- `KeepDays`: how many days that the log files are kept, after the given days, the outdated files will be deleted automatically. It has no effect on `console` mode.
- `StackCooldownMillis`: how many milliseconds to wait before writing a stacktrace again. It's used to avoid stacktrace flooding.
- `MaxBackups`: represents how many backup log files will be kept. 0 means all files will be kept forever. Only takes effect when RotationRuleType is `LogRotationRuleTypeSizeLimit`. NOTE: `KeepDays` takes higher priority; even if `MaxBackups` is set to 0, log files will still be removed once the `KeepDays` limit is reached.
- `MaxSize`: represents how much space the log file being written can take up. 0 means no limit. The unit is `MB`. Only takes effect when RotationRuleType is `LogRotationRuleTypeSizeLimit`.
- `RotationRuleType`: represents the type of log rotation rule. Default is LogRotationRuleTypeDaily (int value 0).
- `LogRotationRuleTypeDaily` (int value 0): rotate the logs by day.
- `LogRotationRuleTypeSizeLimit` (int value 1): rotate the logs by file size.
## Logging methods

@ -9,6 +9,7 @@ import (
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
@ -18,11 +19,14 @@ import (
)
const (
dateFormat = "2006-01-02"
hoursPerDay = 24
bufferSize = 100
defaultDirMode = 0o755
defaultFileMode = 0o600
rfc3339DateFormat = time.RFC3339
dateFormat = "2006-01-02"
hoursPerDay = 24
bufferSize = 100
defaultDirMode = 0o755
defaultFileMode = 0o600
gzipExt = ".gz"
megabyte = 1024 * 1024
)
// ErrLogFileClosed is an error that indicates the log file is already closed.
@ -34,7 +38,7 @@ type (
BackupFileName() string
MarkRotated()
OutdatedFiles() []string
ShallRotate() bool
ShallRotate(currentSize, writeLen int) bool
}
// A RotateLogger is a Logger that can rotate log files with given rules.
@ -49,6 +53,8 @@ type (
// can't use threading.RoutineGroup because of cycle import
waitGroup sync.WaitGroup
closeOnce sync.Once
currentSize int
}
// A DailyRotateRule is a rule to daily rotate the log files.
@ -59,6 +65,13 @@ type (
days int
gzip bool
}
// SizeLimitRotateRule is a rotation rule that rotates the log files based on size.
SizeLimitRotateRule struct {
DailyRotateRule
maxSize int
maxBackups int
}
)
// DefaultRotateRule is a default log rotating rule, currently DailyRotateRule.
@ -90,7 +103,7 @@ func (r *DailyRotateRule) OutdatedFiles() []string {
var pattern string
if r.gzip {
pattern = fmt.Sprintf("%s%s*.gz", r.filename, r.delimiter)
pattern = fmt.Sprintf("%s%s*%s", r.filename, r.delimiter, gzipExt)
} else {
pattern = fmt.Sprintf("%s%s*", r.filename, r.delimiter)
}
@ -105,7 +118,7 @@ func (r *DailyRotateRule) OutdatedFiles() []string {
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(dateFormat)
fmt.Fprintf(&buf, "%s%s%s", r.filename, r.delimiter, boundary)
if r.gzip {
buf.WriteString(".gz")
buf.WriteString(gzipExt)
}
boundaryFile := buf.String()
@ -120,10 +133,100 @@ func (r *DailyRotateRule) OutdatedFiles() []string {
}
// ShallRotate checks if the file should be rotated.
func (r *DailyRotateRule) ShallRotate() bool {
func (r *DailyRotateRule) ShallRotate(currentSize, writeLen int) bool {
return len(r.rotatedTime) > 0 && getNowDate() != r.rotatedTime
}
// NewSizeLimitRotateRule returns a rotation rule that rotates log files based on a size limit.
func NewSizeLimitRotateRule(filename, delimiter string, days, maxSize, maxBackups int, gzip bool) RotateRule {
return &SizeLimitRotateRule{
DailyRotateRule: DailyRotateRule{
rotatedTime: getNowDateInRFC3339Format(),
filename: filename,
delimiter: delimiter,
days: days,
gzip: gzip,
},
maxSize: maxSize,
maxBackups: maxBackups,
}
}
func (r *SizeLimitRotateRule) ShallRotate(currentSize, writeLen int) bool {
return r.maxSize > 0 && r.maxSize*megabyte < currentSize+writeLen
}
func (r *SizeLimitRotateRule) parseFilename(file string) (dir, logname, ext, prefix string) {
dir = filepath.Dir(file)
logname = filepath.Base(file)
ext = filepath.Ext(file)
prefix = logname[:len(logname)-len(ext)]
return
}
func (r *SizeLimitRotateRule) BackupFileName() string {
dir := filepath.Dir(r.filename)
_, _, ext, prefix := r.parseFilename(r.filename)
timestamp := getNowDateInRFC3339Format()
return filepath.Join(dir, fmt.Sprintf("%s%s%s%s", prefix, r.delimiter, timestamp, ext))
}
func (r *SizeLimitRotateRule) MarkRotated() {
r.rotatedTime = getNowDateInRFC3339Format()
}
func (r *SizeLimitRotateRule) OutdatedFiles() []string {
var pattern string
dir, _, ext, prefix := r.parseFilename(r.filename)
if r.gzip {
pattern = fmt.Sprintf("%s%s%s%s*%s%s", dir, string(filepath.Separator), prefix, r.delimiter, ext, gzipExt)
} else {
pattern = fmt.Sprintf("%s%s%s%s*%s", dir, string(filepath.Separator), prefix, r.delimiter, ext)
}
files, err := filepath.Glob(pattern)
if err != nil {
fmt.Printf("failed to delete outdated log files, error: %s\n", err)
Errorf("failed to delete outdated log files, error: %s", err)
return nil
}
sort.Strings(files)
outdated := make(map[string]lang.PlaceholderType)
// check whether there are too many backups
if r.maxBackups > 0 && len(files) > r.maxBackups {
for _, f := range files[:len(files)-r.maxBackups] {
outdated[f] = lang.Placeholder
}
files = files[len(files)-r.maxBackups:]
}
// check for backups that are too old
if r.days > 0 {
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(rfc3339DateFormat)
bf := filepath.Join(dir, fmt.Sprintf("%s%s%s%s", prefix, r.delimiter, boundary, ext))
if r.gzip {
bf += gzipExt
}
for _, f := range files {
if f < bf {
outdated[f] = lang.Placeholder
} else {
// Because the filenames are sorted, there is no need to keep looping after the first file that is not outdated.
break
}
}
}
var result []string
for k := range outdated {
result = append(result, k)
}
return result
}
// NewLogger returns a RotateLogger with given filename and rule, etc.
func NewLogger(filename string, rule RotateRule, compress bool) (*RotateLogger, error) {
l := &RotateLogger{
@ -282,15 +385,17 @@ func (l *RotateLogger) startWorker() {
}
func (l *RotateLogger) write(v []byte) {
if l.rule.ShallRotate() {
if l.rule.ShallRotate(l.currentSize, len(v)) {
if err := l.rotate(); err != nil {
log.Println(err)
} else {
l.rule.MarkRotated()
l.currentSize = 0
}
}
if l.fp != nil {
l.fp.Write(v)
l.currentSize += len(v)
}
}
@ -308,6 +413,10 @@ func getNowDate() string {
return time.Now().Format(dateFormat)
}
func getNowDateInRFC3339Format() string {
return time.Now().Format(rfc3339DateFormat)
}
func gzipFile(file string) error {
in, err := os.Open(file)
if err != nil {
@ -315,7 +424,7 @@ func gzipFile(file string) error {
}
defer in.Close()
out, err := os.Create(fmt.Sprintf("%s.gz", file))
out, err := os.Create(fmt.Sprintf("%s%s", file, gzipExt))
if err != nil {
return err
}

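For readers who want to wire these pieces together directly rather than through LogConf, a hedged sketch using the constructors above; the "-" delimiter stands in for the package's internal backupFileDelimiter constant, and the values are illustrative:

```go
package main

import "github.com/zeromicro/go-zero/core/logx"

func main() {
	// Rotate logs/access.log once it would exceed 100 MB, keep at most 10 backups,
	// drop backups older than 7 days, and gzip rotated files.
	rule := logx.NewSizeLimitRotateRule("logs/access.log", "-", 7, 100, 10, true)
	logger, err := logx.NewLogger("logs/access.log", rule, true)
	if err != nil {
		panic(err)
	}
	defer logger.Close()

	// Backups are named logs/access-<RFC3339 timestamp>.log (plus .gz when compressed),
	// per SizeLimitRotateRule.BackupFileName above.
	logger.Write([]byte("hello, size-limited rotation\n"))
}
```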
@ -29,7 +29,34 @@ func TestDailyRotateRuleOutdatedFiles(t *testing.T) {
func TestDailyRotateRuleShallRotate(t *testing.T) {
var rule DailyRotateRule
rule.rotatedTime = time.Now().Add(time.Hour * 24).Format(dateFormat)
assert.True(t, rule.ShallRotate())
assert.True(t, rule.ShallRotate(0, 0))
}
func TestSizeLimitRotateRuleMarkRotated(t *testing.T) {
var rule SizeLimitRotateRule
rule.MarkRotated()
assert.Equal(t, getNowDateInRFC3339Format(), rule.rotatedTime)
}
func TestSizeLimitRotateRuleOutdatedFiles(t *testing.T) {
var rule SizeLimitRotateRule
assert.Empty(t, rule.OutdatedFiles())
rule.days = 1
assert.Empty(t, rule.OutdatedFiles())
rule.gzip = true
assert.Empty(t, rule.OutdatedFiles())
rule.maxBackups = 0
assert.Empty(t, rule.OutdatedFiles())
}
func TestSizeLimitRotateRuleShallRotate(t *testing.T) {
var rule SizeLimitRotateRule
rule.rotatedTime = time.Now().Add(time.Hour * 24).Format(rfc3339DateFormat)
rule.maxSize = 0
assert.False(t, rule.ShallRotate(0, 0))
rule.maxSize = 100
assert.False(t, rule.ShallRotate(0, 0))
assert.True(t, rule.ShallRotate(99*megabyte, 2*megabyte))
}
func TestRotateLoggerClose(t *testing.T) {
@ -142,3 +169,162 @@ func TestRotateLoggerWrite(t *testing.T) {
func TestLogWriterClose(t *testing.T) {
assert.Nil(t, newLogWriter(nil).Close())
}
func TestRotateLoggerWithSizeLimitRotateRuleClose(t *testing.T) {
filename, err := fs.TempFilenameWithText("foo")
assert.Nil(t, err)
if len(filename) > 0 {
defer os.Remove(filename)
}
logger, err := NewLogger(filename, new(SizeLimitRotateRule), false)
assert.Nil(t, err)
assert.Nil(t, logger.Close())
}
func TestRotateLoggerGetBackupWithSizeLimitRotateRuleFilename(t *testing.T) {
filename, err := fs.TempFilenameWithText("foo")
assert.Nil(t, err)
if len(filename) > 0 {
defer os.Remove(filename)
}
logger, err := NewLogger(filename, new(SizeLimitRotateRule), false)
assert.Nil(t, err)
assert.True(t, len(logger.getBackupFilename()) > 0)
logger.backup = ""
assert.True(t, len(logger.getBackupFilename()) > 0)
}
func TestRotateLoggerWithSizeLimitRotateRuleMayCompressFile(t *testing.T) {
old := os.Stdout
os.Stdout = os.NewFile(0, os.DevNull)
defer func() {
os.Stdout = old
}()
filename, err := fs.TempFilenameWithText("foo")
assert.Nil(t, err)
if len(filename) > 0 {
defer os.Remove(filename)
}
logger, err := NewLogger(filename, new(SizeLimitRotateRule), false)
assert.Nil(t, err)
logger.maybeCompressFile(filename)
_, err = os.Stat(filename)
assert.Nil(t, err)
}
func TestRotateLoggerWithSizeLimitRotateRuleMayCompressFileTrue(t *testing.T) {
old := os.Stdout
os.Stdout = os.NewFile(0, os.DevNull)
defer func() {
os.Stdout = old
}()
filename, err := fs.TempFilenameWithText("foo")
assert.Nil(t, err)
logger, err := NewLogger(filename, new(SizeLimitRotateRule), true)
assert.Nil(t, err)
if len(filename) > 0 {
defer os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
}
logger.maybeCompressFile(filename)
_, err = os.Stat(filename)
assert.NotNil(t, err)
}
func TestRotateLoggerWithSizeLimitRotateRuleRotate(t *testing.T) {
filename, err := fs.TempFilenameWithText("foo")
assert.Nil(t, err)
logger, err := NewLogger(filename, new(SizeLimitRotateRule), true)
assert.Nil(t, err)
if len(filename) > 0 {
defer func() {
os.Remove(logger.getBackupFilename())
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
}()
}
err = logger.rotate()
switch v := err.(type) {
case *os.LinkError:
// avoid rename error on docker container
assert.Equal(t, syscall.EXDEV, v.Err)
case *os.PathError:
// ignore remove error for tests,
// files are cleaned in GitHub actions.
assert.Equal(t, "remove", v.Op)
default:
assert.Nil(t, err)
}
}
func TestRotateLoggerWithSizeLimitRotateRuleWrite(t *testing.T) {
filename, err := fs.TempFilenameWithText("foo")
assert.Nil(t, err)
rule := new(SizeLimitRotateRule)
logger, err := NewLogger(filename, rule, true)
assert.Nil(t, err)
if len(filename) > 0 {
defer func() {
os.Remove(logger.getBackupFilename())
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz")
}()
}
// the following write calls cannot be changed to Write because of a DATA RACE.
logger.write([]byte(`foo`))
rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat)
logger.write([]byte(`bar`))
logger.Close()
logger.write([]byte(`baz`))
}
func BenchmarkRotateLogger(b *testing.B) {
filename := "./test.log"
filename2 := "./test2.log"
dailyRotateRuleLogger, err1 := NewLogger(
filename,
DefaultRotateRule(
filename,
backupFileDelimiter,
1,
true,
),
true,
)
if err1 != nil {
b.Logf("Failed to new daily rotate rule logger: %v", err1)
b.FailNow()
}
sizeLimitRotateRuleLogger, err2 := NewLogger(
filename2,
NewSizeLimitRotateRule(
filename2,
backupFileDelimiter,
1,
100,
10,
true,
),
true,
)
if err2 != nil {
b.Logf("Failed to new size limit rotate rule logger: %v", err1)
b.FailNow()
}
defer func() {
dailyRotateRuleLogger.Close()
sizeLimitRotateRuleLogger.Close()
os.Remove(filename)
os.Remove(filename2)
}()
b.Run("daily rotate rule", func(b *testing.B) {
for i := 0; i < b.N; i++ {
dailyRotateRuleLogger.write([]byte("testing\ntesting\n"))
}
})
b.Run("size limit rotate rule", func(b *testing.B) {
for i := 0; i < b.N; i++ {
sizeLimitRotateRuleLogger.write([]byte("testing\ntesting\n"))
}
})
}

@ -63,15 +63,15 @@ func (w *atomicWriter) Load() Writer {
func (w *atomicWriter) Store(v Writer) {
w.lock.Lock()
defer w.lock.Unlock()
w.writer = v
w.lock.Unlock()
}
func (w *atomicWriter) Swap(v Writer) Writer {
w.lock.Lock()
defer w.lock.Unlock()
old := w.writer
w.writer = v
w.lock.Unlock()
return old
}
@ -109,6 +109,14 @@ func newFileWriter(c LogConf) (Writer, error) {
if c.KeepDays > 0 {
opts = append(opts, WithKeepDays(c.KeepDays))
}
if c.MaxBackups > 0 {
opts = append(opts, WithMaxBackups(c.MaxBackups))
}
if c.MaxSize > 0 {
opts = append(opts, WithMaxSize(c.MaxSize))
}
opts = append(opts, WithLogRotationRuleType(c.RotationRuleType))
accessFile := path.Join(c.Path, accessFilename)
errorFile := path.Join(c.Path, errorFilename)
