This commit is contained in:
zhaoxiaorong
2025-02-07 13:01:38 +08:00
parent ebcdfe1ee8
commit 57a0d8ae81
52 changed files with 3313 additions and 0 deletions

View File

@@ -0,0 +1,220 @@
package elastic
import (
"bytes"
"context"
"encoding/json"
"log"
"sync/atomic"
"time"
"git.apinb.com/bsm-sdk/core/vars"
"github.com/elastic/go-elasticsearch/v8"
"github.com/elastic/go-elasticsearch/v8/esapi"
"github.com/elastic/go-elasticsearch/v8/esutil"
)
// ES wraps an Elasticsearch v8 client with convenience helpers for
// indexing, bulk writing, searching, and deleting documents.
type ES struct {
	Client *elasticsearch.Client
}
// NewElastic builds an ES wrapper connected to the given endpoints using
// basic authentication. It returns an error when the client cannot be built.
func NewElastic(endpoints []string, username, password string) (*ES, error) {
	client, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: endpoints,
		Username:  username,
		Password:  password,
	})
	if err != nil {
		return nil, err
	}
	return &ES{Client: client}, nil
}
// CreateDocument indexes a single document into index under the given id.
// When id is empty, Elasticsearch assigns a random unique ID.
//
// Example document:
//
//	doc := map[string]interface{}{
//	    "title":   "title",
//	    "content": "content",
//	    "time":    time.Now().Unix(),
//	    "date":    time.Now(),
//	}
func (es *ES) CreateDocument(index string, id string, doc *interface{}) {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(doc); err != nil {
		log.Println("Elastic NewEncoder:", err)
		// FIX: nothing valid to send after an encode failure.
		return
	}
	// Set up the request object.
	req := esapi.IndexRequest{
		Index:      index,
		DocumentID: id,
		Body:       &buf,
		Refresh:    "true",
	}
	// Perform the request with the client.
	res, err := req.Do(context.Background(), es.Client)
	if err != nil {
		// BUG FIX: res is nil when err != nil; the original called
		// res.String() and deferred res.Body.Close(), both nil derefs.
		log.Println("Elastic Error:", err)
		return
	}
	defer res.Body.Close()
	if res.IsError() {
		log.Println("Elastic Error:", res.String())
	}
}
// Batch bulk-writes documents into index using a BulkIndexer.
//
// The action parameter configures the operation performed per document:
//   - create: create the document; error if it already exists
//   - index:  create the document, or update it if it exists
//   - update: update an existing document; error if it does not exist
//   - delete: delete a document; error if the id does not exist
//
// Each document must carry a string "id" field; documents without one
// are skipped with a log message.
func (es *ES) Batch(index string, documents []map[string]interface{}, action string) {
	// FIX: dropped log.SetFlags(0), which mutated the process-wide logger.
	var countSuccessful uint64

	// Create the BulkIndexer.
	bi, err := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
		Index:         index,             // The default index name
		Client:        es.Client,         // The Elasticsearch client
		NumWorkers:    vars.ESNumWorkers, // The number of worker goroutines
		FlushBytes:    vars.ESFlushBytes, // The flush threshold in bytes
		FlushInterval: 30 * time.Second,  // The periodic flush interval
	})
	if err != nil {
		// FIX: was log.Fatalf — library code must not kill the process.
		log.Printf("Error creating the indexer: %s", err)
		return
	}

	for _, doc := range documents {
		// FIX: the original asserted doc["id"].(string) unchecked and
		// panicked on a missing or non-string id.
		id, ok := doc["id"].(string)
		if !ok {
			log.Printf("Skipping document without string id: %v", doc)
			continue
		}

		// Prepare the data payload: encode the document to JSON.
		data, err := json.Marshal(doc)
		if err != nil {
			// FIX: was log.Fatalf; skip the bad document instead.
			log.Printf("Cannot encode document %s: %s", id, err)
			continue
		}

		// Add an item to the BulkIndexer.
		err = bi.Add(
			context.Background(),
			esutil.BulkIndexerItem{
				Action: action,
				// DocumentID is the (optional) document ID.
				DocumentID: id,
				// Body is an `io.Reader` with the payload.
				Body: bytes.NewReader(data),
				// OnSuccess is called for each successful operation.
				OnSuccess: func(ctx context.Context, item esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem) {
					atomic.AddUint64(&countSuccessful, 1)
				},
				// OnFailure is called for each failed operation.
				OnFailure: func(ctx context.Context, item esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem, err error) {
					if err != nil {
						log.Printf("ERROR: %s", err)
					} else {
						log.Printf("ERROR: %s: %s", res.Error.Type, res.Error.Reason)
					}
				},
			},
		)
		if err != nil {
			log.Printf("Unexpected error: %s", err)
		}
	}

	// Close the indexer: flushes pending items and stops the workers.
	if err := bi.Close(context.Background()); err != nil {
		log.Printf("Unexpected error: %s", err)
	}

	stats := bi.Stats()
	if stats.NumFailed > 0 {
		log.Printf("Indexed [%d] documents with [%d] errors", stats.NumFlushed, stats.NumFailed)
	} else {
		log.Printf("Successfully indexed [%d] documents", stats.NumFlushed)
	}
}
// Search runs query against index, sorted by "time" descending, returning
// the first 10 hits with total-hit tracking enabled.
//
// The caller owns the response and must close res.Body after reading it.
func (es *ES) Search(index string, query map[string]interface{}) (res *esapi.Response, err error) {
	var buf bytes.Buffer
	if err = json.NewEncoder(&buf).Encode(query); err != nil {
		return
	}
	// Perform the search request.
	res, err = es.Client.Search(
		es.Client.Search.WithContext(context.Background()),
		es.Client.Search.WithIndex(index),
		es.Client.Search.WithBody(&buf),
		es.Client.Search.WithTrackTotalHits(true),
		es.Client.Search.WithFrom(0),
		es.Client.Search.WithSize(10),
		es.Client.Search.WithSort("time:desc"),
		es.Client.Search.WithPretty(),
	)
	// BUG FIX: the original deferred res.Body.Close() here, handing the
	// caller an already-closed body. Closing is now the caller's job.
	return
}
// Delete removes the document with ID idx from the named index, refreshing
// the index immediately so the deletion is visible to searches.
//
// The caller owns the response and must close res.Body after reading it.
func (es *ES) Delete(index, idx string) (res *esapi.Response, err error) {
	res, err = es.Client.Delete(
		index, // Index name
		idx,   // Document ID
		es.Client.Delete.WithRefresh("true"),
	)
	// BUG FIX: the original deferred res.Body.Close(), closing the body
	// before the caller could read it. Closing is now the caller's job.
	return
}
// DeleteByQuery deletes from the given indices every document matching
// query (an Elasticsearch query-DSL body).
//
// The caller owns the response and must close res.Body after reading it.
func (es *ES) DeleteByQuery(index []string, query map[string]interface{}) (res *esapi.Response, err error) {
	var buf bytes.Buffer
	if err = json.NewEncoder(&buf).Encode(query); err != nil {
		return
	}
	// Perform the delete-by-query request.
	res, err = es.Client.DeleteByQuery(
		index,
		&buf,
	)
	// BUG FIX: the original deferred res.Body.Close(), closing the body
	// before the caller could read it. Closing is now the caller's job.
	return
}

View File

@@ -0,0 +1,97 @@
Forked from https://github.com/liyuan1125/gorm-cache.
```go
package main
import (
"context"
"fmt"
"github.com/go-redis/redis/v8"
"github.com/liyuan1125/gorm-cache"
redis2 "github.com/liyuan1125/gorm-cache/store/redis"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"os"
"time"
)
var (
db *gorm.DB
redisClient *redis.Client
cachePlugin *cache.Cache
)
func newDb() {
dsn := "root:123456@tcp(127.0.0.1:3306)/gorm?charset=utf8&parseTime=True&loc=Local"
var err error
db, err = gorm.Open(mysql.Open(dsn), &gorm.Config{})
if err != nil {
fmt.Println(err.Error())
return
}
redisClient = redis.NewClient(&redis.Options{Addr: ":6379"})
cacheConfig := &cache.Config{
Store: redis2.NewWithDb(redisClient), // OR redis2.New(&redis.Options{Addr:"6379"})
Serializer: &cache.DefaultJSONSerializer{},
}
cachePlugin = cache.New(cacheConfig)
if err = db.Use(cachePlugin); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
}
func basic() {
var username string
ctx := context.Background()
ctx = cache.NewExpiration(ctx, time.Hour)
db.Table("users").WithContext(ctx).Where("id = 1").Limit(1).Pluck("username", &username)
fmt.Println(username)
// output gorm
}
func customKey() {
var nickname string
ctx := context.Background()
ctx = cache.NewExpiration(ctx, time.Hour)
ctx = cache.NewKey(ctx, "nickname")
db.Table("users").WithContext(ctx).Where("id = 1").Limit(1).Pluck("nickname", &nickname)
fmt.Println(nickname)
// output gormwithmysql
}
func useTag() {
var nickname string
ctx := context.Background()
ctx = cache.NewExpiration(ctx, time.Hour)
ctx = cache.NewTag(ctx, "users")
db.Table("users").WithContext(ctx).Where("id = 1").Limit(1).Pluck("nickname", &nickname)
fmt.Println(nickname)
// output gormwithmysql
}
func main() {
newDb()
basic()
customKey()
useTag()
ctx := context.Background()
fmt.Println(redisClient.Keys(ctx, "*").Val())
fmt.Println(cachePlugin.RemoveFromTag(ctx, "users"))
}
```

View File

@@ -0,0 +1,200 @@
package cache
import (
"context"
"hash/fnv"
"os"
"strconv"
"time"
"gorm.io/gorm"
"gorm.io/gorm/callbacks"
)
// Config carries the dependencies used to build a Cache plugin.
type Config struct {
	// Store is the cache backend (required).
	Store Store
	// Prefix is prepended to every generated cache key.
	Prefix string
	// Serializer encodes/decodes cached values; defaults to JSON when nil.
	Serializer Serializer
}

type (
	// Serializer converts values to and from their cached byte form.
	Serializer interface {
		Serialize(v any) ([]byte, error)
		Deserialize(data []byte, v any) error
	}

	// Store is the contract a cache backend must satisfy.
	Store interface {
		// Set writes a cache entry with the given TTL.
		Set(ctx context.Context, key string, value any, ttl time.Duration) error
		// Get reads the raw bytes of a cache entry.
		Get(ctx context.Context, key string) ([]byte, error)
		// SaveTagKey registers a cache key under a tag for later invalidation.
		SaveTagKey(ctx context.Context, tag, key string) error
		// RemoveFromTag deletes every cache entry registered under tag.
		RemoveFromTag(ctx context.Context, tag string) error
	}
)

// Cache is a gorm plugin that transparently caches query results.
type Cache struct {
	store Store
	// Serializer encodes/decodes cached values.
	Serializer Serializer
	// prefix is prepended to generated cache keys.
	prefix string
}
// New builds the Cache plugin from conf. A Store is mandatory; the
// Serializer falls back to DefaultJSONSerializer when unset.
func New(conf *Config) *Cache {
	if conf.Store == nil {
		// FIX: the original called os.Exit(1) silently; at least explain
		// why the process is terminating.
		os.Stderr.WriteString("gorm-cache: Config.Store must not be nil\n")
		os.Exit(1)
	}
	if conf.Serializer == nil {
		conf.Serializer = &DefaultJSONSerializer{}
	}
	return &Cache{
		store:      conf.Store,
		prefix:     conf.Prefix,
		Serializer: conf.Serializer,
	}
}
// Name returns the plugin name this cache registers with gorm.
func (p *Cache) Name() string {
	const pluginName = "gorm:cache"
	return pluginName
}
// Initialize installs the cache by replacing gorm's default query callback
// with Cache.Query.
func (p *Cache) Initialize(tx *gorm.DB) error {
	queryCallbacks := tx.Callback().Query()
	return queryCallbacks.Replace("gorm:query", p.Query)
}
// generateKey hashes key with 64-bit FNV-1a and renders the sum in base 36,
// producing a short deterministic cache key.
func generateKey(key string) string {
	hasher := fnv.New64a()
	_, _ = hasher.Write([]byte(key))
	sum := hasher.Sum64()
	return strconv.FormatUint(sum, 36)
}
// Query is the replacement for gorm's default query callback. It serves
// results from the cache when the context carries a TTL (set via
// NewExpiration) and falls through to the database otherwise, writing
// fresh results back to the cache afterwards.
func (p *Cache) Query(tx *gorm.DB) {
	ctx := tx.Statement.Context
	var ttl time.Duration
	var hasTTL bool
	// No TTL in the context means caching is disabled for this query:
	// defer entirely to gorm's stock behavior.
	if ttl, hasTTL = FromExpiration(ctx); !hasTTL {
		callbacks.Query(tx)
		return
	}
	var (
		key    string
		hasKey bool
	)
	// Ask gorm to build the SQL so it can serve as the cache-key input.
	callbacks.BuildQuerySQL(tx)
	// Prefer a caller-supplied key (NewKey); otherwise hash the SQL text.
	if key, hasKey = FromKey(ctx); !hasKey {
		key = p.prefix + generateKey(tx.Statement.SQL.String())
	}
	// Cache hit: the result has been decoded into tx.Statement.Dest.
	if err := p.QueryCache(ctx, key, tx.Statement.Dest); err == nil {
		return
	}
	// Cache miss: run the already-built SQL against the database.
	p.QueryDB(tx)
	if tx.Error != nil {
		return
	}
	// Store the fresh result under the computed key with the given TTL.
	if err := p.SaveCache(ctx, key, tx.Statement.Dest, ttl); err != nil {
		tx.Logger.Error(ctx, err.Error())
		return
	}
	// Register the key under its tag (if any) so RemoveFromTag can
	// invalidate it later.
	if tag, hasTag := FromTag(ctx); hasTag {
		_ = p.store.SaveTagKey(ctx, tag, key)
	}
}
// QueryDB executes the SQL already built in tx against the database and
// scans the rows into tx.Statement.Dest.
//
// It re-implements gorm's query execution (instead of calling
// callbacks.Query) so that callbacks.BuildQuerySQL is not run twice.
func (p *Cache) QueryDB(tx *gorm.DB) {
	if tx.Error != nil || tx.DryRun {
		return
	}
	rows, err := tx.Statement.ConnPool.QueryContext(tx.Statement.Context, tx.Statement.SQL.String(), tx.Statement.Vars...)
	if err != nil {
		_ = tx.AddError(err)
		return
	}
	defer func() {
		// Surface any close error on the tx as well.
		_ = tx.AddError(rows.Close())
	}()
	gorm.Scan(rows, tx, 0)
}
// QueryCache loads the cached value stored under key and decodes it into
// dest. It returns the store's error on a miss, or the serializer's error
// when the payload cannot be decoded.
func (p *Cache) QueryCache(ctx context.Context, key string, dest any) error {
	values, err := p.store.Get(ctx, key)
	if err != nil {
		return err
	}
	// BUG FIX: the original wrote `dest = 0`, which only rebound the local
	// variable — Deserialize then received a non-pointer int and always
	// failed for *int64 destinations (count queries). Zero the pointed-to
	// value instead so the decode overwrites a clean target.
	switch d := dest.(type) {
	case *int64:
		*d = 0
	}
	return p.Serializer.Deserialize(values, dest)
}
// SaveCache serializes dest and stores the payload under key with the
// given TTL.
func (p *Cache) SaveCache(ctx context.Context, key string, dest any, ttl time.Duration) error {
	payload, err := p.Serializer.Serialize(dest)
	if err != nil {
		return err
	}
	return p.store.Set(ctx, key, payload, ttl)
}
// RemoveFromTag invalidates every cache entry registered under tag by
// delegating to the backing store.
func (p *Cache) RemoveFromTag(ctx context.Context, tag string) error {
	backend := p.store
	return backend.RemoveFromTag(ctx, tag)
}

View File

@@ -0,0 +1,88 @@
package cache
import (
"context"
"time"
)
type (
	// queryCacheCtx is the context key carrying the cache TTL.
	queryCacheCtx struct{}
	// queryCacheKeyCtx is the context key carrying a custom cache key.
	queryCacheKeyCtx struct{}
	// queryCacheTagCtx is the context key carrying a cache tag.
	queryCacheTagCtx struct{}
)
// NewKey returns a context carrying key as the custom cache key.
func NewKey(ctx context.Context, key string) context.Context {
	marker := queryCacheKeyCtx{}
	return context.WithValue(ctx, marker, key)
}
// NewTag returns a context carrying key as the cache tag.
func NewTag(ctx context.Context, key string) context.Context {
	marker := queryCacheTagCtx{}
	return context.WithValue(ctx, marker, key)
}
// NewExpiration returns a context carrying ttl as the cache expiration,
// which also enables caching for the query.
func NewExpiration(ctx context.Context, ttl time.Duration) context.Context {
	marker := queryCacheCtx{}
	return context.WithValue(ctx, marker, ttl)
}
// FromExpiration extracts the cache TTL from ctx, reporting whether one
// was set via NewExpiration.
func FromExpiration(ctx context.Context) (time.Duration, bool) {
	// A failed assertion (including a nil value) yields ok == false,
	// matching the original nil-check + assert sequence.
	ttl, ok := ctx.Value(queryCacheCtx{}).(time.Duration)
	if !ok {
		return 0, false
	}
	return ttl, true
}
// FromKey extracts the custom cache key from ctx, reporting whether one
// was set via NewKey.
func FromKey(ctx context.Context) (string, bool) {
	// A failed assertion (including a nil value) yields ok == false,
	// matching the original nil-check + assert sequence.
	key, ok := ctx.Value(queryCacheKeyCtx{}).(string)
	if !ok {
		return "", false
	}
	return key, true
}
// FromTag extracts the cache tag from ctx, reporting whether one was set
// via NewTag.
func FromTag(ctx context.Context) (string, bool) {
	// A failed assertion (including a nil value) yields ok == false,
	// matching the original nil-check + assert sequence.
	tag, ok := ctx.Value(queryCacheTagCtx{}).(string)
	if !ok {
		return "", false
	}
	return tag, true
}

View File

@@ -0,0 +1,11 @@
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.4 h1:tHnRBy1i5F2Dh8BAFxqFzxKqqvezXrL2OW1TnX+Mlas=
github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
gorm.io/driver/mysql v1.3.4 h1:/KoBMgsUHC3bExsekDcmNYaBnfH2WNeFuXqqrqMc98Q=
gorm.io/driver/mysql v1.3.4/go.mod h1:s4Tq0KmD0yhPGHbZEwg1VPlH0vT/GBHJZorPzhcxBUE=
gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.23.6 h1:KFLdNgri4ExFFGTRGGFWON2P1ZN28+9SJRN8voOoYe0=
gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=

View File

@@ -0,0 +1,92 @@
package main
import (
"context"
"fmt"
"github.com/go-redis/redis/v8"
"github.com/liyuan1125/gorm-cache"
redis2 "github.com/liyuan1125/gorm-cache/store/redis"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"os"
"time"
)
// Package-level handles shared by the example functions below.
var (
	db          *gorm.DB      // gorm handle, initialized by newDb
	redisClient *redis.Client // redis client backing the cache store
	cachePlugin *cache.Cache  // the gorm-cache plugin under demonstration
)
// newDb opens the MySQL connection, wires up the redis-backed cache
// plugin, and registers it with gorm. The example cannot proceed without
// a database, so any failure terminates the process.
func newDb() {
	dsn := "root:123456@tcp(127.0.0.1:3306)/gorm?charset=utf8&parseTime=True&loc=Local"

	var err error
	db, err = gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		// BUG FIX: the original merely returned here, leaving db nil so
		// main() crashed later; fail fast like the db.Use path below.
		fmt.Println(err.Error())
		os.Exit(1)
	}

	redisClient = redis.NewClient(&redis.Options{Addr: ":6379"})

	cacheConfig := &cache.Config{
		Store:      redis2.NewWithDb(redisClient), // OR redis2.New(&redis.Options{Addr:"6379"})
		Serializer: &cache.DefaultJSONSerializer{},
	}

	cachePlugin = cache.New(cacheConfig)
	if err = db.Use(cachePlugin); err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
// basic demonstrates caching with only a TTL attached to the context.
func basic() {
	ctx := cache.NewExpiration(context.Background(), time.Hour)
	var username string
	db.Table("users").WithContext(ctx).Where("id = 1").Limit(1).Pluck("username", &username)
	fmt.Println(username)
	// output: gorm
}
// customKey demonstrates caching under a caller-chosen key instead of the
// SQL-derived one.
func customKey() {
	ctx := cache.NewKey(cache.NewExpiration(context.Background(), time.Hour), "nickname")
	var nickname string
	db.Table("users").WithContext(ctx).Where("id = 1").Limit(1).Pluck("nickname", &nickname)
	fmt.Println(nickname)
	// output: gormwithmysql
}
// useTag demonstrates registering the cached entry under a tag so it can
// be invalidated as a group later.
func useTag() {
	ctx := cache.NewTag(cache.NewExpiration(context.Background(), time.Hour), "users")
	var nickname string
	db.Table("users").WithContext(ctx).Where("id = 1").Limit(1).Pluck("nickname", &nickname)
	fmt.Println(nickname)
	// output: gormwithmysql
}
// main runs each cache scenario, then dumps the redis keys and clears the
// "users" tag.
func main() {
	newDb()
	basic()
	customKey()
	useTag()

	background := context.Background()
	fmt.Println(redisClient.Keys(background, "*").Val())
	fmt.Println(cachePlugin.RemoveFromTag(background, "users"))
}

View File

@@ -0,0 +1,23 @@
package cache
import (
"encoding/json"
)
// DefaultJSONSerializer encodes and decodes cache payloads as JSON.
type DefaultJSONSerializer struct{}

// Serialize renders v as JSON bytes.
func (d *DefaultJSONSerializer) Serialize(v any) ([]byte, error) {
	payload, err := json.Marshal(v)
	return payload, err
}

// Deserialize parses JSON data into v, which must be a pointer.
func (d *DefaultJSONSerializer) Deserialize(data []byte, v any) error {
	err := json.Unmarshal(data, v)
	return err
}

View File

@@ -0,0 +1,68 @@
package redis
import (
"context"
"time"
"github.com/redis/go-redis/v9"
)
// Store is a redis-backed implementation of the cache Store contract.
type Store struct {
	store *redis.Client
}
// New builds a Store backed by a fresh redis client created from conf.
func New(conf *redis.Options) *Store {
	return &Store{store: redis.NewClient(conf)}
}
// NewWithDb wraps an existing redis client in a Store.
func NewWithDb(tx *redis.Client) *Store {
	s := &Store{store: tx}
	return s
}
// Set writes value under key with the given TTL.
func (r *Store) Set(ctx context.Context, key string, value any, ttl time.Duration) error {
	cmd := r.store.Set(ctx, key, value, ttl)
	return cmd.Err()
}
// Get reads the raw bytes stored under key.
func (r *Store) Get(ctx context.Context, key string) ([]byte, error) {
	cmd := r.store.Get(ctx, key)
	return cmd.Bytes()
}
// RemoveFromTag deletes every cached key registered under tag.
// A missing or empty tag set is treated as a no-op.
func (r *Store) RemoveFromTag(ctx context.Context, tag string) error {
	keys, err := r.store.SMembers(ctx, tag).Result()
	if err != nil {
		return err
	}
	// BUG FIX: Redis DEL requires at least one key argument; the original
	// called Del with zero keys whenever the tag set was empty or absent,
	// which always returned an error.
	if len(keys) == 0 {
		return nil
	}
	// NOTE(review): the tag set itself is left behind; consider deleting
	// it as well if stale tag sets accumulate.
	return r.store.Del(ctx, keys...).Err()
}
// SaveTagKey registers key in the set stored under tag so it can be
// invalidated later via RemoveFromTag.
func (r *Store) SaveTagKey(ctx context.Context, tag, key string) error {
	cmd := r.store.SAdd(ctx, tag, key)
	return cmd.Err()
}

66
database/kv/pebble.go Normal file
View File

@@ -0,0 +1,66 @@
package kv
import (
"github.com/cockroachdb/pebble"
)
// Impl is the package-level shared KV handle.
// NOTE(review): it is never assigned in this file — presumably set by a
// caller of NewPebble; confirm before relying on it.
var Impl *KvImpl

// KvImpl wraps a pebble database with simple string-oriented helpers.
type KvImpl struct {
	PebbleDB *pebble.DB
}
// NewPebble opens (or creates) a pebble database at datadir.
// It panics when the store cannot be opened.
func NewPebble(datadir string) *KvImpl {
	opts := &pebble.Options{}
	db, err := pebble.Open(datadir, opts)
	if err != nil {
		panic(err)
	}
	return &KvImpl{PebbleDB: db}
}
// PebbleSet durably stores val under key (pebble.Sync forces an fsync).
func (db *KvImpl) PebbleSet(key, val string) error {
	k, v := []byte(key), []byte(val)
	return db.PebbleDB.Set(k, v, pebble.Sync)
}
// PebbleGet returns a copy of the value stored under key.
func (db *KvImpl) PebbleGet(key string) ([]byte, error) {
	// BUG FIX: pebble's Get returns the value together with an io.Closer
	// that must be closed, and the value slice is only valid until that
	// Close. The original discarded the closer (resource leak) and
	// returned the transient slice.
	value, closer, err := db.PebbleDB.Get([]byte(key))
	if err != nil {
		return nil, err
	}
	defer closer.Close()
	out := make([]byte, len(value))
	copy(out, value)
	return out, nil
}
// PebbleFetch returns every key/value pair whose key starts with prefixKey.
func (db *KvImpl) PebbleFetch(prefixKey string) (map[string]string, error) {
	// keyUpperBound computes the smallest key strictly greater than every
	// key having b as a prefix (nil means no upper bound).
	keyUpperBound := func(b []byte) []byte {
		end := make([]byte, len(b))
		copy(end, b)
		for i := len(end) - 1; i >= 0; i-- {
			end[i] = end[i] + 1
			if end[i] != 0 {
				return end[:i+1]
			}
		}
		return nil // no upper-bound
	}
	prefixIterOptions := func(prefix []byte) *pebble.IterOptions {
		return &pebble.IterOptions{
			LowerBound: prefix,
			UpperBound: keyUpperBound(prefix),
		}
	}

	iter, err := db.PebbleDB.NewIter(prefixIterOptions([]byte(prefixKey)))
	if err != nil {
		return nil, err
	}
	// BUG FIX: the original never closed the iterator (leak).
	defer iter.Close()

	// BUG FIX: the original wrote into a nil named-return map, which
	// panics on the first matching key.
	result := make(map[string]string)
	for iter.First(); iter.Valid(); iter.Next() {
		// string(...) copies the transient Key/Value slices, so the map
		// entries stay valid after the iterator advances or closes.
		result[string(iter.Key())] = string(iter.Value())
	}
	// BUG FIX: surface iteration errors instead of silently returning a
	// partial result.
	if err := iter.Error(); err != nil {
		return nil, err
	}
	return result, nil
}

30
database/sql/ext.go Normal file
View File

@@ -0,0 +1,30 @@
package sql
import (
	"sort"
	"strings"
)
// Like renders a SQL fragment "key LIKE '%val%'" from a column name and a
// search value, trimming whitespace from both. An empty val yields "".
//
// SECURITY: val is interpolated into SQL text; single quotes are escaped
// to reduce injection risk. Prefer parameterized queries at call sites
// when possible.
func Like(key, val string) string {
	if val == "" {
		return ""
	}
	key = strings.TrimSpace(key)
	val = strings.TrimSpace(val)
	// FIX: escape single quotes so a value like "o'brien" (or a crafted
	// payload) cannot break out of the string literal.
	val = strings.ReplaceAll(val, "'", "''")
	return key + " LIKE '%" + val + "%'"
}

// Likes joins a LIKE clause per map entry with " AND ". Entries whose
// value is empty are skipped; an empty result set yields "".
func Likes(in map[string]string) string {
	// FIX: iterate in sorted key order — Go map iteration is random, so
	// the generated SQL (and any cache keyed on it) was nondeterministic.
	keys := make([]string, 0, len(in))
	for k := range in {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	clauses := make([]string, 0, len(keys))
	for _, k := range keys {
		if c := Like(k, in[k]); c != "" {
			clauses = append(clauses, c)
		}
	}
	if len(clauses) == 0 {
		return ""
	}
	return strings.Join(clauses, " AND ")
}

View File

@@ -0,0 +1,54 @@
package sql
import (
"git.apinb.com/bsm-sdk/core/types"
"git.apinb.com/bsm-sdk/core/vars"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/schema"
)
// NewPostgreSql opens a gorm handle over PostgreSQL using dsn, applying
// connection-pool settings from options (library defaults when nil).
func NewPostgreSql(dsn string, options *types.SqlOptions) (*gorm.DB, error) {
	// Fall back to library defaults when the caller passes no options.
	if options == nil {
		options = &types.SqlOptions{
			MaxIdleConns: vars.SqlOptionMaxIdleConns,
			// NOTE(review): this reuses the *idle* default for the open
			// limit — looks like a copy-paste; confirm whether a
			// SqlOptionMaxOpenConns constant exists in vars.
			MaxOpenConns:    vars.SqlOptionMaxIdleConns,
			ConnMaxLifetime: vars.SqlOptionConnMaxLifetime,
			LogStdout:       false,
			Debug:           true,
		}
	}

	gormDb, err := gorm.Open(postgres.New(postgres.Config{
		DSN: dsn,
		// PreferSimpleProtocol: true, disables implicit prepared statement usage
	}), &gorm.Config{
		//Logger:newLogger,
		DisableForeignKeyConstraintWhenMigrating: true,
		NamingStrategy: schema.NamingStrategy{
			SingularTable: true, // use singular table names (e.g. `user`, not `users`)
		}})
	if err != nil {
		return nil, err
	}

	if options.Debug {
		gormDb = gormDb.Debug()
	}

	// Fetch the generic database/sql handle to tune the pool.
	// BUG FIX: the original discarded this error and would nil-deref below.
	sqlDB, err := gormDb.DB()
	if err != nil {
		return nil, err
	}
	// Maximum number of idle connections kept in the pool.
	sqlDB.SetMaxIdleConns(options.MaxIdleConns)
	// Maximum number of open connections to the database.
	sqlDB.SetMaxOpenConns(options.MaxOpenConns)
	// Maximum amount of time a connection may be reused.
	sqlDB.SetConnMaxLifetime(options.ConnMaxLifetime)

	return gormDb, nil
}