chore: add dbfs to migration

This commit is contained in:
Jason Song 2022-11-29 17:19:26 +08:00
parent c1fce2cf7d
commit 8f2efdaf60
No known key found for this signature in database
GPG Key ID: 8402EEEE4511A8B5
3 changed files with 39 additions and 19 deletions

View File

@@ -37,7 +37,7 @@ type file struct {
var _ File = (*file)(nil) var _ File = (*file)(nil)
func (f *file) readAt(fileMeta *FileMeta, offset int64, p []byte) (n int, err error) { func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err error) {
if offset >= fileMeta.FileSize { if offset >= fileMeta.FileSize {
return 0, io.EOF return 0, io.EOF
} }
@@ -55,7 +55,7 @@ func (f *file) readAt(fileMeta *FileMeta, offset int64, p []byte) (n int, err er
if needRead <= 0 { if needRead <= 0 {
return 0, io.EOF return 0, io.EOF
} }
var fileData FileData var fileData dbfsData
ok, err := db.GetEngine(f.ctx).Where("meta_id = ? AND blob_offset = ?", f.metaID, blobOffset).Get(&fileData) ok, err := db.GetEngine(f.ctx).Where("meta_id = ? AND blob_offset = ?", f.metaID, blobOffset).Get(&fileData)
if err != nil { if err != nil {
return 0, err return 0, err
@@ -128,12 +128,12 @@ func (f *file) Write(p []byte) (n int, err error) {
buf = buf[:readBytes] buf = buf[:readBytes]
} }
fileData := FileData{ fileData := dbfsData{
MetaID: fileMeta.ID, MetaID: fileMeta.ID,
BlobOffset: blobOffset, BlobOffset: blobOffset,
BlobData: buf, BlobData: buf,
} }
if res, err := db.GetEngine(f.ctx).Exec("UPDATE file_data SET revision=revision+1, blob_data=? WHERE meta_id=? AND blob_offset=?", buf, fileMeta.ID, blobOffset); err != nil { if res, err := db.GetEngine(f.ctx).Exec("UPDATE dbfs_data SET revision=revision+1, blob_data=? WHERE meta_id=? AND blob_offset=?", buf, fileMeta.ID, blobOffset); err != nil {
return written, err return written, err
} else if updated, err := res.RowsAffected(); err != nil { } else if updated, err := res.RowsAffected(); err != nil {
return written, err return written, err
@@ -151,7 +151,7 @@ func (f *file) Write(p []byte) (n int, err error) {
p = p[needWrite:] p = p[needWrite:]
} }
fileMetaUpdate := FileMeta{ fileMetaUpdate := dbfsMeta{
ModifyTimestamp: timeToFileTimestamp(time.Now()), ModifyTimestamp: timeToFileTimestamp(time.Now()),
} }
if needUpdateSize { if needUpdateSize {
@@ -198,8 +198,8 @@ func timeToFileTimestamp(t time.Time) int64 {
return t.UnixMicro() return t.UnixMicro()
} }
func (f *file) loadMetaByPath() (*FileMeta, error) { func (f *file) loadMetaByPath() (*dbfsMeta, error) {
var fileMeta FileMeta var fileMeta dbfsMeta
if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil { if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
return nil, err return nil, err
} else if ok { } else if ok {
@@ -262,7 +262,7 @@ func (f *file) createEmpty() error {
return os.ErrExist return os.ErrExist
} }
now := time.Now() now := time.Now()
_, err := db.GetEngine(f.ctx).Insert(&FileMeta{ _, err := db.GetEngine(f.ctx).Insert(&dbfsMeta{
FullPath: f.fullPath, FullPath: f.fullPath,
BlockSize: f.blockSize, BlockSize: f.blockSize,
CreateTimestamp: timeToFileTimestamp(now), CreateTimestamp: timeToFileTimestamp(now),
@@ -282,10 +282,10 @@ func (f *file) truncate() error {
return os.ErrNotExist return os.ErrNotExist
} }
return db.WithTx(f.ctx, func(ctx context.Context) error { return db.WithTx(f.ctx, func(ctx context.Context) error {
if _, err := db.GetEngine(ctx).Exec("UPDATE file_meta SET file_size = 0 WHERE id = ?", f.metaID); err != nil { if _, err := db.GetEngine(ctx).Exec("UPDATE dbfs_meta SET file_size = 0 WHERE id = ?", f.metaID); err != nil {
return err return err
} }
if _, err := db.GetEngine(ctx).Delete(&FileData{MetaID: f.metaID}); err != nil { if _, err := db.GetEngine(ctx).Delete(&dbfsData{MetaID: f.metaID}); err != nil {
return err return err
} }
return nil return nil
@@ -298,7 +298,7 @@ func (f *file) renameTo(newPath string) error {
} }
newPath = buildPath(newPath) newPath = buildPath(newPath)
return db.WithTx(f.ctx, func(ctx context.Context) error { return db.WithTx(f.ctx, func(ctx context.Context) error {
if _, err := db.GetEngine(ctx).Exec("UPDATE file_meta SET full_path = ? WHERE id = ?", newPath, f.metaID); err != nil { if _, err := db.GetEngine(ctx).Exec("UPDATE dbfs_meta SET full_path = ? WHERE id = ?", newPath, f.metaID); err != nil {
return err return err
} }
return nil return nil
@@ -310,10 +310,10 @@ func (f *file) delete() error {
return os.ErrNotExist return os.ErrNotExist
} }
return db.WithTx(f.ctx, func(ctx context.Context) error { return db.WithTx(f.ctx, func(ctx context.Context) error {
if _, err := db.GetEngine(ctx).Delete(&FileMeta{ID: f.metaID}); err != nil { if _, err := db.GetEngine(ctx).Delete(&dbfsMeta{ID: f.metaID}); err != nil {
return err return err
} }
if _, err := db.GetEngine(ctx).Delete(&FileData{MetaID: f.metaID}); err != nil { if _, err := db.GetEngine(ctx).Delete(&dbfsData{MetaID: f.metaID}); err != nil {
return err return err
} }
return nil return nil
@@ -331,8 +331,8 @@ func (f *file) size() (int64, error) {
return fileMeta.FileSize, nil return fileMeta.FileSize, nil
} }
func findFileMetaByID(ctx context.Context, metaID int64) (*FileMeta, error) { func findFileMetaByID(ctx context.Context, metaID int64) (*dbfsMeta, error) {
var fileMeta FileMeta var fileMeta dbfsMeta
if ok, err := db.GetEngine(ctx).Where("id = ?", metaID).Get(&fileMeta); err != nil { if ok, err := db.GetEngine(ctx).Where("id = ?", metaID).Get(&fileMeta); err != nil {
return nil, err return nil, err
} else if ok { } else if ok {

View File

@@ -11,7 +11,7 @@ import (
"code.gitea.io/gitea/models/db" "code.gitea.io/gitea/models/db"
) )
type FileMeta struct { type dbfsMeta struct {
ID int64 `xorm:"pk autoincr"` ID int64 `xorm:"pk autoincr"`
FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"` FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
BlockSize int64 `xorm:"BIGINT NOT NULL"` BlockSize int64 `xorm:"BIGINT NOT NULL"`
@@ -20,7 +20,7 @@ type FileMeta struct {
ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"` ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
} }
type FileData struct { type dbfsData struct {
ID int64 `xorm:"pk autoincr"` ID int64 `xorm:"pk autoincr"`
Revision int64 `xorm:"BIGINT NOT NULL"` Revision int64 `xorm:"BIGINT NOT NULL"`
MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"` MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
@@ -30,8 +30,8 @@ type FileData struct {
} }
func init() { func init() {
db.RegisterModel(new(FileMeta)) db.RegisterModel(new(dbfsMeta))
db.RegisterModel(new(FileData)) db.RegisterModel(new(dbfsData))
} }
func OpenFile(ctx context.Context, name string, flag int) (File, error) { func OpenFile(ctx context.Context, name string, flag int) (File, error) {

View File

@@ -144,6 +144,24 @@ func addBotTables(x *xorm.Engine) error {
Updated timeutil.TimeStamp `xorm:"updated"` Updated timeutil.TimeStamp `xorm:"updated"`
} }
type dbfsMeta struct {
ID int64 `xorm:"pk autoincr"`
FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
BlockSize int64 `xorm:"BIGINT NOT NULL"`
FileSize int64 `xorm:"BIGINT NOT NULL"`
CreateTimestamp int64 `xorm:"BIGINT NOT NULL"`
ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
}
type dbfsData struct {
ID int64 `xorm:"pk autoincr"`
Revision int64 `xorm:"BIGINT NOT NULL"`
MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
BlobSize int64 `xorm:"BIGINT NOT NULL"`
BlobData []byte `xorm:"BLOB NOT NULL"`
}
return x.Sync( return x.Sync(
new(BotRunner), new(BotRunner),
new(BotRunnerToken), new(BotRunnerToken),
@@ -153,5 +153,7 @@ func addBotTables(x *xorm.Engine) error {
new(BotRunIndex), new(BotRunIndex),
new(BotTask), new(BotTask),
new(BotTaskStep), new(BotTaskStep),
new(dbfsMeta),
new(dbfsData),
) )
} }