	Fix merging artifact chunks error when minio storage basepath is set (#28555)
Related to https://github.com/go-gitea/gitea/issues/28279

When merging artifact chunks, Gitea lists the chunks from storage. When the storage backend is MinIO, each chunk's path contains `MINIO_BASE_PATH`, which breaks the merge. ~~So trim the `MINIO_BASE_PATH` when handling chunks.~~ Instead, encode the necessary information in the chunk file's basename, so that the directory part of the chunk's path no longer matters.
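To illustrate the failure mode, here is a minimal, self-contained Go sketch; the base path, object path, and IDs are made-up examples, not values taken from this change. Matching the full object path against a pattern rooted at the temp directory fails once a MinIO base path prefixes it, while the basename is unaffected:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical object path as it might be listed when MINIO_BASE_PATH
	// is configured (prefix and IDs are made up for illustration).
	fpath := "gitea/actions_artifacts/tmp75/18-0-1024.chunk"
	storageDir := "tmp75"

	// Pre-fix approach: match the whole path against a pattern rooted at
	// storageDir. The extra base-path prefix makes this fail.
	var artifactID, start, end int64
	pattern := filepath.Join(storageDir, "%d-%d-%d.chunk")
	if _, err := fmt.Sscanf(fpath, pattern, &artifactID, &start, &end); err != nil {
		fmt.Println("full-path parse fails:", err)
	}

	// The fix parses only the basename, which never carries the storage
	// prefix, so parsing works regardless of the base path setting.
	fmt.Println(filepath.Base(fpath)) // 18-0-1024.chunk
}
```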
```diff
@@ -26,10 +26,11 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 	contentRange := ctx.Req.Header.Get("Content-Range")
 	start, end, length := int64(0), int64(0), int64(0)
 	if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
+		log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
 		return -1, fmt.Errorf("parse content range error: %v", err)
 	}
 	// build chunk store path
-	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d.chunk", runID, artifact.ID, start, end)
+	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
 	// use io.TeeReader to avoid reading all body to md5 sum.
 	// it writes data to hasher after reading end
 	// if hash is not matched, delete the read-end result
@@ -58,6 +59,7 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 }
 
 type chunkFileItem struct {
+	RunID      int64
 	ArtifactID int64
 	Start      int64
 	End        int64
@@ -67,9 +69,12 @@ type chunkFileItem struct {
 func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
 	storageDir := fmt.Sprintf("tmp%d", runID)
 	var chunks []*chunkFileItem
-	if err := st.IterateObjects(storageDir, func(path string, obj storage.Object) error {
-		item := chunkFileItem{Path: path}
-		if _, err := fmt.Sscanf(path, filepath.Join(storageDir, "%d-%d-%d.chunk"), &item.ArtifactID, &item.Start, &item.End); err != nil {
+	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+		baseName := filepath.Base(fpath)
+		// when read chunks from storage, it only contains storage dir and basename,
+		// no matter the subdirectory setting in storage config
+		item := chunkFileItem{Path: storageDir + "/" + baseName}
+		if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
 			return fmt.Errorf("parse content range error: %v", err)
 		}
 		chunks = append(chunks, &item)
```
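As a sanity check of the new naming scheme, here is a small round-trip sketch (the run ID, artifact ID, and byte range below are made up): the writer encodes the run ID both as the temp directory and in the basename, and the reader recovers all four fields from the basename alone.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Made-up IDs and byte range for illustration.
	runID, artifactID := int64(75), int64(18)
	start, end := int64(0), int64(1024)

	// Writer side, mirroring the new storagePath format in the diff:
	// runID appears both as the temp directory and in the basename.
	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifactID, start, end)

	// Reader side, mirroring listChunksByRunID: only the basename is
	// parsed, so any prefix added by the storage backend is irrelevant.
	var gotRun, gotArtifact, gotStart, gotEnd int64
	baseName := filepath.Base(storagePath)
	if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &gotRun, &gotArtifact, &gotStart, &gotEnd); err != nil {
		panic(err)
	}
	fmt.Println(storagePath)                           // tmp75/75-18-0-1024.chunk
	fmt.Println(gotRun, gotArtifact, gotStart, gotEnd) // 75 18 0 1024
}
```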