libf 3 months ago
Parent
Commit
f9fa73c3d2
7 changed files with 303 additions and 136 deletions
  1. go.mod: +1 -1
  2. go.sum: +2 -2
  3. graph/edgeinfo.go: +96 -0
  4. graph/nodeid.go: +31 -0
  5. importer/importer.go: +130 -45
  6. importer/odbcimporter.go: +42 -87
  7. main.go: +1 -1

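Overview: this commit moves edge handling out of ODBCImporter into a new graph package. Edge CSV files are imported in a first pass and cached in memory (graph.CacheEdgeInfo); the file walk is split into an edge pass and a non-edge pass; and level1 node inserts pull their contain/depend/topology relations from the cache instead of issuing one MQL update per edge row. The per-file record count is also threaded through (skiprecordscount) toward resuming partially imported files, and the default parallelism rises from 10 to 20.
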
+ 1 - 1
go.mod

@@ -4,7 +4,7 @@ go 1.20
 
 require (
 	git.wecise.com/wecise/odb-go v0.0.0-20250206115649-e441a00d8837
-	git.wecise.com/wecise/util v0.0.0-20250206115410-1d1fcd308b9c
+	git.wecise.com/wecise/util v0.0.0-20250207094530-58e32065187c
 	github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e
 	github.com/spf13/cast v1.7.0
 )

+ 2 - 2
go.sum

@@ -1,7 +1,7 @@
 git.wecise.com/wecise/odb-go v0.0.0-20250206115649-e441a00d8837 h1:drVImCwSwQkFL6fUIu30VzosQIFn4S0oMnDL0KRNi90=
 git.wecise.com/wecise/odb-go v0.0.0-20250206115649-e441a00d8837/go.mod h1:V8ZATNv1dapTbBKakWi2uhGTMvtShpIK1V8ks3mQGS0=
-git.wecise.com/wecise/util v0.0.0-20250206115410-1d1fcd308b9c h1:YYv9HZbrJTlIhxricwmwhSdH+v3iikv0k0ihxLZEZII=
-git.wecise.com/wecise/util v0.0.0-20250206115410-1d1fcd308b9c/go.mod h1:2YXWE9m5mNgAu40zpYrL3woGz6S8CoHAW/CJeWXaIko=
+git.wecise.com/wecise/util v0.0.0-20250207094530-58e32065187c h1:/vu0uQEnqKITft0ULqqgmZCRN42maUuzn3Y2mUH72hk=
+git.wecise.com/wecise/util v0.0.0-20250207094530-58e32065187c/go.mod h1:2YXWE9m5mNgAu40zpYrL3woGz6S8CoHAW/CJeWXaIko=
 github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
 github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
 github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao=

+ 96 - 0
graph/edgeinfo.go

@@ -0,0 +1,96 @@
+package graph
+
+import (
+	"encoding/json"
+	"sync"
+
+	"git.wecise.com/wecise/cgimport/schema"
+	"git.wecise.com/wecise/util/deepcopy"
+	"git.wecise.com/wecise/util/merrs"
+	"github.com/spf13/cast"
+)
+
+type EdgeInfo struct {
+	FromOid    string
+	EdgeType   string
+	ToOid      string
+	Attributes map[string]string
+}
+
+var edgeinfosmutex sync.RWMutex
+var edgeinfos = map[string]map[string]map[string][]string{} // fromid - edgetype - toid - relationinfo
+
+func CacheEdgeInfo(data map[string]any) (err error) {
+	ei, e := ParseEdgeInfo(data)
+	if e != nil {
+		return e
+	}
+	edgeinfosmutex.Lock()
+	defer edgeinfosmutex.Unlock()
+	eitypes := edgeinfos[ei.FromOid]
+	if eitypes == nil {
+		eitypes = map[string]map[string][]string{}
+	} else {
+		eitypes = deepcopy.DeepCopy(eitypes).(map[string]map[string][]string)
+	}
+	edgeinfos[ei.FromOid] = eitypes
+	eis := eitypes[ei.EdgeType]
+	if eis == nil {
+		eis = map[string][]string{}
+		eitypes[ei.EdgeType] = eis
+	}
+	eis["_all"] = append(eis["_all"], ei.ToOid)
+	eabs, _ := json.Marshal(ei.Attributes)
+	eis[ei.ToOid] = append(eis[ei.ToOid], string(eabs))
+	return
+}
+
+func GetEdgeInfo(fromoid string) map[string]map[string][]string {
+	edgeinfosmutex.RLock()
+	defer edgeinfosmutex.RUnlock()
+	return edgeinfos[fromoid]
+}
+
+func ParseEdgeInfo(data map[string]any) (ei *EdgeInfo, err error) {
+	extraattr := map[string]string{}
+	fromuid := ""
+	touid := ""
+	edgetype := ""
+	for k, v := range data {
+		switch k {
+		case "FROMUNIQUEID":
+			fromuid = cast.ToString(v)
+		case "TOUNIQUEID":
+			touid = cast.ToString(v)
+		case "EDGETYPE":
+			edgetype = cast.ToString(v)
+		default:
+			extraattr[k] = cast.ToString(v)
+		}
+	}
+	if fromuid == "" {
+		databs, _ := json.MarshalIndent(data, "", "  ")
+		return nil, merrs.NewError("not found valid fromuniqueid in data ", merrs.SSMap{"data": string(databs)})
+	}
+	if touid == "" {
+		databs, _ := json.MarshalIndent(data, "", "  ")
+		return nil, merrs.NewError("not found valid touniqueid in data ", merrs.SSMap{"data": string(databs)})
+	}
+	if edgetype == "" {
+		databs, _ := json.MarshalIndent(data, "", "  ")
+		return nil, merrs.NewError("not found valid edgetype in data ", merrs.SSMap{"data": string(databs)})
+	}
+	edgetype = schema.Relations[edgetype]
+	if edgetype == "" {
+		databs, _ := json.MarshalIndent(data, "", "  ")
+		return nil, merrs.NewError("not found valid edgetype in data ", merrs.SSMap{"data": string(databs)})
+	}
+	foid := ToNodeId("level1", fromuid)
+	toid := ToNodeId("level1", touid)
+	return &EdgeInfo{
+		FromOid:    foid,
+		ToOid:      toid,
+		EdgeType:   edgetype,
+		Attributes: extraattr,
+	}, nil
+}
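
A minimal sketch of how the new cache is driven, assuming "contain" is a key that schema.Relations maps to itself. The upper-case keys follow the CSV columns ParseEdgeInfo switches on; "weight" is a hypothetical extra column:

package main

import (
	"fmt"
	"log"

	"git.wecise.com/wecise/cgimport/graph"
)

func main() {
	// Any column other than FROMUNIQUEID/TOUNIQUEID/EDGETYPE lands in
	// Attributes as a string.
	record := map[string]any{
		"FROMUNIQUEID": "host-1",
		"TOUNIQUEID":   "disk-7",
		"EDGETYPE":     "contain", // assumed to resolve via schema.Relations
		"weight":       "3",
	}
	if err := graph.CacheEdgeInfo(record); err != nil {
		log.Fatal(err)
	}
	// During the node pass the cache is read back by from-oid.
	ei := graph.GetEdgeInfo(graph.ToNodeId("level1", "host-1"))
	fmt.Println(ei["contain"]["_all"])                             // all to-oids for this edge type
	fmt.Println(ei["contain"][graph.ToNodeId("level1", "disk-7")]) // JSON-encoded extra attributes
}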

+ 31 - 0
graph/nodeid.go

@@ -0,0 +1,31 @@
+package graph
+
+import (
+	"encoding/base64"
+	"encoding/json"
+
+	"git.wecise.com/wecise/util/merrs"
+	"github.com/spf13/cast"
+)
+
+func GetNodeId(classaliasname string, data map[string]any) (oid, suid string, err error) {
+	uid := data["uniqueId"]
+	if uid == nil {
+		uid = data["UNIQUEID"]
+		if uid == nil {
+			databs, _ := json.MarshalIndent(data, "", "  ")
+			return "", "", merrs.NewError("not found uniqueid in data ", merrs.SSMap{"data": string(databs)})
+		}
+	}
+	suid = cast.ToString(uid)
+	if suid == "" {
+		databs, _ := json.MarshalIndent(data, "", "  ")
+		return "", "", merrs.NewError("not found valid uniqueid in data ", merrs.SSMap{"data": string(databs)})
+	}
+	return ToNodeId(classaliasname, suid), suid, nil
+}
+
+func ToNodeId(classaliasname string, suid string) string {
+	suid64 := base64.RawURLEncoding.EncodeToString([]byte(suid))
+	return classaliasname + ":" + suid64
+}
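
The node id scheme is simple enough to pin down exactly; this stdlib-only sketch mirrors ToNodeId. RawURLEncoding is unpadded and URL-safe, so the resulting id can be embedded in MQL string literals:

package main

import (
	"encoding/base64"
	"fmt"
)

// mirrors graph.ToNodeId
func toNodeId(classaliasname, suid string) string {
	return classaliasname + ":" + base64.RawURLEncoding.EncodeToString([]byte(suid))
}

func main() {
	fmt.Println(toNodeId("level1", "host-1")) // level1:aG9zdC0x
}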

+ 130 - 45
importer/importer.go

@@ -6,11 +6,13 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"regexp"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
 
+	"git.wecise.com/wecise/cgimport/graph"
 	"git.wecise.com/wecise/cgimport/odbc"
 	"git.wecise.com/wecise/cgimport/reader"
 	"git.wecise.com/wecise/cgimport/schema"
@@ -23,11 +25,15 @@ var mcfg = odbc.Config
 var logger = odbc.Logger
 
 type Importer struct {
-	datapath     string
-	parallel     int
-	reload       bool
-	importrc     *rc.RoutinesController
-	odbcimporter *ODBCImporter
+	datapath         string
+	parallel         int
+	reload           bool
+	importstatus     *CGIStatus
+	fileimportrc     *rc.RoutinesController
+	odbcqueryrc      *rc.RoutinesController
+	odbcimporter     *ODBCImporter
+	starttime        time.Time
+	currentstarttime time.Time
 }
 
 func ImportDir(datapath string, parallel int, reload bool) (totalfilescount, totalrecordscount int64, totalusetime time.Duration, filescount, recordscount int64, usetime time.Duration, err error) {
@@ -35,23 +41,17 @@ func ImportDir(datapath string, parallel int, reload bool) (totalfilescount, tot
 		datapath:     datapath,
 		parallel:     parallel,
 		reload:       reload,
-		importrc:     rc.NewRoutinesController("", 100),
+		importstatus: NewCGIStatus(),
+		fileimportrc: rc.NewRoutinesController("", parallel),
+		odbcqueryrc:  rc.NewRoutinesController("", parallel*10),
 		odbcimporter: NewODBCImporter(),
 	}
 	return importer.Import()
 }
 
 func (importer *Importer) Import() (totalfilescount, totalrecordscount int64, totalusetime time.Duration, filescount, recordscount int64, usetime time.Duration, err error) {
-	var cgirc = rc.NewRoutinesController("", importer.parallel)
-	var wg sync.WaitGroup
-	fw, e := filewalker.NewFileWalker([]string{importer.datapath}, ".*")
-	if e != nil {
-		err = e
-		return
-	}
-	cgistatus := NewCGIStatus()
 	if odbc.DevPhase&odbc.DP_PROCESSCONTINUE != 0 && !importer.reload {
-		err = cgistatus.Load()
+		err = importer.importstatus.Load()
 		if err != nil {
 			return
 		}
@@ -68,45 +68,126 @@ func (importer *Importer) Import() (totalfilescount, totalrecordscount int64, to
 	if err != nil {
 		return
 	}
-	totalfilescount = int64(len(cgistatus.ImportStatus))
-	for _, v := range cgistatus.ImportStatus {
+	totalfilescount = int64(len(importer.importstatus.ImportStatus))
+	for _, v := range importer.importstatus.ImportStatus {
 		totalrecordscount += v.RecordsCount
 	}
-	totalusetime = cgistatus.TotalUseTime
-	st := time.Now().Add(-totalusetime)
-	cst := time.Now()
+	totalusetime = importer.importstatus.TotalUseTime
+	importer.starttime = time.Now().Add(-totalusetime)
+	importer.currentstarttime = time.Now()
+
+	reedgefile := regexp.MustCompile("(?i).*edge.*.csv")
+	fc, rc, ut, e := importer.ImportEdgeFiles(reedgefile)
+	if e != nil {
+		err = e
+		return
+	}
+	totalfilescount += fc
+	totalrecordscount += rc
+	filescount += fc
+	recordscount += rc
+	usetime += ut
+	totalusetime = importer.importstatus.TotalUseTime
+	fc, rc, ut, e = importer.ImportNonEdgeFiles(reedgefile)
+	if e != nil {
+		err = e
+		return
+	}
+	totalfilescount += fc
+	totalrecordscount += rc
+	filescount += fc
+	recordscount += rc
+	usetime += ut
+	totalusetime = importer.importstatus.TotalUseTime
+
+	importer.importstatus.WaitSaveDone()
+	importer.alldone()
+	return
+}
+
+func (importer *Importer) ImportEdgeFiles(reedgefile *regexp.Regexp) (filescount, recordscount int64, usetime time.Duration, err error) {
+	return importer.ImportFiles(func(basedir string, fpath string) FWOP {
+		if !reedgefile.MatchString(filepath.Base(fpath)) {
+			// ignore non-EDGE files
+			return FWOP_IGNORE
+		}
+		return FWOP_CONTINUE
+	})
+}
+
+func (importer *Importer) ImportNonEdgeFiles(reedgefile *regexp.Regexp) (filescount, recordscount int64, usetime time.Duration, err error) {
+	return importer.ImportFiles(func(basedir string, fpath string) FWOP {
+		if reedgefile.MatchString(filepath.Base(fpath)) {
+			// ignore EDGE files
+			return FWOP_IGNORE
+		}
+		return FWOP_CONTINUE
+	})
+}
+
+type FWOP int
+
+const (
+	FWOP_IGNORE FWOP = iota + 1
+	FWOP_BREAK
+	FWOP_CONTINUE
+)
+
+func (importer *Importer) ImportFiles(fwop func(basedir string, fpath string) FWOP) (filescount, recordscount int64, usetime time.Duration, err error) {
 	// walk the data directory
+	var wg sync.WaitGroup
+	fw, e := filewalker.NewFileWalker([]string{importer.datapath}, ".*")
+	if e != nil {
+		err = e
+		return
+	}
 	e = fw.List(func(basedir string, fpath string) bool {
 		if err != nil {
+			// an earlier error occurred; stop the walk
+			return false
+		}
+		if strings.Contains(fpath, string(filepath.Separator)) {
+			// ignore subdirectories; fw.List is ordered with directories after files, so the walk can stop at the first subdirectory
 			return false
 		}
+		switch fwop(basedir, fpath) {
+		case FWOP_IGNORE:
+			// skip this file, continue with the next
+			return true
+		case FWOP_BREAK:
+			// stop the walk
+			return false
+		case FWOP_CONTINUE:
+		default:
+		}
+		// process the current file
 		filename := filepath.Join(basedir, fpath)
 		wg.Add(1)
-		cgirc.ConcurCall(1,
+		// process concurrently
+		importer.fileimportrc.ConcurCall(1,
 			func() {
 				defer wg.Done()
-				cgistatus.mutex.RLock()
-				importstatus := cgistatus.ImportStatus[filename]
-				cgistatus.mutex.RUnlock()
+				importer.importstatus.mutex.RLock()
+				importstatus := importer.importstatus.ImportStatus[filename]
+				importer.importstatus.mutex.RUnlock()
+				importedrecordscount := int64(0)
 				if importstatus != nil {
+					importedrecordscount = importstatus.RecordsCount
 					return
 				}
-				records, e := importer.ImportFile(filename)
+				records, e := importer.ImportFile(filename, importedrecordscount)
 				if e != nil {
 					err = e
 					return
 				}
 				atomic.AddInt64(&filescount, 1)
 				atomic.AddInt64(&recordscount, records)
-				atomic.AddInt64(&totalfilescount, 1)
-				atomic.AddInt64(&totalrecordscount, records)
-				usetime = time.Since(cst)
-				totalusetime = time.Since(st)
-				cgistatus.mutex.Lock()
-				cgistatus.ImportStatus[filename] = &ImportStatus{RecordsCount: records}
-				cgistatus.TotalUseTime = totalusetime
-				cgistatus.mutex.Unlock()
-				cgistatus.Save()
+				usetime = time.Since(importer.currentstarttime)
+				importer.importstatus.mutex.Lock()
+				importer.importstatus.ImportStatus[filename] = &ImportStatus{RecordsCount: importedrecordscount + records}
+				importer.importstatus.TotalUseTime = time.Since(importer.starttime)
+				importer.importstatus.mutex.Unlock()
+				importer.importstatus.Save()
 			},
 		)
 		return true
@@ -120,21 +201,19 @@ func (importer *Importer) Import() (totalfilescount, totalrecordscount int64, to
 		}
 		return
 	}
-	cgistatus.WaitSaveDone()
-	importer.alldone()
 	return
 }
 
-func (importer *Importer) ImportFile(filepath string) (blockcount int64, err error) {
+func (importer *Importer) ImportFile(filepath string, skiprecordscount int64) (blockcount int64, err error) {
 	f, e := os.Open(filepath)
 	if e != nil {
 		return blockcount, merrs.NewError(e, merrs.SSMaps{{"filename": filepath}})
 	}
 	defer f.Close()
-	return importer.importReader(filepath, f)
+	return importer.importReader(filepath, f, skiprecordscount)
 }
 
-func (importer *Importer) importReader(filename string, buf io.Reader) (blockcount int64, err error) {
+func (importer *Importer) importReader(filename string, buf io.Reader, skiprecordscount int64) (blockcount int64, err error) {
 	var filetype schema.FileType
 	switch {
 	case strings.Contains(filename, "_L1_"):
@@ -168,6 +247,7 @@ func (importer *Importer) importReader(filename string, buf io.Reader) (blockcou
 	var wg sync.WaitGroup
 	defer importer.done()
 	defer wg.Wait()
+	n := int64(0)
 	for {
 		if err != nil {
 			break
@@ -179,8 +259,12 @@ func (importer *Importer) importReader(filename string, buf io.Reader) (blockcou
 		if block == nil {
 			return
 		}
+		n++
+		if n <= skiprecordscount {
+			continue
+		}
 		wg.Add(1)
-		e = importer.importrc.ConcurCall(1, func() {
+		e = importer.odbcqueryrc.ConcurCall(1, func() {
 			defer wg.Done()
 			e = importer.importRecord(block, line, filename, filetype, linecount)
 			if e != nil {
@@ -207,11 +291,12 @@ func (importer *Importer) importRecord(record map[string]any, line string, filen
 	var classname string
 	switch filetype {
 	case schema.FT_EDGE:
-		err = importer.odbcimporter.InsertEdge(record)
-		if err != nil {
-			err = merrs.NewError(err, merrs.SSMaps{{"filename": filename}, {"linecount": fmt.Sprint(linecount)}, {"line": line}})
-			return
-		}
+		// err = importer.odbcimporter.InsertEdge(record)
+		// if err != nil {
+		// 	err = merrs.NewError(err, merrs.SSMaps{{"filename": filename}, {"linecount": fmt.Sprint(linecount)}, {"line": line}})
+		// 	return
+		// }
+		graph.CacheEdgeInfo(record)
 	default:
 		classname = string(filetype)
 		err = importer.odbcimporter.InsertData(classname, record)
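
Both passes run the same walk with different FWOP filters. This sketch shows how the shared pattern splits a file list; note the dots in "(?i).*edge.*.csv" are unescaped, so it matches slightly more than a literal "*edge*.csv" glob (file names here are hypothetical):

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
)

func main() {
	// The same case-insensitive pattern ImportDir compiles.
	reedgefile := regexp.MustCompile("(?i).*edge.*.csv")
	for _, f := range []string{"ci_EDGE_01.csv", "ci_L1_01.csv"} {
		if reedgefile.MatchString(filepath.Base(f)) {
			fmt.Println(f, "-> pass 1: edges, cached via graph.CacheEdgeInfo")
		} else {
			fmt.Println(f, "-> pass 2: nodes, cached edges attached at insert")
		}
	}
}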

+ 42 - 87
importer/odbcimporter.go

@@ -1,7 +1,6 @@
 package importer
 
 import (
-	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"regexp"
@@ -10,6 +9,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	"git.wecise.com/wecise/cgimport/graph"
 	"git.wecise.com/wecise/cgimport/odbc"
 	"git.wecise.com/wecise/cgimport/schema"
 	"git.wecise.com/wecise/odb-go/odb"
@@ -81,68 +81,35 @@ func (odbci *ODBCImporter) ReviseClassStruct() (err error) {
 	return
 }
 
-func (odbci *ODBCImporter) InsertEdge(data map[string]any) (err error) {
-	extraattr := map[string]string{}
-	fromuid := ""
-	touid := ""
-	edgetype := ""
-	for k, v := range data {
-		switch k {
-		case "FROMUNIQUEID":
-			fromuid = cast.ToString(v)
-		case "TOUNIQUEID":
-			touid = cast.ToString(v)
-		case "EDGETYPE":
-			edgetype = cast.ToString(v)
-		default:
-			extraattr[k] = cast.ToString(v)
-		}
-	}
-	if fromuid == "" {
-		databs, _ := json.MarshalIndent(data, "", "  ")
-		return merrs.NewError("not found valid fromuniqueid in data ", merrs.SSMap{"data": string(databs)})
-	}
-	if touid == "" {
-		databs, _ := json.MarshalIndent(data, "", "  ")
-		return merrs.NewError("not found valid touniqueid in data ", merrs.SSMap{"data": string(databs)})
-	}
-	if edgetype == "" {
-		databs, _ := json.MarshalIndent(data, "", "  ")
-		return merrs.NewError("not found valid edgetype in data ", merrs.SSMap{"data": string(databs)})
-	}
-	return odbci.insertEdge(edgetype, fromuid, touid, extraattr, data)
-}
-
-func (odbci *ODBCImporter) insertEdge(edgetype, fromuid, touid string, extraattr map[string]string, data map[string]any) (err error) {
-	edgetype = schema.Relations[edgetype]
-	if edgetype == "" {
-		databs, _ := json.MarshalIndent(data, "", "  ")
-		return merrs.NewError("not found valid edgetype in data ", merrs.SSMap{"data": string(databs)})
-	}
-	if odbci.client != nil {
-		foid := get_object_id_from_cache("level1", fromuid)
-		toid := to_object_id("level1", touid)
-		eabs, _ := json.Marshal(extraattr)
-		// quadmql := `quad "` + foid + `" ` + edgetype + ` + "` + toid + `" ` + string(eabs)
-		// _, err = odbci.client.Query(quadmql).Do()
-		// if err != nil {
-		// 	err = merrs.NewError(err, merrs.SSMaps{{"mql": quadmql}})
-		// 	logger.Error(err)
-		// 	return
-		// }
-		updatemql := "update " + "level1" + " set " + " contain=?" + " where id='" + foid + "'"
-		_, err = odbci.client.Query(updatemql, map[string][]string{
-			"_all": {toid},
-			toid:   {string(eabs)},
-		}).Do()
-		if err != nil {
-			err = merrs.NewError(err, merrs.SSMaps{{"mql": updatemql}})
-			return
-		}
-		logger.Info("relation immport " + foid + "->" + toid)
-	}
-	return
-}
+// func (odbci *ODBCImporter) InsertEdge(data map[string]any) (err error) {
+// 	ei, e := graph.ParseEdgeInfo(data)
+// 	if e != nil {
+// 		return e
+// 	}
+// 	if odbci.client != nil {
+// 		// foid := get_object_id_from_cache("level1", fromuid)
+// 		// toid := to_object_id("level1", touid)
+// 		// eabs, _ := json.Marshal(extraattr)
+// 		// quadmql := `quad "` + foid + `" ` + edgetype + ` + "` + toid + `" ` + string(eabs)
+// 		// _, err = odbci.client.Query(quadmql).Do()
+// 		// if err != nil {
+// 		// 	err = merrs.NewError(err, merrs.SSMaps{{"mql": quadmql}})
+// 		// 	logger.Error(err)
+// 		// 	return
+// 		// }
+// 		updatemql := "update " + "level1" + " set " + " contain=contain+?" + " where id='" + foid + "'"
+// 		_, err = odbci.client.Query(updatemql, map[string][]string{
+// 			"_all": {toid},
+// 			toid:   {string(eabs)},
+// 		}).Do()
+// 		if err != nil {
+// 			err = merrs.NewError(err, merrs.SSMaps{{"mql": updatemql}})
+// 			return
+// 		}
+// 		logger.Info("relation import " + foid + "->" + toid)
+// 	}
+// 	return
+// }
 
 var cm_object_id_cache = cmap.New[string, chan string]()
 
@@ -172,27 +139,6 @@ func push_object_id_into_cache(choid chan string, oid string) {
 	}
 }
 
-func object_id(classaliasname string, data map[string]any) (oid, suid string, err error) {
-	uid := data["uniqueId"]
-	if uid == nil {
-		uid = data["UNIQUEID"]
-		if uid == nil {
-			databs, _ := json.MarshalIndent(data, "", "  ")
-			return "", "", merrs.NewError("not found uniqueid in data ", merrs.SSMap{"data": string(databs)})
-		}
-	}
-	suid = cast.ToString(uid)
-	if suid == "" {
-		databs, _ := json.MarshalIndent(data, "", "  ")
-		return "", "", merrs.NewError("not found valid uniqueid in data ", merrs.SSMap{"data": string(databs)})
-	}
-	return to_object_id(classaliasname, suid), suid, nil
-}
-func to_object_id(classaliasname string, suid string) string {
-	suid64 := base64.RawURLEncoding.EncodeToString([]byte(suid))
-	return classaliasname + ":" + suid64
-}
-
 // insert data
 func (odbci *ODBCImporter) InsertData(classname string, data map[string]any) (err error) {
 	cdi := classdatainfos.GetIFPresent(classname)
@@ -203,11 +149,20 @@ func (odbci *ODBCImporter) InsertData(classname string, data map[string]any) (er
 		return merrs.NewError("class no fields to insert " + classname)
 	}
 	innerdata := &InnerData{}
-	innerdata.oid, innerdata.suid, err = object_id(cdi.Classaliasname, data)
+	innerdata.oid, innerdata.suid, err = graph.GetNodeId(cdi.Classaliasname, data)
 	if err != nil {
 		return
 	}
-	innerdata.depend = referencedata(classname, data)
+	if cdi.Classaliasname == "level1" {
+		ei := graph.GetEdgeInfo(innerdata.oid)
+		if ei != nil {
+			innerdata.contain = ei["contain"]
+			innerdata.depend = ei["depend"]
+			innerdata.topology = ei["topology"]
+		}
+	} else {
+		innerdata.depend = referencedata(classname, data)
+	}
 	return odbci.insertData(classname, cdi, innerdata, data)
 }
 
@@ -250,7 +205,7 @@ func referencedata(classname string, data map[string]any) (depend map[string][]s
 						case "level8":
 							toclassname = "level7"
 						}
-						toid := to_object_id(toclassname, suid)
+						toid := graph.ToNodeId(toclassname, suid)
 						m := map[string]string{"_direction": "out"}
 						mbs, _ := json.Marshal(m)
 						depend = map[string][]string{
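
With edges cached up front, a level1 insert needs one in-memory lookup instead of an update round trip per edge row. A hedged sketch of the lookup side, using a hypothetical unique id; the three relation names mirror the fields InsertData now fills on InnerData:

package main

import (
	"fmt"

	"git.wecise.com/wecise/cgimport/graph"
)

func main() {
	oid := graph.ToNodeId("level1", "host-1")
	if ei := graph.GetEdgeInfo(oid); ei != nil {
		for _, rel := range []string{"contain", "depend", "topology"} {
			fmt.Println(rel, "->", ei[rel]["_all"]) // indexing a missing inner map just yields nil
		}
	}
}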

+ 1 - 1
main.go

@@ -33,7 +33,7 @@ func main() {
 	// data directory
 	datapath := mcfg.GetString("datapath", mcfg.GetString("cgi.datapath", "data"))
 	// concurrency
-	parallel := mcfg.GetInt("parallel", mcfg.GetInt("cgi.parallel", 10))
+	parallel := mcfg.GetInt("parallel", mcfg.GetInt("cgi.parallel", 20))
 	//
 	reload := mcfg.GetBool("reload") || mcfg.GetString("reload") == "reload"
 	//
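
With the new default, ImportDir derives both routine controllers from this single knob: 20 concurrent file imports (fileimportrc) and 20*10 = 200 concurrent ODBC insert/query calls (odbcqueryrc).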