2020-02-06 17:16:34 +02:00
package cmd
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
2020-08-07 13:03:38 +02:00
"path"
2020-02-06 17:16:34 +02:00
"path/filepath"
"regexp"
2021-06-23 15:05:00 +02:00
"strconv"
2020-02-06 17:16:34 +02:00
"strings"
"time"
2020-10-05 17:46:44 +02:00
"github.com/pkg/errors"
2020-09-02 15:00:55 +02:00
"github.com/GoogleContainerTools/container-diff/pkg/util"
2020-02-06 17:16:34 +02:00
"github.com/SAP/jenkins-library/pkg/command"
piperDocker "github.com/SAP/jenkins-library/pkg/docker"
"github.com/SAP/jenkins-library/pkg/log"
2020-08-07 13:03:38 +02:00
StepResults "github.com/SAP/jenkins-library/pkg/piperutils"
2020-02-06 17:16:34 +02:00
"github.com/SAP/jenkins-library/pkg/protecode"
"github.com/SAP/jenkins-library/pkg/telemetry"
2021-06-23 15:05:00 +02:00
"github.com/SAP/jenkins-library/pkg/toolrecord"
2020-02-06 17:16:34 +02:00
)
2020-09-02 10:41:12 +02:00
const (
	// webReportPath is the format of the Protecode UI deep link to a scanned product.
	webReportPath = "%s/#/product/%v/"
	// scanResultFile holds the raw vulnerability scan result as JSON.
	scanResultFile = "protecodescan_vulns.json"
	// stepResultFile holds the aggregated step report as JSON.
	stepResultFile = "protecodeExecuteScan.json"
)

// Package-level paths; declared as variables (not constants) so tests can redirect them.
var reportPath = "./"
var cachePath = "./cache"
var cacheProtecodeImagePath = "/protecode/Image"
var cacheProtecodePath = "/protecode"
2020-09-02 10:41:12 +02:00
func protecodeExecuteScan ( config protecodeExecuteScanOptions , telemetryData * telemetry . CustomData , influx * protecodeExecuteScanInflux ) {
2020-02-06 17:16:34 +02:00
c := command . Command { }
// reroute command output to loging framework
2020-05-06 13:35:40 +02:00
c . Stdout ( log . Writer ( ) )
c . Stderr ( log . Writer ( ) )
2020-02-06 17:16:34 +02:00
dClient := createDockerClient ( & config )
2021-03-18 11:32:03 +02:00
influx . step_data . fields . protecode = false
2020-09-02 10:41:12 +02:00
if err := runProtecodeScan ( & config , influx , dClient ) ; err != nil {
2020-10-05 17:46:44 +02:00
log . Entry ( ) . WithError ( err ) . Fatal ( "Failed to execute protecode scan." )
2020-09-02 10:41:12 +02:00
}
2021-03-18 11:32:03 +02:00
influx . step_data . fields . protecode = true
2020-02-06 17:16:34 +02:00
}
func runProtecodeScan ( config * protecodeExecuteScanOptions , influx * protecodeExecuteScanInflux , dClient piperDocker . Download ) error {
2020-08-12 14:57:11 +02:00
correctDockerConfigEnvVar ( config )
2020-02-06 17:16:34 +02:00
var fileName , filePath string
2020-10-05 17:46:44 +02:00
var err error
2020-02-06 17:16:34 +02:00
//create client for sending api request
log . Entry ( ) . Debug ( "Create protecode client" )
client := createClient ( config )
2021-06-17 09:40:21 +02:00
if len ( config . FetchURL ) == 0 && len ( config . FilePath ) == 0 {
2020-02-06 17:16:34 +02:00
log . Entry ( ) . Debugf ( "Get docker image: %v, %v, %v, %v" , config . ScanImage , config . DockerRegistryURL , config . FilePath , config . IncludeLayers )
2020-10-05 17:46:44 +02:00
fileName , filePath , err = getDockerImage ( dClient , config )
if err != nil {
return errors . Wrap ( err , "failed to get Docker image" )
}
2020-02-06 17:16:34 +02:00
if len ( config . FilePath ) <= 0 {
( * config ) . FilePath = filePath
log . Entry ( ) . Debugf ( "Filepath for upload image: %v" , config . FilePath )
}
2021-06-17 09:40:21 +02:00
} else if len ( config . FilePath ) > 0 {
parts := strings . Split ( config . FilePath , "/" )
pathFragment := strings . Join ( parts [ : len ( parts ) - 1 ] , "/" )
if len ( pathFragment ) > 0 {
( * config ) . FilePath = pathFragment
} else {
( * config ) . FilePath = "./"
}
fileName = parts [ len ( parts ) - 1 ]
2021-09-13 11:13:48 +02:00
} else if len ( config . FetchURL ) > 0 {
// Get filename from a fetch URL
fileName = filepath . Base ( config . FetchURL )
log . Entry ( ) . Debugf ( "[DEBUG] ===> Filepath from fetch URL: %v" , fileName )
2020-02-06 17:16:34 +02:00
}
log . Entry ( ) . Debug ( "Execute protecode scan" )
2020-09-02 10:41:12 +02:00
if err := executeProtecodeScan ( influx , client , config , fileName , writeReportToFile ) ; err != nil {
return err
}
2020-02-06 17:16:34 +02:00
defer os . Remove ( config . FilePath )
2020-09-02 10:41:12 +02:00
if err := os . RemoveAll ( filepath . Join ( cachePath , cacheProtecodePath ) ) ; err != nil {
2020-02-06 17:16:34 +02:00
log . Entry ( ) . Warnf ( "Error during cleanup folder %v" , err )
}
return nil
}
2020-04-08 12:55:46 +02:00
// TODO: extract to version utils
2020-02-06 17:16:34 +02:00
func handleArtifactVersion ( artifactVersion string ) string {
matches , _ := regexp . MatchString ( "([\\d\\.]){1,}-[\\d]{14}([\\Wa-z\\d]{41})?" , artifactVersion )
if matches {
split := strings . SplitN ( artifactVersion , "." , 2 )
2020-04-08 12:55:46 +02:00
log . Entry ( ) . WithField ( "old" , artifactVersion ) . WithField ( "new" , split [ 0 ] ) . Debug ( "Trimming version to major version digit." )
2020-02-06 17:16:34 +02:00
return split [ 0 ]
}
return artifactVersion
}
2020-10-05 17:46:44 +02:00
func getDockerImage ( dClient piperDocker . Download , config * protecodeExecuteScanOptions ) ( string , string , error ) {
2020-02-06 17:16:34 +02:00
cacheImagePath := filepath . Join ( cachePath , cacheProtecodeImagePath )
deletePath := filepath . Join ( cachePath , cacheProtecodePath )
err := os . RemoveAll ( deletePath )
os . Mkdir ( cacheImagePath , 600 )
imageSource , err := dClient . GetImageSource ( )
if err != nil {
2020-10-05 17:46:44 +02:00
log . SetErrorCategory ( log . ErrorConfiguration )
return "" , "" , errors . Wrap ( err , "failed to get docker image" )
2020-02-06 17:16:34 +02:00
}
image , err := dClient . DownloadImageToPath ( imageSource , cacheImagePath )
if err != nil {
2020-10-05 17:46:44 +02:00
return "" , "" , errors . Wrap ( err , "failed to download docker image" )
2020-02-06 17:16:34 +02:00
}
2020-09-02 15:00:55 +02:00
var fileName string
if util . IsTar ( config . ScanImage ) {
fileName = config . ScanImage
} else {
fileName = getTarName ( config )
tarFilePath := filepath . Join ( cachePath , fileName )
tarFile , err := os . Create ( tarFilePath )
2020-02-06 17:16:34 +02:00
if err != nil {
2020-10-05 17:46:44 +02:00
log . SetErrorCategory ( log . ErrorCustom )
return "" , "" , errors . Wrap ( err , "failed to create tar for the docker image" )
2020-09-02 15:00:55 +02:00
}
defer tarFile . Close ( )
if err := os . Chmod ( tarFilePath , 0644 ) ; err != nil {
2020-10-05 17:46:44 +02:00
log . SetErrorCategory ( log . ErrorCustom )
return "" , "" , errors . Wrap ( err , "failed to set permissions on tar for the docker image" )
2020-09-02 15:00:55 +02:00
}
if err = dClient . TarImage ( tarFile , image ) ; err != nil {
2020-10-05 17:46:44 +02:00
return "" , "" , errors . Wrap ( err , "failed to tar the docker image" )
2020-02-06 17:16:34 +02:00
}
}
resultFilePath := config . FilePath
if len ( config . FilePath ) <= 0 {
resultFilePath = cachePath
}
2020-10-05 17:46:44 +02:00
return fileName , resultFilePath , nil
2020-02-06 17:16:34 +02:00
}
2020-09-02 10:41:12 +02:00
// executeProtecodeScan drives the full scan lifecycle against the Protecode
// backend: product lookup/creation (or binary replacement), polling for the
// scan result, report download, result parsing, persistence of influx data
// and report/link JSON, and the final compliance check.
//
// writeReportToFile is injected as a parameter so tests can intercept the
// report download. Report-writing failures are logged as warnings only;
// a failed scan or a non-compliant result returns an error.
func executeProtecodeScan(influx *protecodeExecuteScanInflux, client protecode.Protecode, config *protecodeExecuteScanOptions, fileName string, writeReportToFile func(resp io.ReadCloser, reportFileName string) error) error {

	log.Entry().Debugf("[DEBUG] ===> Load existing product Group:%v, VerifyOnly:%v, Filename:%v, replaceProductId:%v", config.Group, config.VerifyOnly, fileName, config.ReplaceProductID)

	productID := -1

	// If replaceProductId is not provided then switch to automatic existing product detection
	if config.ReplaceProductID > 0 {
		log.Entry().Infof("replaceProductID has been provided (%v) and checking ...", config.ReplaceProductID)
		// Validate provided product id, if not valid id then throw an error
		if client.VerifyProductID(config.ReplaceProductID) {
			log.Entry().Infof("replaceProductID has been checked and it's valid")
			productID = config.ReplaceProductID
		} else {
			log.Entry().Debugf("[DEBUG] ===> ReplaceProductID doesn't exist")
			return fmt.Errorf("ERROR -> the product id is not valid '%d'", config.ReplaceProductID)
		}
	} else {
		// Get existing product id by filename
		log.Entry().Infof("replaceProductID is not provided and automatic search starts from group: %v ... ", config.Group)
		productID = client.LoadExistingProduct(config.Group, fileName)
	}

	log.Entry().Infof("Automatic search completed and found following product id: %v", productID)

	// upload the file or declare the fetch URL; may create a brand-new product
	productID = uploadScanOrDeclareFetch(*config, productID, client, fileName)
	log.Entry().Debugf("[DEBUG] ===> After 'uploadScanOrDeclareFetch' returned productID: %v", productID)

	if productID <= 0 {
		return fmt.Errorf("the product id is not valid '%d'", productID)
	}

	//pollForResult
	log.Entry().Debugf("Poll for scan result %v", productID)
	result := client.PollForResult(productID, config.TimeoutMinutes)
	// write results to file
	jsonData, _ := json.Marshal(result)
	ioutil.WriteFile(filepath.Join(reportPath, scanResultFile), jsonData, 0644)

	//check if result is ok else notify
	if protecode.HasFailed(result) {
		log.SetErrorCategory(log.ErrorService)
		return fmt.Errorf("protecode scan failed: %v/products/%v", config.ServerURL, productID)
	}

	//loadReport
	log.Entry().Debugf("Load report %v for %v", config.ReportFileName, productID)
	resp := client.LoadReport(config.ReportFileName, productID)

	//save report to filesystem
	if err := writeReportToFile(*resp, config.ReportFileName); err != nil {
		log.Entry().Warningf("failed to write report: %s", err)
	}
	//clean scan from server
	log.Entry().Debugf("Delete scan %v for %v", config.CleanupMode, productID)
	client.DeleteScan(config.CleanupMode, productID)

	//count vulnerabilities
	log.Entry().Debug("Parse scan result")
	parsedResult, vulns := client.ParseResultForInflux(result.Result, config.ExcludeCVEs)

	log.Entry().Debug("Write report to filesystem")
	if err := protecode.WriteReport(
		protecode.ReportData{
			ServerURL:                   config.ServerURL,
			FailOnSevereVulnerabilities: config.FailOnSevereVulnerabilities,
			ExcludeCVEs:                 config.ExcludeCVEs,
			Target:                      config.ReportFileName,
			Vulnerabilities:             vulns,
			ProductID:                   fmt.Sprintf("%v", productID),
		}, reportPath, stepResultFile, parsedResult, ioutil.WriteFile); err != nil {
		log.Entry().Warningf("failed to write report: %v", err)
	}

	log.Entry().Debug("Write influx data")
	setInfluxData(influx, parsedResult)

	// write reports JSON
	reports := []StepResults.Path{
		{Target: config.ReportFileName, Mandatory: true},
		{Target: stepResultFile, Mandatory: true},
		{Target: scanResultFile, Mandatory: true},
	}
	// write links JSON
	webuiURL := fmt.Sprintf(webReportPath, config.ServerURL, productID)
	links := []StepResults.Path{
		{Name: "Protecode WebUI", Target: webuiURL},
		{Name: "Protecode Report", Target: path.Join("artifact", config.ReportFileName), Scope: "job"},
	}

	// write custom report
	scanReport := protecode.CreateCustomReport(fileName, productID, parsedResult, vulns)
	paths, err := protecode.WriteCustomReports(scanReport, fileName, fmt.Sprint(productID))
	if err != nil {
		// do not fail - consider failing later on
		log.Entry().Warning("failed to create custom HTML/MarkDown file ...", err)
	} else {
		reports = append(reports, paths...)
	}

	// create toolrecord file
	toolRecordFileName, err := createToolRecordProtecode("./", config, productID, webuiURL)
	if err != nil {
		// do not fail until the framework is well established
		log.Entry().Warning("TR_PROTECODE: Failed to create toolrecord file ...", err)
	} else {
		reports = append(reports, StepResults.Path{Target: toolRecordFileName})
	}

	StepResults.PersistReportsAndLinks("protecodeExecuteScan", "", reports, links)

	if config.FailOnSevereVulnerabilities && protecode.HasSevereVulnerabilities(result.Result, config.ExcludeCVEs) {
		log.SetErrorCategory(log.ErrorCompliance)
		return fmt.Errorf("the product is not compliant")
	}
	return nil
}
func setInfluxData ( influx * protecodeExecuteScanInflux , result map [ string ] int ) {
2021-03-10 17:00:53 +02:00
influx . protecode_data . fields . historical_vulnerabilities = result [ "historical_vulnerabilities" ]
influx . protecode_data . fields . triaged_vulnerabilities = result [ "triaged_vulnerabilities" ]
influx . protecode_data . fields . excluded_vulnerabilities = result [ "excluded_vulnerabilities" ]
influx . protecode_data . fields . minor_vulnerabilities = result [ "minor_vulnerabilities" ]
influx . protecode_data . fields . major_vulnerabilities = result [ "major_vulnerabilities" ]
influx . protecode_data . fields . vulnerabilities = result [ "vulnerabilities" ]
2020-02-06 17:16:34 +02:00
}
func createClient ( config * protecodeExecuteScanOptions ) protecode . Protecode {
var duration time . Duration = time . Duration ( time . Minute * 1 )
if len ( config . TimeoutMinutes ) > 0 {
dur , err := time . ParseDuration ( fmt . Sprintf ( "%vm" , config . TimeoutMinutes ) )
if err != nil {
log . Entry ( ) . Warnf ( "Failed to parse timeout %v, switched back to default timeout %v minutes" , config . TimeoutMinutes , duration )
} else {
duration = dur
}
}
pc := protecode . Protecode { }
protecodeOptions := protecode . Options {
ServerURL : config . ServerURL ,
Logger : log . Entry ( ) . WithField ( "package" , "SAP/jenkins-library/pkg/protecode" ) ,
Duration : duration ,
2020-04-20 16:44:01 +02:00
Username : config . Username ,
2020-02-06 17:16:34 +02:00
Password : config . Password ,
}
pc . SetOptions ( protecodeOptions )
return pc
}
2020-09-02 10:41:12 +02:00
2020-02-06 17:16:34 +02:00
// createDockerClient creates the download client used to pull the scan image.
func createDockerClient(config *protecodeExecuteScanOptions) piperDocker.Download {
	client := &piperDocker.Client{}
	client.SetOptions(piperDocker.ClientOptions{
		ImageName:     config.ScanImage,
		RegistryURL:   config.DockerRegistryURL,
		LocalPath:     config.FilePath,
		IncludeLayers: config.IncludeLayers,
	})
	return client
}
2021-09-13 11:13:48 +02:00
// uploadScanOrDeclareFetch decides how the artifact reaches Protecode:
//   - productID <= 0: a new product is created by uploading / fetching the file
//   - productID > 0 and VerifyOnly false: the existing product's binary is replaced
//   - productID > 0 and VerifyOnly true: the latest existing scan result is reused
//
// It returns the (possibly newly created) product id.
func uploadScanOrDeclareFetch(config protecodeExecuteScanOptions, productID int, client protecode.Protecode, fileName string) int {
	log.Entry().Debugf("[DEBUG] ===> In uploadScanOrDeclareFetch: %v", productID)

	// no existing product was found - create a new one
	if productID <= 0 {
		log.Entry().Infof("New product creation started ... ")
		productID = uploadFile(config, productID, client, fileName, false)
		log.Entry().Infof("New product has been successfully created: %v", productID)
		return productID
	}

	// product exists and replacement is requested - swap the binary in place
	if !config.VerifyOnly {
		log.Entry().Infof("Product already exists and 'VerifyOnly (reuseExisting)' is false then product (%v) binary and scan result will be replaced without creating a new product.", productID)
		return uploadFile(config, productID, client, fileName, true)
	}

	// product exists and reuseExisting is enabled - keep the latest scan result
	log.Entry().Infof("VerifyOnly (reuseExisting) option is enabled and returned productID: %v", productID)
	return productID
}
func uploadFile ( config protecodeExecuteScanOptions , productID int , client protecode . Protecode , fileName string , replaceBinary bool ) int {
2021-06-15 22:29:24 +02:00
if len ( config . FetchURL ) > 0 {
log . Entry ( ) . Debugf ( "Declare fetch url %v" , config . FetchURL )
2021-09-13 11:13:48 +02:00
resultData := client . DeclareFetchURL ( config . CleanupMode , config . Group , config . FetchURL , productID , replaceBinary )
productID = resultData . Result . ProductID
2021-06-15 22:29:24 +02:00
} else {
log . Entry ( ) . Debugf ( "Upload file path: %v" , config . FilePath )
if len ( config . FilePath ) <= 0 {
2021-09-13 11:13:48 +02:00
log . Entry ( ) . Fatalf ( "There is no file path configured for upload : %v" , config . FilePath )
2021-06-15 22:29:24 +02:00
}
pathToFile := filepath . Join ( config . FilePath , fileName )
if ! ( fileExists ( pathToFile ) ) {
log . Entry ( ) . Fatalf ( "There is no file for upload: %v" , pathToFile )
2020-02-06 17:16:34 +02:00
}
2021-06-15 22:29:24 +02:00
combinedFileName := fileName
if len ( config . PullRequestName ) > 0 {
combinedFileName = fmt . Sprintf ( "%v_%v" , config . PullRequestName , fileName )
}
2021-09-13 11:13:48 +02:00
resultData := client . UploadScanFile ( config . CleanupMode , config . Group , pathToFile , combinedFileName , productID , replaceBinary )
productID = resultData . Result . ProductID
log . Entry ( ) . Debugf ( "[DEBUG] ===> uploadFile return FINAL product id: %v" , productID )
2020-02-06 17:16:34 +02:00
}
2021-09-13 11:13:48 +02:00
return productID
2020-02-06 17:16:34 +02:00
}
// fileExists reports whether filename exists and is a regular file (not a
// directory). Any stat error - not only "does not exist" - yields false.
// The original checked only os.IsNotExist and then dereferenced the nil
// FileInfo, panicking on other errors (e.g. permission denied, invalid path).
func fileExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		return false
	}
	return !info.IsDir()
}
2021-09-13 11:13:48 +02:00
// hasExisting reports whether an existing product can be used: either a
// valid (positive) product id was found or verify-only mode is active.
func hasExisting(productID int, verifyOnly bool) bool {
	return productID > 0 || verifyOnly
}
2020-02-06 17:16:34 +02:00
// writeReportToFile streams the report body into reportPath/reportFileName.
// Declared as a package variable so tests can substitute the writer.
var writeReportToFile = func(resp io.ReadCloser, reportFileName string) error {
	target := filepath.Join(reportPath, reportFileName)
	f, err := os.Create(target)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp)
	return err
}
2020-05-06 16:07:10 +02:00
2020-08-12 14:57:11 +02:00
func correctDockerConfigEnvVar ( config * protecodeExecuteScanOptions ) {
path := config . DockerConfigJSON
2020-05-06 16:07:10 +02:00
if len ( path ) > 0 {
2020-08-11 14:42:08 +02:00
log . Entry ( ) . Infof ( "Docker credentials configuration: %v" , path )
2020-05-06 16:07:10 +02:00
path , _ := filepath . Abs ( path )
2020-08-11 14:42:08 +02:00
// use parent directory
2020-05-06 16:07:10 +02:00
path = filepath . Dir ( path )
os . Setenv ( "DOCKER_CONFIG" , path )
2020-08-11 14:42:08 +02:00
} else {
log . Entry ( ) . Info ( "Docker credentials configuration: NONE" )
2020-05-06 16:07:10 +02:00
}
}
2020-09-02 15:00:55 +02:00
func getTarName ( config * protecodeExecuteScanOptions ) string {
// remove original version
2021-05-05 19:52:13 +02:00
fileName := strings . TrimSuffix ( config . ScanImage , ":" + config . Version )
2021-01-26 10:59:10 +02:00
// remove sha digest if exists
sha256 := "@sha256"
if index := strings . Index ( fileName , sha256 ) ; index > - 1 {
fileName = fileName [ : index ]
}
2020-09-02 15:00:55 +02:00
// append trimmed version
2021-05-05 19:52:13 +02:00
if version := handleArtifactVersion ( config . Version ) ; len ( version ) > 0 {
2020-09-02 15:00:55 +02:00
fileName = fileName + "_" + version
}
fileName = strings . ReplaceAll ( fileName , "/" , "_" )
return fileName + ".tar"
}
2021-06-23 15:05:00 +02:00
// create toolrecord file for protecode
// todo: check if group and product names can be retrieved
func createToolRecordProtecode ( workspace string , config * protecodeExecuteScanOptions , productID int , webuiURL string ) ( string , error ) {
record := toolrecord . New ( workspace , "protecode" , config . ServerURL )
2021-07-23 08:48:48 +02:00
groupURL := config . ServerURL + "/#/groups/" + config . Group
2021-06-23 15:05:00 +02:00
err := record . AddKeyData ( "group" ,
config . Group ,
2021-07-23 08:48:48 +02:00
config . Group , // todo figure out display name
groupURL )
2021-06-23 15:05:00 +02:00
if err != nil {
return "" , err
}
err = record . AddKeyData ( "product" ,
strconv . Itoa ( productID ) ,
2021-07-23 08:48:48 +02:00
strconv . Itoa ( productID ) , // todo figure out display name
2021-06-23 15:05:00 +02:00
webuiURL )
if err != nil {
return "" , err
}
err = record . Persist ( )
if err != nil {
return "" , err
}
return record . GetFileName ( ) , nil
}