mirror of
https://github.com/woodpecker-ci/woodpecker.git
synced 2024-11-24 08:02:18 +02:00
use generated migration
This commit is contained in:
parent
823175605f
commit
cea842bed5
@ -1,3 +0,0 @@
|
||||
package ddl
|
||||
|
||||
//go:generate go-bindata -pkg ddl -o ddl_gen.go sqlite3/ mysql/ postgres/
|
29
store/datastore/ddl/migrate.go
Normal file
29
store/datastore/ddl/migrate.go
Normal file
@ -0,0 +1,29 @@
|
||||
package ddl
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/drone/drone/store/datastore/ddl/mysql"
|
||||
"github.com/drone/drone/store/datastore/ddl/postgres"
|
||||
"github.com/drone/drone/store/datastore/ddl/sqlite"
|
||||
)
|
||||
|
||||
// Supported database drivers
|
||||
const (
|
||||
DriverSqlite = "sqlite3"
|
||||
DriverMysql = "mysql"
|
||||
DriverPostgres = "postgres"
|
||||
)
|
||||
|
||||
// Migrate performs the database migration. If the migration fails
|
||||
// and error is returned.
|
||||
func Migrate(driver string, db *sql.DB) error {
|
||||
switch driver {
|
||||
case DriverMysql:
|
||||
return mysql.Migrate(db)
|
||||
case DriverPostgres:
|
||||
return postgres.Migrate(db)
|
||||
default:
|
||||
return sqlite.Migrate(db)
|
||||
}
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE secrets ADD COLUMN secret_conceal BOOLEAN;
|
||||
ALTER TABLE team_secrets ADD COLUMN team_secret_conceal BOOLEAN;
|
||||
|
||||
UPDATE secrets SET secret_conceal = false;
|
||||
UPDATE team_secrets SET team_secret_conceal = false;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE secrets DROP COLUMN secret_conceal;
|
||||
ALTER TABLE team_secrets DROP COLUMN team_secret_conceal;
|
@ -1,8 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_error VARCHAR(500);
|
||||
UPDATE builds SET build_error = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_error;
|
@ -1,18 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_config_path VARCHAR(255);
|
||||
ALTER TABLE builds ADD COLUMN build_sender VARCHAR(255);
|
||||
ALTER TABLE builds ADD COLUMN build_reviewer VARCHAR(255);
|
||||
ALTER TABLE builds ADD COLUMN build_reviewed INTEGER;
|
||||
|
||||
UPDATE repos SET repo_config_path = '.drone.yml';
|
||||
UPDATE builds SET build_reviewer = '';
|
||||
UPDATE builds SET build_reviewed = 0;
|
||||
UPDATE builds SET build_sender = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_config_path;
|
||||
ALTER TABLE builds DROP COLUMN build_sender;
|
||||
ALTER TABLE builds DROP COLUMN build_reviewer;
|
||||
ALTER TABLE builds DROP COLUMN build_reviewed;
|
@ -1,45 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE procs (
|
||||
proc_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name VARCHAR(250)
|
||||
,proc_state VARCHAR(250)
|
||||
,proc_error VARCHAR(500)
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine VARCHAR(250)
|
||||
,proc_platform VARCHAR(250)
|
||||
,proc_environ VARCHAR(2000)
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
CREATE INDEX proc_build_ix ON procs (proc_build_id);
|
||||
|
||||
CREATE TABLE files (
|
||||
file_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name VARCHAR(250)
|
||||
,file_mime VARCHAR(250)
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data MEDIUMBLOB
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
);
|
||||
|
||||
CREATE INDEX file_build_ix ON files (file_build_id);
|
||||
CREATE INDEX file_proc_ix ON files (file_proc_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX file_build_ix;
|
||||
DROP INDEX file_proc_ix;
|
||||
DROP TABLE files;
|
||||
|
||||
DROP INDEX proc_build_ix;
|
||||
DROP TABLE procs;
|
@ -1,11 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE tasks (
|
||||
task_id VARCHAR(255) PRIMARY KEY
|
||||
,task_data MEDIUMBLOB
|
||||
,task_labels MEDIUMBLOB
|
||||
);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE tasks;
|
@ -1,124 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,user_login VARCHAR(255)
|
||||
,user_token VARCHAR(500)
|
||||
,user_secret VARCHAR(500)
|
||||
,user_expiry INTEGER
|
||||
,user_email VARCHAR(500)
|
||||
,user_avatar VARCHAR(500)
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash VARCHAR(500)
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repos (
|
||||
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner VARCHAR(255)
|
||||
,repo_name VARCHAR(255)
|
||||
,repo_full_name VARCHAR(255)
|
||||
,repo_avatar VARCHAR(500)
|
||||
,repo_link VARCHAR(1000)
|
||||
,repo_clone VARCHAR(1000)
|
||||
,repo_branch VARCHAR(500)
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash VARCHAR(500)
|
||||
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `keys` (
|
||||
key_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,key_repo_id INTEGER
|
||||
,key_public MEDIUMBLOB
|
||||
,key_private MEDIUMBLOB
|
||||
|
||||
,UNIQUE(key_repo_id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS builds (
|
||||
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event VARCHAR(500)
|
||||
,build_status VARCHAR(500)
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit VARCHAR(500)
|
||||
,build_branch VARCHAR(500)
|
||||
,build_ref VARCHAR(500)
|
||||
,build_refspec VARCHAR(1000)
|
||||
,build_remote VARCHAR(500)
|
||||
,build_title VARCHAR(1000)
|
||||
,build_message VARCHAR(2000)
|
||||
,build_timestamp INTEGER
|
||||
,build_author VARCHAR(500)
|
||||
,build_avatar VARCHAR(1000)
|
||||
,build_email VARCHAR(500)
|
||||
,build_link VARCHAR(1000)
|
||||
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS jobs (
|
||||
job_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,job_node_id INTEGER
|
||||
,job_build_id INTEGER
|
||||
,job_number INTEGER
|
||||
,job_status VARCHAR(500)
|
||||
,job_exit_code INTEGER
|
||||
,job_started INTEGER
|
||||
,job_enqueued INTEGER
|
||||
,job_finished INTEGER
|
||||
,job_environment VARCHAR(2000)
|
||||
|
||||
,UNIQUE(job_build_id, job_number)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_job_build ON jobs (job_build_id);
|
||||
CREATE INDEX ix_job_node ON jobs (job_node_id);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,log_job_id INTEGER
|
||||
,log_data MEDIUMBLOB
|
||||
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
node_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,node_addr VARCHAR(1024)
|
||||
,node_arch VARCHAR(50)
|
||||
,node_cert MEDIUMBLOB
|
||||
,node_key MEDIUMBLOB
|
||||
,node_ca MEDIUMBLOB
|
||||
);
|
||||
|
||||
|
||||
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
|
||||
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE nodes;
|
||||
DROP TABLE logs;
|
||||
DROP TABLE jobs;
|
||||
DROP TABLE builds;
|
||||
DROP TABLE `keys`;
|
||||
DROP TABLE repos;
|
||||
DROP TABLE users;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_scm VARCHAR(25);
|
||||
ALTER TABLE builds ADD COLUMN build_deploy VARCHAR(500);
|
||||
|
||||
UPDATE repos SET repo_scm = 'git' WHERE repo_scm = null;
|
||||
UPDATE builds SET build_deploy = '' WHERE build_deploy = null;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_scm;
|
||||
ALTER TABLE builds DROP COLUMN build_deploy;
|
@ -1,32 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE secrets (
|
||||
secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name VARCHAR(255)
|
||||
,secret_value MEDIUMBLOB
|
||||
,secret_images VARCHAR(2000)
|
||||
,secret_events VARCHAR(2000)
|
||||
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
CREATE TABLE registry (
|
||||
registry_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr VARCHAR(255)
|
||||
,registry_email VARCHAR(500)
|
||||
,registry_username VARCHAR(2000)
|
||||
,registry_password VARCHAR(2000)
|
||||
,registry_token VARCHAR(2000)
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
|
||||
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX ix_secrets_repo;
|
||||
DROP INDEX ix_registry_repo;
|
@ -1,9 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE jobs ADD COLUMN job_error VARCHAR(500);
|
||||
|
||||
UPDATE jobs SET job_error = '' WHERE job_error = null;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE jobs DROP COLUMN job_error;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_signed BOOLEAN;
|
||||
ALTER TABLE builds ADD COLUMN build_verified BOOLEAN;
|
||||
|
||||
UPDATE builds SET build_signed = false;
|
||||
UPDATE builds SET build_verified = false;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_signed;
|
||||
ALTER TABLE builds DROP COLUMN build_verified;
|
@ -1,19 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE team_secrets (
|
||||
team_secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,team_secret_key VARCHAR(255)
|
||||
,team_secret_name VARCHAR(255)
|
||||
,team_secret_value MEDIUMBLOB
|
||||
,team_secret_images VARCHAR(2000)
|
||||
,team_secret_events VARCHAR(2000)
|
||||
|
||||
,UNIQUE(team_secret_name, team_secret_key)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_team_secrets_key ON team_secrets (team_secret_key);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX ix_team_secrets_key;
|
||||
DROP TABLE team_secrets;
|
@ -1,7 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_parent INTEGER DEFAULT 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_parent;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE secrets ADD COLUMN secret_skip_verify BOOLEAN;
|
||||
ALTER TABLE team_secrets ADD COLUMN team_secret_skip_verify BOOLEAN;
|
||||
|
||||
UPDATE secrets SET secret_skip_verify = false;
|
||||
UPDATE team_secrets SET team_secret_skip_verify = false;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE secrets DROP COLUMN secret_skip_verify;
|
||||
ALTER TABLE team_secrets DROP COLUMN team_secret_skip_verify;
|
3
store/datastore/ddl/mysql/ddl.go
Normal file
3
store/datastore/ddl/mysql/ddl.go
Normal file
@ -0,0 +1,3 @@
|
||||
package mysql
|
||||
|
||||
//go:generate togo ddl -package mysql -dialect mysql
|
16
store/datastore/ddl/mysql/files/001_create_table_users.sql
Normal file
16
store/datastore/ddl/mysql/files/001_create_table_users.sql
Normal file
@ -0,0 +1,16 @@
|
||||
-- name: create-table-users
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,user_login VARCHAR(250)
|
||||
,user_token VARCHAR(500)
|
||||
,user_secret VARCHAR(500)
|
||||
,user_expiry INTEGER
|
||||
,user_email VARCHAR(500)
|
||||
,user_avatar VARCHAR(500)
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash VARCHAR(500)
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
26
store/datastore/ddl/mysql/files/002_create_table_repos.sql
Normal file
26
store/datastore/ddl/mysql/files/002_create_table_repos.sql
Normal file
@ -0,0 +1,26 @@
|
||||
-- name: create-table-repos
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repos (
|
||||
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner VARCHAR(250)
|
||||
,repo_name VARCHAR(250)
|
||||
,repo_full_name VARCHAR(250)
|
||||
,repo_avatar VARCHAR(500)
|
||||
,repo_link VARCHAR(1000)
|
||||
,repo_clone VARCHAR(1000)
|
||||
,repo_branch VARCHAR(500)
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash VARCHAR(500)
|
||||
,repo_scm VARCHAR(50)
|
||||
,repo_config_path VARCHAR(500)
|
||||
,repo_gated BOOLEAN
|
||||
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
44
store/datastore/ddl/mysql/files/003_create_table_builds.sql
Normal file
44
store/datastore/ddl/mysql/files/003_create_table_builds.sql
Normal file
@ -0,0 +1,44 @@
|
||||
-- name: create-table-builds
|
||||
|
||||
CREATE TABLE IF NOT EXISTS builds (
|
||||
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event VARCHAR(500)
|
||||
,build_status VARCHAR(500)
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit VARCHAR(500)
|
||||
,build_branch VARCHAR(500)
|
||||
,build_ref VARCHAR(500)
|
||||
,build_refspec VARCHAR(1000)
|
||||
,build_remote VARCHAR(500)
|
||||
,build_title VARCHAR(1000)
|
||||
,build_message VARCHAR(2000)
|
||||
,build_timestamp INTEGER
|
||||
,build_author VARCHAR(500)
|
||||
,build_avatar VARCHAR(1000)
|
||||
,build_email VARCHAR(500)
|
||||
,build_link VARCHAR(1000)
|
||||
,build_deploy VARCHAR(500)
|
||||
,build_signed BOOLEAN
|
||||
,build_verified BOOLEAN
|
||||
,build_parent INTEGER
|
||||
,build_error VARCHAR(500)
|
||||
,build_reviewer VARCHAR(250)
|
||||
,build_reviewed INTEGER
|
||||
,build_sender VARCHAR(250)
|
||||
,build_config_id INTEGER
|
||||
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-builds-repo
|
||||
|
||||
CREATE INDEX ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
-- name: create-index-builds-author
|
||||
|
||||
CREATE INDEX ix_build_author ON builds (build_author);
|
24
store/datastore/ddl/mysql/files/004_create_table_procs.sql
Normal file
24
store/datastore/ddl/mysql/files/004_create_table_procs.sql
Normal file
@ -0,0 +1,24 @@
|
||||
-- name: create-table-procs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS procs (
|
||||
proc_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name VARCHAR(250)
|
||||
,proc_state VARCHAR(250)
|
||||
,proc_error VARCHAR(500)
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine VARCHAR(250)
|
||||
,proc_platform VARCHAR(250)
|
||||
,proc_environ VARCHAR(2000)
|
||||
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
-- name: create-index-procs-build
|
||||
|
||||
CREATE INDEX proc_build_ix ON procs (proc_build_id);
|
@ -0,0 +1,9 @@
|
||||
-- name: create-table-logs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,log_job_id INTEGER
|
||||
,log_data MEDIUMBLOB
|
||||
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
22
store/datastore/ddl/mysql/files/006_create_table_files.sql
Normal file
22
store/datastore/ddl/mysql/files/006_create_table_files.sql
Normal file
@ -0,0 +1,22 @@
|
||||
-- name: create-table-files
|
||||
|
||||
CREATE TABLE IF NOT EXISTS files (
|
||||
file_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name VARCHAR(250)
|
||||
,file_mime VARCHAR(250)
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data MEDIUMBLOB
|
||||
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
);
|
||||
|
||||
-- name: create-index-files-builds
|
||||
|
||||
CREATE INDEX file_build_ix ON files (file_build_id);
|
||||
|
||||
-- name: create-index-files-procs
|
||||
|
||||
CREATE INDEX file_proc_ix ON files (file_proc_id);
|
18
store/datastore/ddl/mysql/files/007_create_table_secets.sql
Normal file
18
store/datastore/ddl/mysql/files/007_create_table_secets.sql
Normal file
@ -0,0 +1,18 @@
|
||||
-- name: create-table-secrets
|
||||
|
||||
CREATE TABLE IF NOT EXISTS secrets (
|
||||
secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name VARCHAR(250)
|
||||
,secret_value MEDIUMBLOB
|
||||
,secret_images VARCHAR(2000)
|
||||
,secret_events VARCHAR(2000)
|
||||
,secret_skip_verify BOOLEAN
|
||||
,secret_conceal BOOLEAN
|
||||
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-secrets-repo
|
||||
|
||||
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
|
@ -0,0 +1,17 @@
|
||||
-- name: create-table-registry
|
||||
|
||||
CREATE TABLE IF NOT EXISTS registry (
|
||||
registry_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr VARCHAR(250)
|
||||
,registry_email VARCHAR(500)
|
||||
,registry_username VARCHAR(2000)
|
||||
,registry_password VARCHAR(2000)
|
||||
,registry_token VARCHAR(2000)
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-registry-repo
|
||||
|
||||
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
|
@ -1,6 +1,6 @@
|
||||
-- +migrate Up
|
||||
-- name: create-table-config
|
||||
|
||||
CREATE TABLE config (
|
||||
CREATE TABLE IF NOT EXISTS config (
|
||||
config_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,config_repo_id INTEGER
|
||||
,config_hash VARCHAR(250)
|
||||
@ -8,10 +8,3 @@ CREATE TABLE config (
|
||||
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_config_id INTEGER;
|
||||
UPDATE builds set build_config_id = 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE config;
|
@ -0,0 +1,7 @@
|
||||
-- name: create-table-tasks
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tasks (
|
||||
task_id VARCHAR(250) PRIMARY KEY
|
||||
,task_data MEDIUMBLOB
|
||||
,task_labels MEDIUMBLOB
|
||||
);
|
@ -1,8 +1,8 @@
|
||||
-- +migrate Up
|
||||
-- name: create-table-agents
|
||||
|
||||
CREATE TABLE agents (
|
||||
CREATE TABLE IF NOT EXISTS agents (
|
||||
agent_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,agent_addr VARCHAR(255)
|
||||
,agent_addr VARCHAR(250)
|
||||
,agent_platform VARCHAR(500)
|
||||
,agent_capacity INTEGER
|
||||
,agent_created INTEGER
|
||||
@ -10,8 +10,3 @@ CREATE TABLE agents (
|
||||
|
||||
,UNIQUE(agent_addr)
|
||||
);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE agents;
|
@ -1,9 +1,6 @@
|
||||
-- +migrate Up
|
||||
-- name: create-table-senders
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_gated BOOLEAN;
|
||||
UPDATE repos SET repo_gated = false;
|
||||
|
||||
CREATE TABLE senders (
|
||||
CREATE TABLE IF NOT EXISTS senders (
|
||||
sender_id INTEGER PRIMARY KEY AUTO_INCREMENT
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login VARCHAR(250)
|
||||
@ -13,10 +10,6 @@ CREATE TABLE senders (
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
-- name: create-index-sender-repos
|
||||
|
||||
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_gated;
|
||||
DROP INDEX sender_repo_ix;
|
||||
DROP TABLE senders;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE secrets ADD COLUMN secret_conceal BOOLEAN;
|
||||
ALTER TABLE team_secrets ADD COLUMN team_secret_conceal BOOLEAN;
|
||||
|
||||
UPDATE secrets SET secret_conceal = false;
|
||||
UPDATE team_secrets SET team_secret_conceal = false;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE secrets DROP COLUMN secret_conceal;
|
||||
ALTER TABLE team_secrets DROP COLUMN team_secret_conceal;
|
@ -1,8 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_error VARCHAR(500);
|
||||
UPDATE builds SET build_error = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_error;
|
@ -1,18 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_config_path VARCHAR(255);
|
||||
ALTER TABLE builds ADD COLUMN build_reviewer VARCHAR(255);
|
||||
ALTER TABLE builds ADD COLUMN build_reviewed INTEGER;
|
||||
ALTER TABLE builds ADD COLUMN build_sender VARCHAR(255);
|
||||
|
||||
UPDATE repos SET repo_config_path = '.drone.yml';
|
||||
UPDATE builds SET build_reviewer = '';
|
||||
UPDATE builds SET build_reviewed = 0;
|
||||
UPDATE builds SET build_sender = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_config_path;
|
||||
ALTER TABLE builds DROP COLUMN build_reviewer;
|
||||
ALTER TABLE builds DROP COLUMN build_reviewed;
|
||||
ALTER TABLE builds DROP COLUMN build_sender;
|
@ -1,47 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE procs (
|
||||
proc_id SERIAL PRIMARY KEY
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name VARCHAR(250)
|
||||
,proc_state VARCHAR(250)
|
||||
,proc_error VARCHAR(500)
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine VARCHAR(250)
|
||||
,proc_platform VARCHAR(250)
|
||||
,proc_environ VARCHAR(2000)
|
||||
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
CREATE INDEX proc_build_ix ON procs (proc_build_id);
|
||||
|
||||
CREATE TABLE files (
|
||||
file_id SERIAL PRIMARY KEY
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name VARCHAR(250)
|
||||
,file_mime VARCHAR(250)
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data BYTEA
|
||||
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
);
|
||||
|
||||
CREATE INDEX file_build_ix ON files (file_build_id);
|
||||
CREATE INDEX file_proc_ix ON files (file_proc_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX file_build_ix;
|
||||
DROP INDEX file_proc_ix;
|
||||
DROP TABLE files;
|
||||
|
||||
DROP INDEX proc_build_ix;
|
||||
DROP TABLE procs;
|
@ -1,22 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_gated BOOLEAN;
|
||||
UPDATE repos SET repo_gated = false;
|
||||
|
||||
CREATE TABLE senders (
|
||||
sender_id SERIAL PRIMARY KEY
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login VARCHAR(250)
|
||||
,sender_allow BOOLEAN
|
||||
,sender_block BOOLEAN
|
||||
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_gated;
|
||||
DROP INDEX sender_repo_ix;
|
||||
DROP TABLE senders;
|
@ -1,11 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE tasks (
|
||||
task_id VARCHAR(255) PRIMARY KEY
|
||||
,task_data BYTEA
|
||||
,task_labels BYTEA
|
||||
);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE tasks;
|
@ -1,17 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE config (
|
||||
config_id SERIAL PRIMARY KEY
|
||||
,config_repo_id INTEGER
|
||||
,config_hash VARCHAR(250)
|
||||
,config_data BYTEA
|
||||
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_config_id INTEGER;
|
||||
UPDATE builds set build_config_id = 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE config;
|
@ -1,126 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE users (
|
||||
user_id SERIAL PRIMARY KEY
|
||||
,user_login VARCHAR(40)
|
||||
,user_token VARCHAR(128)
|
||||
,user_secret VARCHAR(128)
|
||||
,user_expiry INTEGER
|
||||
,user_email VARCHAR(256)
|
||||
,user_avatar VARCHAR(256)
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash VARCHAR(128)
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
||||
|
||||
CREATE TABLE repos (
|
||||
repo_id SERIAL PRIMARY KEY
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner VARCHAR(255)
|
||||
,repo_name VARCHAR(255)
|
||||
,repo_full_name VARCHAR(511)
|
||||
,repo_avatar VARCHAR(500)
|
||||
,repo_link VARCHAR(1000)
|
||||
,repo_clone VARCHAR(1000)
|
||||
,repo_branch VARCHAR(500)
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash VARCHAR(500)
|
||||
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
||||
|
||||
CREATE TABLE keys (
|
||||
key_id SERIAL PRIMARY KEY
|
||||
,key_repo_id INTEGER
|
||||
,key_public BYTEA
|
||||
,key_private BYTEA
|
||||
|
||||
,UNIQUE(key_repo_id)
|
||||
);
|
||||
|
||||
CREATE TABLE builds (
|
||||
build_id SERIAL PRIMARY KEY
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event VARCHAR(25)
|
||||
,build_status VARCHAR(25)
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit VARCHAR(40)
|
||||
,build_branch VARCHAR(256)
|
||||
,build_ref VARCHAR(512)
|
||||
,build_refspec VARCHAR(512)
|
||||
,build_remote VARCHAR(512)
|
||||
,build_title VARCHAR(1000)
|
||||
,build_message VARCHAR(2000)
|
||||
,build_timestamp INTEGER
|
||||
,build_author VARCHAR(40)
|
||||
,build_avatar VARCHAR(1000)
|
||||
,build_email VARCHAR(500)
|
||||
,build_link VARCHAR(1000)
|
||||
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
CREATE TABLE jobs (
|
||||
job_id SERIAL PRIMARY KEY
|
||||
,job_node_id INTEGER
|
||||
,job_build_id INTEGER
|
||||
,job_number INTEGER
|
||||
,job_status VARCHAR(25)
|
||||
,job_exit_code INTEGER
|
||||
,job_started INTEGER
|
||||
,job_enqueued INTEGER
|
||||
,job_finished INTEGER
|
||||
,job_environment VARCHAR(2000)
|
||||
|
||||
,UNIQUE(job_build_id, job_number)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_job_build ON jobs (job_build_id);
|
||||
CREATE INDEX ix_job_node ON jobs (job_node_id);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id SERIAL PRIMARY KEY
|
||||
,log_job_id INTEGER
|
||||
,log_data BYTEA
|
||||
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
node_id SERIAL PRIMARY KEY
|
||||
,node_addr VARCHAR(1024)
|
||||
,node_arch VARCHAR(50)
|
||||
,node_cert BYTEA
|
||||
,node_key BYTEA
|
||||
,node_ca BYTEA
|
||||
);
|
||||
|
||||
|
||||
INSERT INTO nodes (node_addr, node_arch, node_cert, node_key, node_ca) VALUES
|
||||
('unix:///var/run/docker.sock', 'linux_amd64', '', '', ''),
|
||||
('unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE nodes;
|
||||
DROP TABLE logs;
|
||||
DROP TABLE jobs;
|
||||
DROP TABLE builds;
|
||||
DROP TABLE keys;
|
||||
DROP TABLE stars;
|
||||
DROP TABLE repos;
|
||||
DROP TABLE users;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_scm VARCHAR(25);
|
||||
ALTER TABLE builds ADD COLUMN build_deploy VARCHAR(500);
|
||||
|
||||
UPDATE repos SET repo_scm = 'git';
|
||||
UPDATE builds SET build_deploy = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_scm;
|
||||
ALTER TABLE builds DROP COLUMN build_deploy;
|
@ -1,32 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE secrets (
|
||||
secret_id SERIAL PRIMARY KEY
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name VARCHAR(500)
|
||||
,secret_value BYTEA
|
||||
,secret_images VARCHAR(2000)
|
||||
,secret_events VARCHAR(2000)
|
||||
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
CREATE TABLE registry (
|
||||
registry_id SERIAL PRIMARY KEY
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr VARCHAR(500)
|
||||
,registry_email VARCHAR(500)
|
||||
,registry_username VARCHAR(2000)
|
||||
,registry_password VARCHAR(2000)
|
||||
,registry_token VARCHAR(2000)
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
|
||||
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX ix_secrets_repo;
|
||||
DROP INDEX ix_registry_repo;
|
@ -1,9 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE jobs ADD COLUMN job_error VARCHAR(500);
|
||||
|
||||
UPDATE jobs SET job_error = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE jobs DROP COLUMN job_error;
|
@ -1,16 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_signed BOOLEAN;
|
||||
ALTER TABLE builds ADD COLUMN build_verified BOOLEAN;
|
||||
|
||||
UPDATE builds SET build_signed = false;
|
||||
UPDATE builds SET build_verified = false;
|
||||
|
||||
CREATE INDEX ix_build_status_running ON builds (build_status)
|
||||
WHERE build_status IN ('pending', 'running');
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_signed;
|
||||
ALTER TABLE builds DROP COLUMN build_verified;
|
||||
DROP INDEX ix_build_status_running;
|
@ -1,19 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE team_secrets (
|
||||
team_secret_id SERIAL PRIMARY KEY
|
||||
,team_secret_key VARCHAR(255)
|
||||
,team_secret_name VARCHAR(255)
|
||||
,team_secret_value BYTEA
|
||||
,team_secret_images VARCHAR(2000)
|
||||
,team_secret_events VARCHAR(2000)
|
||||
|
||||
,UNIQUE(team_secret_name, team_secret_key)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_team_secrets_key ON team_secrets (team_secret_key);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX ix_team_secrets_key;
|
||||
DROP TABLE team_secrets;
|
@ -1,7 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_parent INTEGER DEFAULT 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_parent;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE secrets ADD COLUMN secret_skip_verify BOOLEAN;
|
||||
ALTER TABLE team_secrets ADD COLUMN team_secret_skip_verify BOOLEAN;
|
||||
|
||||
UPDATE secrets SET secret_skip_verify = false;
|
||||
UPDATE team_secrets SET team_secret_skip_verify = false;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE secrets DROP COLUMN secret_skip_verify;
|
||||
ALTER TABLE team_secrets DROP COLUMN team_secret_skip_verify;
|
3
store/datastore/ddl/postgres/ddl.go
Normal file
3
store/datastore/ddl/postgres/ddl.go
Normal file
@ -0,0 +1,3 @@
|
||||
package postgres
|
||||
|
||||
//go:generate togo ddl -package postgres -dialect postgres
|
@ -0,0 +1,16 @@
|
||||
-- name: create-table-users
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
user_id SERIAL PRIMARY KEY
|
||||
,user_login VARCHAR(250)
|
||||
,user_token VARCHAR(500)
|
||||
,user_secret VARCHAR(500)
|
||||
,user_expiry INTEGER
|
||||
,user_email VARCHAR(500)
|
||||
,user_avatar VARCHAR(500)
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash VARCHAR(500)
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
@ -0,0 +1,26 @@
|
||||
-- name: create-table-repos
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repos (
|
||||
repo_id SERIAL PRIMARY KEY
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner VARCHAR(250)
|
||||
,repo_name VARCHAR(250)
|
||||
,repo_full_name VARCHAR(250)
|
||||
,repo_avatar VARCHAR(500)
|
||||
,repo_link VARCHAR(1000)
|
||||
,repo_clone VARCHAR(1000)
|
||||
,repo_branch VARCHAR(500)
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash VARCHAR(500)
|
||||
,repo_scm VARCHAR(50)
|
||||
,repo_config_path VARCHAR(500)
|
||||
,repo_gated BOOLEAN
|
||||
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
@ -0,0 +1,44 @@
|
||||
-- name: create-table-builds
|
||||
|
||||
CREATE TABLE IF NOT EXISTS builds (
|
||||
build_id SERIAL PRIMARY KEY
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event VARCHAR(500)
|
||||
,build_status VARCHAR(500)
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit VARCHAR(500)
|
||||
,build_branch VARCHAR(500)
|
||||
,build_ref VARCHAR(500)
|
||||
,build_refspec VARCHAR(1000)
|
||||
,build_remote VARCHAR(500)
|
||||
,build_title VARCHAR(1000)
|
||||
,build_message VARCHAR(2000)
|
||||
,build_timestamp INTEGER
|
||||
,build_author VARCHAR(500)
|
||||
,build_avatar VARCHAR(1000)
|
||||
,build_email VARCHAR(500)
|
||||
,build_link VARCHAR(1000)
|
||||
,build_deploy VARCHAR(500)
|
||||
,build_signed BOOLEAN
|
||||
,build_verified BOOLEAN
|
||||
,build_parent INTEGER
|
||||
,build_error VARCHAR(500)
|
||||
,build_reviewer VARCHAR(250)
|
||||
,build_reviewed INTEGER
|
||||
,build_sender VARCHAR(250)
|
||||
,build_config_id INTEGER
|
||||
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-builds-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
-- name: create-index-builds-author
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);
|
@ -0,0 +1,24 @@
|
||||
-- name: create-table-procs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS procs (
|
||||
proc_id SERIAL PRIMARY KEY
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name VARCHAR(250)
|
||||
,proc_state VARCHAR(250)
|
||||
,proc_error VARCHAR(500)
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine VARCHAR(250)
|
||||
,proc_platform VARCHAR(250)
|
||||
,proc_environ VARCHAR(2000)
|
||||
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
-- name: create-index-procs-build
|
||||
|
||||
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);
|
@ -0,0 +1,9 @@
|
||||
-- name: create-table-logs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id SERIAL PRIMARY KEY
|
||||
,log_job_id INTEGER
|
||||
,log_data BYTEA
|
||||
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
@ -0,0 +1,22 @@
|
||||
-- name: create-table-files
|
||||
|
||||
CREATE TABLE IF NOT EXISTS files (
|
||||
file_id SERIAL PRIMARY KEY
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name VARCHAR(250)
|
||||
,file_mime VARCHAR(250)
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data BYTEA
|
||||
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
);
|
||||
|
||||
-- name: create-index-files-builds
|
||||
|
||||
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
|
||||
|
||||
-- name: create-index-files-procs
|
||||
|
||||
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);
|
@ -0,0 +1,18 @@
|
||||
-- name: create-table-secrets
|
||||
|
||||
CREATE TABLE IF NOT EXISTS secrets (
|
||||
secret_id SERIAL PRIMARY KEY
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name VARCHAR(250)
|
||||
,secret_value BYTEA
|
||||
,secret_images VARCHAR(2000)
|
||||
,secret_events VARCHAR(2000)
|
||||
,secret_skip_verify BOOLEAN
|
||||
,secret_conceal BOOLEAN
|
||||
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-secrets-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);
|
@ -0,0 +1,17 @@
|
||||
-- name: create-table-registry
|
||||
|
||||
CREATE TABLE IF NOT EXISTS registry (
|
||||
registry_id SERIAL PRIMARY KEY
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr VARCHAR(250)
|
||||
,registry_email VARCHAR(500)
|
||||
,registry_username VARCHAR(2000)
|
||||
,registry_password VARCHAR(2000)
|
||||
,registry_token VARCHAR(2000)
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-registry-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);
|
@ -0,0 +1,10 @@
|
||||
-- name: create-table-config
|
||||
|
||||
CREATE TABLE IF NOT EXISTS config (
|
||||
config_id SERIAL PRIMARY KEY
|
||||
,config_repo_id INTEGER
|
||||
,config_hash VARCHAR(250)
|
||||
,config_data BYTEA
|
||||
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
@ -0,0 +1,7 @@
|
||||
-- name: create-table-tasks
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tasks (
|
||||
task_id VARCHAR(250) PRIMARY KEY
|
||||
,task_data BYTEA
|
||||
,task_labels BYTEA
|
||||
);
|
@ -1,8 +1,8 @@
|
||||
-- +migrate Up
|
||||
-- name: create-table-agents
|
||||
|
||||
CREATE TABLE agents (
|
||||
CREATE TABLE IF NOT EXISTS agents (
|
||||
agent_id SERIAL PRIMARY KEY
|
||||
,agent_addr VARCHAR(500)
|
||||
,agent_addr VARCHAR(250)
|
||||
,agent_platform VARCHAR(500)
|
||||
,agent_capacity INTEGER
|
||||
,agent_created INTEGER
|
||||
@ -10,8 +10,3 @@ CREATE TABLE agents (
|
||||
|
||||
,UNIQUE(agent_addr)
|
||||
);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE agents;
|
@ -0,0 +1,15 @@
|
||||
-- name: create-table-senders
|
||||
|
||||
CREATE TABLE IF NOT EXISTS senders (
|
||||
sender_id SERIAL PRIMARY KEY
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login VARCHAR(250)
|
||||
,sender_allow BOOLEAN
|
||||
,sender_block BOOLEAN
|
||||
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
-- name: create-index-sender-repos
|
||||
|
||||
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);
|
3
store/datastore/ddl/sqlite/ddl.go
Normal file
3
store/datastore/ddl/sqlite/ddl.go
Normal file
@ -0,0 +1,3 @@
|
||||
package sqlite
|
||||
|
||||
//go:generate togo ddl -package sqlite -dialect sqlite3
|
15
store/datastore/ddl/sqlite/files/001_create_table_users.sql
Normal file
15
store/datastore/ddl/sqlite/files/001_create_table_users.sql
Normal file
@ -0,0 +1,15 @@
|
||||
-- name: create-table-users
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
user_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,user_login TEXT
|
||||
,user_token TEXT
|
||||
,user_secret TEXT
|
||||
,user_expiry INTEGER
|
||||
,user_email TEXT
|
||||
,user_avatar TEXT
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash TEXT
|
||||
,UNIQUE(user_login)
|
||||
);
|
25
store/datastore/ddl/sqlite/files/002_create_table_repos.sql
Normal file
25
store/datastore/ddl/sqlite/files/002_create_table_repos.sql
Normal file
@ -0,0 +1,25 @@
|
||||
-- name: create-table-repos
|
||||
|
||||
CREATE TABLE IF NOT EXISTS repos (
|
||||
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner TEXT
|
||||
,repo_name TEXT
|
||||
,repo_full_name TEXT
|
||||
,repo_avatar TEXT
|
||||
,repo_link TEXT
|
||||
,repo_clone TEXT
|
||||
,repo_branch TEXT
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash TEXT
|
||||
,repo_scm TEXT
|
||||
,repo_config_path TEXT
|
||||
,repo_gated BOOLEAN
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
48
store/datastore/ddl/sqlite/files/003_create_table_builds.sql
Normal file
48
store/datastore/ddl/sqlite/files/003_create_table_builds.sql
Normal file
@ -0,0 +1,48 @@
|
||||
-- name: create-table-builds
|
||||
|
||||
CREATE TABLE IF NOT EXISTS builds (
|
||||
build_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event TEXT
|
||||
,build_status TEXT
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit TEXT
|
||||
,build_branch TEXT
|
||||
,build_ref TEXT
|
||||
,build_refspec TEXT
|
||||
,build_remote TEXT
|
||||
,build_title TEXT
|
||||
,build_message TEXT
|
||||
,build_timestamp INTEGER
|
||||
,build_author TEXT
|
||||
,build_avatar TEXT
|
||||
,build_email TEXT
|
||||
,build_link TEXT
|
||||
,build_deploy TEXT
|
||||
,build_signed BOOLEAN
|
||||
,build_verified BOOLEAN
|
||||
,build_parent INTEGER
|
||||
,build_error TEXT
|
||||
,build_reviewer TEXT
|
||||
,build_reviewed INTEGER
|
||||
,build_sender TEXT
|
||||
,build_config_id INTEGER
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-builds-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
-- name: create-index-builds-author
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);
|
||||
|
||||
-- name: create-index-builds-status
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_build_status_running ON builds (build_status)
|
||||
WHERE build_status IN ('pending', 'running');
|
23
store/datastore/ddl/sqlite/files/004_create_table_procs.sql
Normal file
23
store/datastore/ddl/sqlite/files/004_create_table_procs.sql
Normal file
@ -0,0 +1,23 @@
|
||||
-- name: create-table-procs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS procs (
|
||||
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name TEXT
|
||||
,proc_state TEXT
|
||||
,proc_error TEXT
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine TEXT
|
||||
,proc_platform TEXT
|
||||
,proc_environ TEXT
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
-- name: create-index-procs-build
|
||||
|
||||
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);
|
@ -0,0 +1,8 @@
|
||||
-- name: create-table-logs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,log_job_id INTEGER
|
||||
,log_data BLOB
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
21
store/datastore/ddl/sqlite/files/006_create_table_files.sql
Normal file
21
store/datastore/ddl/sqlite/files/006_create_table_files.sql
Normal file
@ -0,0 +1,21 @@
|
||||
-- name: create-table-files
|
||||
|
||||
CREATE TABLE IF NOT EXISTS files (
|
||||
file_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name TEXT
|
||||
,file_mime TEXT
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data BLOB
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
);
|
||||
|
||||
-- name: create-index-files-builds
|
||||
|
||||
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
|
||||
|
||||
-- name: create-index-files-procs
|
||||
|
||||
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);
|
17
store/datastore/ddl/sqlite/files/007_create_table_secets.sql
Normal file
17
store/datastore/ddl/sqlite/files/007_create_table_secets.sql
Normal file
@ -0,0 +1,17 @@
|
||||
-- name: create-table-secrets
|
||||
|
||||
CREATE TABLE IF NOT EXISTS secrets (
|
||||
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name TEXT
|
||||
,secret_value TEXT
|
||||
,secret_images TEXT
|
||||
,secret_events TEXT
|
||||
,secret_skip_verify BOOLEAN
|
||||
,secret_conceal BOOLEAN
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-secrets-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);
|
@ -0,0 +1,17 @@
|
||||
-- name: create-table-registry
|
||||
|
||||
CREATE TABLE IF NOT EXISTS registry (
|
||||
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr TEXT
|
||||
,registry_username TEXT
|
||||
,registry_password TEXT
|
||||
,registry_email TEXT
|
||||
,registry_token TEXT
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-registry-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);
|
@ -0,0 +1,9 @@
|
||||
-- name: create-table-config
|
||||
|
||||
CREATE TABLE IF NOT EXISTS config (
|
||||
config_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,config_repo_id INTEGER
|
||||
,config_hash TEXT
|
||||
,config_data BLOB
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
@ -0,0 +1,7 @@
|
||||
-- name: create-table-tasks
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tasks (
|
||||
task_id TEXT PRIMARY KEY
|
||||
,task_data BLOB
|
||||
,task_labels BLOB
|
||||
);
|
@ -1,6 +1,6 @@
|
||||
-- +migrate Up
|
||||
-- name: create-table-agents
|
||||
|
||||
CREATE TABLE agents (
|
||||
CREATE TABLE IF NOT EXISTS agents (
|
||||
agent_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,agent_addr TEXT
|
||||
,agent_platform TEXT
|
||||
@ -10,8 +10,3 @@ CREATE TABLE agents (
|
||||
|
||||
,UNIQUE(agent_addr)
|
||||
);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE agents;
|
@ -0,0 +1,15 @@
|
||||
-- name: create-table-senders
|
||||
|
||||
CREATE TABLE IF NOT EXISTS senders (
|
||||
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login TEXT
|
||||
,sender_allow BOOLEAN
|
||||
,sender_block BOOLEAN
|
||||
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
-- name: create-index-sender-repos
|
||||
|
||||
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);
|
242
store/datastore/ddl/sqlite/files/XXX_init.xxx
Normal file
242
store/datastore/ddl/sqlite/files/XXX_init.xxx
Normal file
@ -0,0 +1,242 @@
|
||||
-- name: create-table-users
|
||||
|
||||
CREATE TABLE users (
|
||||
user_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,user_login TEXT
|
||||
,user_token TEXT
|
||||
,user_secret TEXT
|
||||
,user_expiry INTEGER
|
||||
,user_email TEXT
|
||||
,user_avatar TEXT
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash TEXT
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-repos
|
||||
|
||||
CREATE TABLE repos (
|
||||
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner TEXT
|
||||
,repo_name TEXT
|
||||
,repo_full_name TEXT
|
||||
,repo_avatar TEXT
|
||||
,repo_link TEXT
|
||||
,repo_clone TEXT
|
||||
,repo_branch TEXT
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash TEXT
|
||||
,repo_scm TEXT
|
||||
,repo_config_path TEXT
|
||||
,repo_gated BOOLEAN
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-secrets
|
||||
|
||||
CREATE TABLE secrets (
|
||||
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name TEXT
|
||||
,secret_value TEXT
|
||||
,secret_images TEXT
|
||||
,secret_events TEXT
|
||||
,secret_skip_verify BOOLEAN
|
||||
,secret_conceal BOOLEAN
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-secrets-repo
|
||||
|
||||
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-registry
|
||||
|
||||
CREATE TABLE registry (
|
||||
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr TEXT
|
||||
,registry_username TEXT
|
||||
,registry_password TEXT
|
||||
,registry_email TEXT
|
||||
,registry_token TEXT
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-registry-repo
|
||||
|
||||
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-builds
|
||||
|
||||
CREATE TABLE builds (
|
||||
build_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event TEXT
|
||||
,build_status TEXT
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit TEXT
|
||||
,build_branch TEXT
|
||||
,build_ref TEXT
|
||||
,build_refspec TEXT
|
||||
,build_remote TEXT
|
||||
,build_title TEXT
|
||||
,build_message TEXT
|
||||
,build_timestamp INTEGER
|
||||
,build_author TEXT
|
||||
,build_avatar TEXT
|
||||
,build_email TEXT
|
||||
,build_link TEXT
|
||||
,build_deploy TEXT
|
||||
,build_signed BOOLEAN
|
||||
,build_verified BOOLEAN
|
||||
,build_parent INTEGER
|
||||
,build_error TEXT
|
||||
,build_reviewer TEXT
|
||||
,build_reviewed INTEGER
|
||||
,build_sender TEXT
|
||||
,build_config_id INTEGER
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-builds-repo
|
||||
|
||||
CREATE INDEX ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
-- name: create-index-builds-author
|
||||
|
||||
CREATE INDEX ix_build_author ON builds (build_author);
|
||||
|
||||
-- name: create-index-builds-status
|
||||
|
||||
CREATE INDEX ix_build_status_running ON builds (build_status)
|
||||
WHERE build_status IN ('pending', 'running');
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-procs
|
||||
|
||||
CREATE TABLE procs (
|
||||
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name TEXT
|
||||
,proc_state TEXT
|
||||
,proc_error TEXT
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine TEXT
|
||||
,proc_platform TEXT
|
||||
,proc_environ TEXT
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
-- name: create-index-procs-build
|
||||
|
||||
CREATE INDEX proc_build_ix ON procs (proc_build_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-logs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,log_job_id INTEGER
|
||||
,log_data BLOB
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-files
|
||||
|
||||
CREATE TABLE IF NOT EXISTS files (
|
||||
file_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name TEXT
|
||||
,file_mime TEXT
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data BLOB
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
,FOREIGN KEY(file_proc_id) REFERENCES procs (proc_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- name: create-index-files-builds
|
||||
|
||||
CREATE INDEX file_build_ix ON files (file_build_id);
|
||||
|
||||
-- name: create-index-files-procs
|
||||
|
||||
CREATE INDEX file_proc_ix ON files (file_proc_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-senders
|
||||
|
||||
CREATE TABLE IF NOT EXISTS senders (
|
||||
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login BOOLEAN
|
||||
,sender_allow BOOLEAN
|
||||
,sender_block BOOLEAN
|
||||
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
-- name: create-index-sender-repos
|
||||
|
||||
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-config
|
||||
|
||||
CREATE TABLE IF NOT EXISTS config (
|
||||
config_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,config_repo_id INTEGER
|
||||
,config_hash TEXT
|
||||
,config_data BLOB
|
||||
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-tasks
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tasks (
|
||||
task_id TEXT PRIMARY KEY
|
||||
,task_data BLOB
|
||||
,task_labels BLOB
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-agents
|
||||
|
||||
CREATE TABLE IF NOT EXISTS agents (
|
||||
agent_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,agent_addr TEXT
|
||||
,agent_platform TEXT
|
||||
,agent_capacity INTEGER
|
||||
,agent_created INTEGER
|
||||
,agent_updated INTEGER
|
||||
|
||||
,UNIQUE(agent_addr)
|
||||
);
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE secrets ADD COLUMN secret_conceal BOOLEAN;
|
||||
ALTER TABLE team_secrets ADD COLUMN team_secret_conceal BOOLEAN;
|
||||
|
||||
UPDATE secrets SET secret_conceal = 0;
|
||||
UPDATE team_secrets SET team_secret_conceal = 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE secrets DROP COLUMN secret_conceal;
|
||||
ALTER TABLE team_secrets DROP COLUMN team_secret_conceal;
|
@ -1,8 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_error TEXT;
|
||||
UPDATE builds SET build_error = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_error;
|
@ -1,18 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_config_path TEXT;
|
||||
ALTER TABLE builds ADD COLUMN build_reviewer TEXT;
|
||||
ALTER TABLE builds ADD COLUMN build_reviewed INTEGER;
|
||||
ALTER TABLE builds ADD COLUMN build_sender TEXT;
|
||||
|
||||
UPDATE repos SET repo_config_path = '.drone.yml';
|
||||
UPDATE builds SET build_reviewer = '';
|
||||
UPDATE builds SET build_reviewed = 0;
|
||||
UPDATE builds SET build_sender = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_config_path;
|
||||
ALTER TABLE builds DROP COLUMN build_reviewer;
|
||||
ALTER TABLE builds DROP COLUMN build_reviewed;
|
||||
ALTER TABLE builds DROP COLUMN build_sender;
|
@ -1,46 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE procs (
|
||||
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name TEXT
|
||||
,proc_state TEXT
|
||||
,proc_error TEXT
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine TEXT
|
||||
,proc_platform TEXT
|
||||
,proc_environ TEXT
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
CREATE INDEX proc_build_ix ON procs (proc_build_id);
|
||||
|
||||
CREATE TABLE files (
|
||||
file_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name TEXT
|
||||
,file_mime TEXT
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data BLOB
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
,FOREIGN KEY(file_proc_id) REFERENCES procs (proc_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX file_build_ix ON files (file_build_id);
|
||||
CREATE INDEX file_proc_ix ON files (file_proc_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX file_build_ix;
|
||||
DROP INDEX file_proc_ix;
|
||||
DROP TABLE files;
|
||||
|
||||
DROP INDEX proc_build_ix;
|
||||
DROP TABLE procs;
|
@ -1,22 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_gated BOOLEAN;
|
||||
UPDATE repos SET repo_gated = 0;
|
||||
|
||||
CREATE TABLE senders (
|
||||
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login BOOLEAN
|
||||
,sender_allow BOOLEAN
|
||||
,sender_block BOOLEAN
|
||||
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_gated;
|
||||
DROP INDEX sender_repo_ix;
|
||||
DROP TABLE senders;
|
@ -1,11 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE tasks (
|
||||
task_id TEXT PRIMARY KEY
|
||||
,task_data BLOB
|
||||
,task_labels BLOB
|
||||
);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE tasks;
|
@ -1,17 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE config (
|
||||
config_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,config_repo_id INTEGER
|
||||
,config_hash TEXT
|
||||
,config_data BLOB
|
||||
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_config_id INTEGER;
|
||||
UPDATE builds set build_config_id = 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE config;
|
@ -1,135 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE users (
|
||||
user_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,user_login TEXT
|
||||
,user_token TEXT
|
||||
,user_secret TEXT
|
||||
,user_expiry INTEGER
|
||||
,user_email TEXT
|
||||
,user_avatar TEXT
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash TEXT
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
||||
|
||||
CREATE TABLE repos (
|
||||
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner TEXT
|
||||
,repo_name TEXT
|
||||
,repo_full_name TEXT
|
||||
,repo_avatar TEXT
|
||||
,repo_link TEXT
|
||||
,repo_clone TEXT
|
||||
,repo_branch TEXT
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash TEXT
|
||||
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
||||
|
||||
CREATE TABLE stars (
|
||||
star_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,star_repo_id INTEGER
|
||||
,star_user_id INTEGER
|
||||
|
||||
,UNIQUE(star_repo_id, star_user_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_star_user ON stars (star_user_id);
|
||||
|
||||
CREATE TABLE keys (
|
||||
key_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,key_repo_id INTEGER
|
||||
,key_public BLOB
|
||||
,key_private BLOB
|
||||
|
||||
,UNIQUE(key_repo_id)
|
||||
);
|
||||
|
||||
CREATE TABLE builds (
|
||||
build_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event TEXT
|
||||
,build_status TEXT
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit TEXT
|
||||
,build_branch TEXT
|
||||
,build_ref TEXT
|
||||
,build_refspec TEXT
|
||||
,build_remote TEXT
|
||||
,build_title TEXT
|
||||
,build_message TEXT
|
||||
,build_timestamp INTEGER
|
||||
,build_author TEXT
|
||||
,build_avatar TEXT
|
||||
,build_email TEXT
|
||||
,build_link TEXT
|
||||
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_build_repo ON builds (build_repo_id);
|
||||
CREATE INDEX ix_build_author ON builds (build_author);
|
||||
|
||||
CREATE TABLE jobs (
|
||||
job_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,job_node_id INTEGER
|
||||
,job_build_id INTEGER
|
||||
,job_number INTEGER
|
||||
,job_status TEXT
|
||||
,job_exit_code INTEGER
|
||||
,job_enqueued INTEGER
|
||||
,job_started INTEGER
|
||||
,job_finished INTEGER
|
||||
,job_environment TEXT
|
||||
|
||||
,UNIQUE(job_build_id, job_number)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_job_build ON jobs (job_build_id);
|
||||
CREATE INDEX ix_job_node ON jobs (job_node_id);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,log_job_id INTEGER
|
||||
,log_data BLOB
|
||||
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
node_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,node_addr TEXT
|
||||
,node_arch TEXT
|
||||
,node_cert BLOB
|
||||
,node_key BLOB
|
||||
,node_ca BLOB
|
||||
);
|
||||
|
||||
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
|
||||
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP TABLE nodes;
|
||||
DROP TABLE logs;
|
||||
DROP TABLE jobs;
|
||||
DROP TABLE builds;
|
||||
DROP TABLE keys;
|
||||
DROP TABLE stars;
|
||||
DROP TABLE repos;
|
||||
DROP TABLE users;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_scm TEXT;
|
||||
ALTER TABLE builds ADD COLUMN build_deploy TEXT;
|
||||
|
||||
UPDATE repos SET repo_scm = 'git';
|
||||
UPDATE builds SET build_deploy = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE repos DROP COLUMN repo_scm;
|
||||
ALTER TABLE builds DROP COLUMN build_deploy;
|
@ -1,34 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE secrets (
|
||||
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name TEXT
|
||||
,secret_value TEXT
|
||||
,secret_images TEXT
|
||||
,secret_events TEXT
|
||||
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
CREATE TABLE registry (
|
||||
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr TEXT
|
||||
,registry_username TEXT
|
||||
,registry_password TEXT
|
||||
,registry_email TEXT
|
||||
,registry_token TEXT
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
|
||||
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX ix_secrets_repo;
|
||||
DROP INDEX ix_registry_repo;
|
||||
DROP TABLE secrets;
|
||||
DROP TABLE registry;
|
@ -1,9 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE jobs ADD COLUMN job_error TEXT;
|
||||
|
||||
UPDATE jobs SET job_error = '';
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE jobs DROP COLUMN job_error;
|
@ -1,16 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_signed BOOLEAN;
|
||||
ALTER TABLE builds ADD COLUMN build_verified BOOLEAN;
|
||||
|
||||
UPDATE builds SET build_signed = 0;
|
||||
UPDATE builds SET build_verified = 0;
|
||||
|
||||
CREATE INDEX ix_build_status_running ON builds (build_status)
|
||||
WHERE build_status IN ('pending', 'running');
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_signed;
|
||||
ALTER TABLE builds DROP COLUMN build_verified;
|
||||
DROP INDEX ix_build_status_running;
|
@ -1,19 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
CREATE TABLE team_secrets (
|
||||
team_secret_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,team_secret_key TEXT
|
||||
,team_secret_name TEXT
|
||||
,team_secret_value TEXT
|
||||
,team_secret_images TEXT
|
||||
,team_secret_events TEXT
|
||||
|
||||
,UNIQUE(team_secret_name, team_secret_key)
|
||||
);
|
||||
|
||||
CREATE INDEX ix_team_secrets_key ON team_secrets (team_secret_key);
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
DROP INDEX ix_team_secrets_key;
|
||||
DROP TABLE team_secrets;
|
@ -1,7 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE builds ADD COLUMN build_parent INTEGER DEFAULT 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE builds DROP COLUMN build_parent;
|
@ -1,12 +0,0 @@
|
||||
-- +migrate Up
|
||||
|
||||
ALTER TABLE secrets ADD COLUMN secret_skip_verify BOOLEAN;
|
||||
ALTER TABLE team_secrets ADD COLUMN team_secret_skip_verify BOOLEAN;
|
||||
|
||||
UPDATE secrets SET secret_skip_verify = 0;
|
||||
UPDATE team_secrets SET team_secret_skip_verify = 0;
|
||||
|
||||
-- +migrate Down
|
||||
|
||||
ALTER TABLE secrets DROP COLUMN secret_skip_verify;
|
||||
ALTER TABLE team_secrets DROP COLUMN team_secret_skip_verify;
|
@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/drone/drone/store"
|
||||
"github.com/drone/drone/store/datastore/ddl"
|
||||
"github.com/rubenv/sql-migrate"
|
||||
"github.com/russross/meddler"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
@ -116,13 +115,7 @@ func pingDatabase(db *sql.DB) (err error) {
|
||||
// helper function to setup the databsae by performing
|
||||
// automated database migration steps.
|
||||
func setupDatabase(driver string, db *sql.DB) error {
|
||||
var migrations = &migrate.AssetMigrationSource{
|
||||
Asset: ddl.Asset,
|
||||
AssetDir: ddl.AssetDir,
|
||||
Dir: driver,
|
||||
}
|
||||
_, err := migrate.Exec(db, driver, migrations, migrate.Up)
|
||||
return err
|
||||
return ddl.Migrate(driver, db)
|
||||
}
|
||||
|
||||
// helper function to setup the meddler default driver
|
||||
|
245
vendor/github.com/rubenv/sql-migrate/README.md
generated
vendored
245
vendor/github.com/rubenv/sql-migrate/README.md
generated
vendored
@ -1,245 +0,0 @@
|
||||
# sql-migrate
|
||||
|
||||
> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
|
||||
|
||||
[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.png)](https://godoc.org/github.com/rubenv/sql-migrate)
|
||||
|
||||
Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate).
|
||||
|
||||
## Features
|
||||
|
||||
* Usable as a CLI tool or as a library
|
||||
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
|
||||
* Can embed migrations into your application
|
||||
* Migrations are defined with SQL for full flexibility
|
||||
* Atomic migrations
|
||||
* Up/down migrations to allow rollback
|
||||
* Supports multiple database types in one project
|
||||
|
||||
## Installation
|
||||
|
||||
To install the library and command line program, use the following:
|
||||
|
||||
```bash
|
||||
go get github.com/rubenv/sql-migrate/...
|
||||
```
|
||||
|
||||
## Usage
|
||||
### As a standalone tool
|
||||
```
|
||||
$ sql-migrate --help
|
||||
usage: sql-migrate [--version] [--help] <command> [<args>]
|
||||
|
||||
Available commands are:
|
||||
down Undo a database migration
|
||||
redo Reapply the last migration
|
||||
status Show migration status
|
||||
up Migrates the database to the most recent version available
|
||||
```
|
||||
|
||||
Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
|
||||
|
||||
```yml
|
||||
development:
|
||||
dialect: sqlite3
|
||||
datasource: test.db
|
||||
dir: migrations/sqlite3
|
||||
|
||||
production:
|
||||
dialect: postgres
|
||||
datasource: dbname=myapp sslmode=disable
|
||||
dir: migrations/postgres
|
||||
table: migrations
|
||||
```
|
||||
|
||||
The `table` setting is optional and will default to `gorp_migrations`.
|
||||
|
||||
The environment that will be used can be specified with the `-env` flag (defaults to `development`).
|
||||
|
||||
Use the `--help` flag in combination with any of the commands to get an overview of its usage:
|
||||
|
||||
```
|
||||
$ sql-migrate up --help
|
||||
Usage: sql-migrate up [options] ...
|
||||
|
||||
Migrates the database to the most recent version available.
|
||||
|
||||
Options:
|
||||
|
||||
-config=config.yml Configuration file to use.
|
||||
-env="development" Environment.
|
||||
-limit=0 Limit the number of migrations (0 = unlimited).
|
||||
-dryrun Don't apply migrations, just print them.
|
||||
```
|
||||
|
||||
The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
|
||||
|
||||
The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
|
||||
|
||||
Use the `status` command to see the state of the applied migrations:
|
||||
|
||||
```bash
|
||||
$ sql-migrate status
|
||||
+---------------+-----------------------------------------+
|
||||
| MIGRATION | APPLIED |
|
||||
+---------------+-----------------------------------------+
|
||||
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
|
||||
| 2_record.sql | no |
|
||||
+---------------+-----------------------------------------+
|
||||
```
|
||||
|
||||
### As a library
|
||||
Import sql-migrate into your application:
|
||||
|
||||
```go
|
||||
import "github.com/rubenv/sql-migrate"
|
||||
```
|
||||
|
||||
Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later):
|
||||
|
||||
```go
|
||||
// Hardcoded strings in memory:
|
||||
migrations := &migrate.MemoryMigrationSource{
|
||||
Migrations: []*migrate.Migration{
|
||||
&migrate.Migration{
|
||||
Id: "123",
|
||||
Up: []string{"CREATE TABLE people (id int)"},
|
||||
Down: []string{"DROP TABLE people"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// OR: Read migrations from a folder:
|
||||
migrations := &migrate.FileMigrationSource{
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
|
||||
// OR: Use migrations from bindata:
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "migrations",
|
||||
}
|
||||
```
|
||||
|
||||
Then use the `Exec` function to upgrade your database:
|
||||
|
||||
```go
|
||||
db, err := sql.Open("sqlite3", filename)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
|
||||
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
fmt.Printf("Applied %d migrations!\n", n)
|
||||
```
|
||||
|
||||
Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
|
||||
|
||||
Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
|
||||
|
||||
## Writing migrations
|
||||
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
|
||||
|
||||
```sql
|
||||
-- +migrate Up
|
||||
-- SQL in section 'Up' is executed when this migration is applied
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
-- SQL section 'Down' is executed when this migration is rolled back
|
||||
DROP TABLE people;
|
||||
```
|
||||
|
||||
You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
|
||||
|
||||
If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries:
|
||||
|
||||
```sql
|
||||
-- +migrate Up
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
-- +migrate StatementBegin
|
||||
CREATE OR REPLACE FUNCTION do_something()
|
||||
returns void AS $$
|
||||
DECLARE
|
||||
create_query text;
|
||||
BEGIN
|
||||
-- Do something here
|
||||
END;
|
||||
$$
|
||||
language plpgsql;
|
||||
-- +migrate StatementEnd
|
||||
|
||||
-- +migrate Down
|
||||
DROP FUNCTION do_something();
|
||||
DROP TABLE people;
|
||||
```
|
||||
|
||||
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
|
||||
|
||||
## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata)
|
||||
If you like your Go applications self-contained (that is: a single binary): use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files.
|
||||
|
||||
Just write your migration files as usual, as a set of SQL files in a folder.
|
||||
|
||||
Then use bindata to generate a `.go` file with the migrations embedded:
|
||||
|
||||
```bash
|
||||
go-bindata -pkg myapp -o bindata.go db/migrations/
|
||||
```
|
||||
|
||||
The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives).
|
||||
|
||||
Use the `AssetMigrationSource` in your application to find the migrations:
|
||||
|
||||
```go
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
```
|
||||
|
||||
Both `Asset` and `AssetDir` are functions provided by bindata.
|
||||
|
||||
Then proceed as usual.
|
||||
|
||||
## Extending
|
||||
Adding a new migration source means implementing `MigrationSource`.
|
||||
|
||||
```go
|
||||
type MigrationSource interface {
|
||||
FindMigrations() ([]*Migration, error)
|
||||
}
|
||||
```
|
||||
|
||||
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
|
||||
|
||||
## License
|
||||
|
||||
(The MIT License)
|
||||
|
||||
Copyright (C) 2014-2015 by Ruben Vermeersch <ruben@rocketeer.be>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
199
vendor/github.com/rubenv/sql-migrate/doc.go
generated
vendored
199
vendor/github.com/rubenv/sql-migrate/doc.go
generated
vendored
@ -1,199 +0,0 @@
|
||||
/*
|
||||
|
||||
SQL Schema migration tool for Go.
|
||||
|
||||
Key features:
|
||||
|
||||
* Usable as a CLI tool or as a library
|
||||
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
|
||||
* Can embed migrations into your application
|
||||
* Migrations are defined with SQL for full flexibility
|
||||
* Atomic migrations
|
||||
* Up/down migrations to allow rollback
|
||||
* Supports multiple database types in one project
|
||||
|
||||
Installation
|
||||
|
||||
To install the library and command line program, use the following:
|
||||
|
||||
go get github.com/rubenv/sql-migrate/...
|
||||
|
||||
Command-line tool
|
||||
|
||||
The main command is called sql-migrate.
|
||||
|
||||
$ sql-migrate --help
|
||||
usage: sql-migrate [--version] [--help] <command> [<args>]
|
||||
|
||||
Available commands are:
|
||||
down Undo a database migration
|
||||
redo Reapply the last migration
|
||||
status Show migration status
|
||||
up Migrates the database to the most recent version available
|
||||
|
||||
Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
|
||||
|
||||
development:
|
||||
dialect: sqlite3
|
||||
datasource: test.db
|
||||
dir: migrations/sqlite3
|
||||
|
||||
production:
|
||||
dialect: postgres
|
||||
datasource: dbname=myapp sslmode=disable
|
||||
dir: migrations/postgres
|
||||
table: migrations
|
||||
|
||||
The `table` setting is optional and will default to `gorp_migrations`.
|
||||
|
||||
The environment that will be used can be specified with the -env flag (defaults to development).
|
||||
|
||||
Use the --help flag in combination with any of the commands to get an overview of its usage:
|
||||
|
||||
$ sql-migrate up --help
|
||||
Usage: sql-migrate up [options] ...
|
||||
|
||||
Migrates the database to the most recent version available.
|
||||
|
||||
Options:
|
||||
|
||||
-config=config.yml Configuration file to use.
|
||||
-env="development" Environment.
|
||||
-limit=0 Limit the number of migrations (0 = unlimited).
|
||||
-dryrun Don't apply migrations, just print them.
|
||||
|
||||
The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
|
||||
|
||||
The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
|
||||
|
||||
Use the status command to see the state of the applied migrations:
|
||||
|
||||
$ sql-migrate status
|
||||
+---------------+-----------------------------------------+
|
||||
| MIGRATION | APPLIED |
|
||||
+---------------+-----------------------------------------+
|
||||
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
|
||||
| 2_record.sql | no |
|
||||
+---------------+-----------------------------------------+
|
||||
|
||||
Library
|
||||
|
||||
Import sql-migrate into your application:
|
||||
|
||||
import "github.com/rubenv/sql-migrate"
|
||||
|
||||
Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later):
|
||||
|
||||
// Hardcoded strings in memory:
|
||||
migrations := &migrate.MemoryMigrationSource{
|
||||
Migrations: []*migrate.Migration{
|
||||
&migrate.Migration{
|
||||
Id: "123",
|
||||
Up: []string{"CREATE TABLE people (id int)"},
|
||||
Down: []string{"DROP TABLE people"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// OR: Read migrations from a folder:
|
||||
migrations := &migrate.FileMigrationSource{
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
|
||||
// OR: Use migrations from bindata:
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "migrations",
|
||||
}
|
||||
|
||||
Then use the Exec function to upgrade your database:
|
||||
|
||||
db, err := sql.Open("sqlite3", filename)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
|
||||
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
|
||||
if err != nil {
|
||||
// Handle errors!
|
||||
}
|
||||
fmt.Printf("Applied %d migrations!\n", n)
|
||||
|
||||
Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
|
||||
|
||||
The full set of capabilities can be found in the API docs below.
|
||||
|
||||
Writing migrations
|
||||
|
||||
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
|
||||
|
||||
-- +migrate Up
|
||||
-- SQL in section 'Up' is executed when this migration is applied
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
|
||||
-- +migrate Down
|
||||
-- SQL section 'Down' is executed when this migration is rolled back
|
||||
DROP TABLE people;
|
||||
|
||||
You can put multiple statements in each block, as long as you end them with a semicolon (;).
|
||||
|
||||
If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
|
||||
|
||||
-- +migrate Up
|
||||
CREATE TABLE people (id int);
|
||||
|
||||
-- +migrate StatementBegin
|
||||
CREATE OR REPLACE FUNCTION do_something()
|
||||
returns void AS $$
|
||||
DECLARE
|
||||
create_query text;
|
||||
BEGIN
|
||||
-- Do something here
|
||||
END;
|
||||
$$
|
||||
language plpgsql;
|
||||
-- +migrate StatementEnd
|
||||
|
||||
-- +migrate Down
|
||||
DROP FUNCTION do_something();
|
||||
DROP TABLE people;
|
||||
|
||||
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
|
||||
|
||||
Embedding migrations with bindata
|
||||
|
||||
If you like your Go applications self-contained (that is: a single binary): use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files.
|
||||
|
||||
Just write your migration files as usual, as a set of SQL files in a folder.
|
||||
|
||||
Then use bindata to generate a .go file with the migrations embedded:
|
||||
|
||||
go-bindata -pkg myapp -o bindata.go db/migrations/
|
||||
|
||||
The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives).
|
||||
|
||||
Use the AssetMigrationSource in your application to find the migrations:
|
||||
|
||||
migrations := &migrate.AssetMigrationSource{
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
Dir: "db/migrations",
|
||||
}
|
||||
|
||||
Both Asset and AssetDir are functions provided by bindata.
|
||||
|
||||
Then proceed as usual.
|
||||
|
||||
Extending
|
||||
|
||||
Adding a new migration source means implementing MigrationSource.
|
||||
|
||||
type MigrationSource interface {
|
||||
FindMigrations() ([]*Migration, error)
|
||||
}
|
||||
|
||||
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field.
|
||||
*/
|
||||
package migrate
|
475
vendor/github.com/rubenv/sql-migrate/migrate.go
generated
vendored
475
vendor/github.com/rubenv/sql-migrate/migrate.go
generated
vendored
@ -1,475 +0,0 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rubenv/sql-migrate/sqlparse"
|
||||
"gopkg.in/gorp.v1"
|
||||
)
|
||||
|
||||
type MigrationDirection int
|
||||
|
||||
const (
|
||||
Up MigrationDirection = iota
|
||||
Down
|
||||
)
|
||||
|
||||
var tableName = "gorp_migrations"
|
||||
var schemaName = ""
|
||||
var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
|
||||
|
||||
// Set the name of the table used to store migration info.
|
||||
//
|
||||
// Should be called before any other call such as (Exec, ExecMax, ...).
|
||||
func SetTable(name string) {
|
||||
if name != "" {
|
||||
tableName = name
|
||||
}
|
||||
}
|
||||
|
||||
// SetSchema sets the name of a schema that the migration table be referenced.
|
||||
func SetSchema(name string) {
|
||||
if name != "" {
|
||||
schemaName = name
|
||||
}
|
||||
}
|
||||
|
||||
func getTableName() string {
|
||||
t := tableName
|
||||
if schemaName != "" {
|
||||
t = fmt.Sprintf("%s.%s", schemaName, t)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
type Migration struct {
|
||||
Id string
|
||||
Up []string
|
||||
Down []string
|
||||
}
|
||||
|
||||
func (m Migration) Less(other *Migration) bool {
|
||||
switch {
|
||||
case m.isNumeric() && other.isNumeric():
|
||||
return m.VersionInt() < other.VersionInt()
|
||||
case m.isNumeric() && !other.isNumeric():
|
||||
return true
|
||||
case !m.isNumeric() && other.isNumeric():
|
||||
return false
|
||||
default:
|
||||
return m.Id < other.Id
|
||||
}
|
||||
}
|
||||
|
||||
func (m Migration) isNumeric() bool {
|
||||
return len(m.NumberPrefixMatches()) > 0
|
||||
}
|
||||
|
||||
func (m Migration) NumberPrefixMatches() []string {
|
||||
return numberPrefixRegex.FindStringSubmatch(m.Id)
|
||||
}
|
||||
|
||||
func (m Migration) VersionInt() int64 {
|
||||
v := m.NumberPrefixMatches()[1]
|
||||
value, err := strconv.ParseInt(v, 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
type PlannedMigration struct {
|
||||
*Migration
|
||||
Queries []string
|
||||
}
|
||||
|
||||
type byId []*Migration
|
||||
|
||||
func (b byId) Len() int { return len(b) }
|
||||
func (b byId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
|
||||
|
||||
type MigrationRecord struct {
|
||||
Id string `db:"id"`
|
||||
AppliedAt time.Time `db:"applied_at"`
|
||||
}
|
||||
|
||||
var MigrationDialects = map[string]gorp.Dialect{
|
||||
"sqlite3": gorp.SqliteDialect{},
|
||||
"postgres": gorp.PostgresDialect{},
|
||||
"mysql": gorp.MySQLDialect{"InnoDB", "UTF8"},
|
||||
"mssql": gorp.SqlServerDialect{},
|
||||
"oci8": gorp.OracleDialect{},
|
||||
}
|
||||
|
||||
type MigrationSource interface {
|
||||
// Finds the migrations.
|
||||
//
|
||||
// The resulting slice of migrations should be sorted by Id.
|
||||
FindMigrations() ([]*Migration, error)
|
||||
}
|
||||
|
||||
// A hardcoded set of migrations, in-memory.
|
||||
type MemoryMigrationSource struct {
|
||||
Migrations []*Migration
|
||||
}
|
||||
|
||||
var _ MigrationSource = (*MemoryMigrationSource)(nil)
|
||||
|
||||
func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
|
||||
// Make sure migrations are sorted
|
||||
sort.Sort(byId(m.Migrations))
|
||||
|
||||
return m.Migrations, nil
|
||||
}
|
||||
|
||||
// A set of migrations loaded from a directory.
|
||||
type FileMigrationSource struct {
|
||||
Dir string
|
||||
}
|
||||
|
||||
var _ MigrationSource = (*FileMigrationSource)(nil)
|
||||
|
||||
func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
|
||||
migrations := make([]*Migration, 0)
|
||||
|
||||
file, err := os.Open(f.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files, err := file.Readdir(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, info := range files {
|
||||
if strings.HasSuffix(info.Name(), ".sql") {
|
||||
file, err := os.Open(path.Join(f.Dir, info.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migration, err := ParseMigration(info.Name(), file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure migrations are sorted
|
||||
sort.Sort(byId(migrations))
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// Migrations from a bindata asset set.
|
||||
type AssetMigrationSource struct {
|
||||
// Asset should return content of file in path if exists
|
||||
Asset func(path string) ([]byte, error)
|
||||
|
||||
// AssetDir should return list of files in the path
|
||||
AssetDir func(path string) ([]string, error)
|
||||
|
||||
// Path in the bindata to use.
|
||||
Dir string
|
||||
}
|
||||
|
||||
var _ MigrationSource = (*AssetMigrationSource)(nil)
|
||||
|
||||
func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
|
||||
migrations := make([]*Migration, 0)
|
||||
|
||||
files, err := a.AssetDir(a.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, name := range files {
|
||||
if strings.HasSuffix(name, ".sql") {
|
||||
file, err := a.Asset(path.Join(a.Dir, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migration, err := ParseMigration(name, bytes.NewReader(file))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure migrations are sorted
|
||||
sort.Sort(byId(migrations))
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// Migration parsing
|
||||
func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
|
||||
m := &Migration{
|
||||
Id: id,
|
||||
}
|
||||
|
||||
up, err := sqlparse.SplitSQLStatements(r, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
down, err := sqlparse.SplitSQLStatements(r, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.Up = up
|
||||
m.Down = down
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Execute a set of migrations
|
||||
//
|
||||
// Returns the number of applied migrations.
|
||||
func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
|
||||
return ExecMax(db, dialect, m, dir, 0)
|
||||
}
|
||||
|
||||
// Execute a set of migrations
|
||||
//
|
||||
// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec).
|
||||
//
|
||||
// Returns the number of applied migrations.
|
||||
func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) {
|
||||
migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Apply migrations
|
||||
applied := 0
|
||||
for _, migration := range migrations {
|
||||
trans, err := dbMap.Begin()
|
||||
if err != nil {
|
||||
return applied, err
|
||||
}
|
||||
|
||||
for _, stmt := range migration.Queries {
|
||||
_, err := trans.Exec(stmt)
|
||||
if err != nil {
|
||||
trans.Rollback()
|
||||
return applied, err
|
||||
}
|
||||
}
|
||||
|
||||
if dir == Up {
|
||||
err = trans.Insert(&MigrationRecord{
|
||||
Id: migration.Id,
|
||||
AppliedAt: time.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
return applied, err
|
||||
}
|
||||
} else if dir == Down {
|
||||
_, err := trans.Delete(&MigrationRecord{
|
||||
Id: migration.Id,
|
||||
})
|
||||
if err != nil {
|
||||
return applied, err
|
||||
}
|
||||
} else {
|
||||
panic("Not possible")
|
||||
}
|
||||
|
||||
err = trans.Commit()
|
||||
if err != nil {
|
||||
return applied, err
|
||||
}
|
||||
|
||||
applied++
|
||||
}
|
||||
|
||||
return applied, nil
|
||||
}
|
||||
|
||||
// Plan a migration.
|
||||
func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) {
|
||||
dbMap, err := getMigrationDbMap(db, dialect)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
migrations, err := m.FindMigrations()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var migrationRecords []MigrationRecord
|
||||
_, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", getTableName()))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Sort migrations that have been run by Id.
|
||||
var existingMigrations []*Migration
|
||||
for _, migrationRecord := range migrationRecords {
|
||||
existingMigrations = append(existingMigrations, &Migration{
|
||||
Id: migrationRecord.Id,
|
||||
})
|
||||
}
|
||||
sort.Sort(byId(existingMigrations))
|
||||
|
||||
// Get last migration that was run
|
||||
record := &Migration{}
|
||||
if len(existingMigrations) > 0 {
|
||||
record = existingMigrations[len(existingMigrations)-1]
|
||||
}
|
||||
|
||||
result := make([]*PlannedMigration, 0)
|
||||
|
||||
// Add missing migrations up to the last run migration.
|
||||
// This can happen for example when merges happened.
|
||||
if len(existingMigrations) > 0 {
|
||||
result = append(result, ToCatchup(migrations, existingMigrations, record)...)
|
||||
}
|
||||
|
||||
// Figure out which migrations to apply
|
||||
toApply := ToApply(migrations, record.Id, dir)
|
||||
toApplyCount := len(toApply)
|
||||
if max > 0 && max < toApplyCount {
|
||||
toApplyCount = max
|
||||
}
|
||||
for _, v := range toApply[0:toApplyCount] {
|
||||
|
||||
if dir == Up {
|
||||
result = append(result, &PlannedMigration{
|
||||
Migration: v,
|
||||
Queries: v.Up,
|
||||
})
|
||||
} else if dir == Down {
|
||||
result = append(result, &PlannedMigration{
|
||||
Migration: v,
|
||||
Queries: v.Down,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return result, dbMap, nil
|
||||
}
|
||||
|
||||
// Filter a slice of migrations into ones that should be applied.
|
||||
func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration {
|
||||
var index = -1
|
||||
if current != "" {
|
||||
for index < len(migrations)-1 {
|
||||
index++
|
||||
if migrations[index].Id == current {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if direction == Up {
|
||||
return migrations[index+1:]
|
||||
} else if direction == Down {
|
||||
if index == -1 {
|
||||
return []*Migration{}
|
||||
}
|
||||
|
||||
// Add in reverse order
|
||||
toApply := make([]*Migration, index+1)
|
||||
for i := 0; i < index+1; i++ {
|
||||
toApply[index-i] = migrations[i]
|
||||
}
|
||||
return toApply
|
||||
}
|
||||
|
||||
panic("Not possible")
|
||||
}
|
||||
|
||||
func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration {
|
||||
missing := make([]*PlannedMigration, 0)
|
||||
for _, migration := range migrations {
|
||||
found := false
|
||||
for _, existing := range existingMigrations {
|
||||
if existing.Id == migration.Id {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found && migration.Less(lastRun) {
|
||||
missing = append(missing, &PlannedMigration{Migration: migration, Queries: migration.Up})
|
||||
}
|
||||
}
|
||||
return missing
|
||||
}
|
||||
|
||||
func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) {
|
||||
dbMap, err := getMigrationDbMap(db, dialect)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var records []*MigrationRecord
|
||||
query := fmt.Sprintf("SELECT * FROM %s ORDER BY id ASC", getTableName())
|
||||
_, err = dbMap.Select(&records, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return records, nil
|
||||
}
|
||||
|
||||
func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
|
||||
d, ok := MigrationDialects[dialect]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unknown dialect: %s", dialect)
|
||||
}
|
||||
|
||||
// When using the mysql driver, make sure that the parseTime option is
|
||||
// configured, otherwise it won't map time columns to time.Time. See
|
||||
// https://github.com/rubenv/sql-migrate/issues/2
|
||||
if dialect == "mysql" {
|
||||
var out *time.Time
|
||||
err := db.QueryRow("SELECT NOW()").Scan(&out)
|
||||
if err != nil {
|
||||
if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" {
|
||||
return nil, errors.New(`Cannot parse dates.
|
||||
|
||||
Make sure that the parseTime option is supplied to your database connection.
|
||||
Check https://github.com/go-sql-driver/mysql#parsetime for more info.`)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create migration database map
|
||||
dbMap := &gorp.DbMap{Db: db, Dialect: d}
|
||||
dbMap.AddTableWithNameAndSchema(MigrationRecord{}, schemaName, tableName).SetKeys(false, "Id")
|
||||
//dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds))
|
||||
|
||||
err := dbMap.CreateTablesIfNotExists()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dbMap, nil
|
||||
}
|
||||
|
||||
// TODO: Run migration + record insert in transaction.
|
28
vendor/github.com/rubenv/sql-migrate/sqlparse/README.md
generated
vendored
28
vendor/github.com/rubenv/sql-migrate/sqlparse/README.md
generated
vendored
@ -1,28 +0,0 @@
|
||||
# SQL migration parser
|
||||
|
||||
Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser.
|
||||
|
||||
## License
|
||||
|
||||
(The MIT License)
|
||||
|
||||
Copyright (C) 2014 by Ruben Vermeersch <ruben@rocketeer.be>
|
||||
Copyright (C) 2012-2014 by Liam Staskawicz
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
128
vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go
generated
vendored
128
vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go
generated
vendored
@ -1,128 +0,0 @@
|
||||
package sqlparse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"strings"
|
||||
)
|
||||
|
||||
const sqlCmdPrefix = "-- +migrate "
|
||||
|
||||
// Checks the line to see if the line has a statement-ending semicolon
|
||||
// or if the line contains a double-dash comment.
|
||||
func endsWithSemicolon(line string) bool {
|
||||
|
||||
prev := ""
|
||||
scanner := bufio.NewScanner(strings.NewReader(line))
|
||||
scanner.Split(bufio.ScanWords)
|
||||
|
||||
for scanner.Scan() {
|
||||
word := scanner.Text()
|
||||
if strings.HasPrefix(word, "--") {
|
||||
break
|
||||
}
|
||||
prev = word
|
||||
}
|
||||
|
||||
return strings.HasSuffix(prev, ";")
|
||||
}
|
||||
|
||||
// Split the given sql script into individual statements.
|
||||
//
|
||||
// The base case is to simply split on semicolons, as these
|
||||
// naturally terminate a statement.
|
||||
//
|
||||
// However, more complex cases like pl/pgsql can have semicolons
|
||||
// within a statement. For these cases, we provide the explicit annotations
|
||||
// 'StatementBegin' and 'StatementEnd' to allow the script to
|
||||
// tell us to ignore semicolons.
|
||||
func SplitSQLStatements(r io.ReadSeeker, direction bool) ([]string, error) {
|
||||
_, err := r.Seek(0, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
scanner := bufio.NewScanner(r)
|
||||
|
||||
// track the count of each section
|
||||
// so we can diagnose scripts with no annotations
|
||||
upSections := 0
|
||||
downSections := 0
|
||||
|
||||
statementEnded := false
|
||||
ignoreSemicolons := false
|
||||
directionIsActive := false
|
||||
|
||||
stmts := make([]string, 0)
|
||||
|
||||
for scanner.Scan() {
|
||||
|
||||
line := scanner.Text()
|
||||
|
||||
// handle any migrate-specific commands
|
||||
if strings.HasPrefix(line, sqlCmdPrefix) {
|
||||
cmd := strings.TrimSpace(line[len(sqlCmdPrefix):])
|
||||
switch cmd {
|
||||
case "Up":
|
||||
directionIsActive = (direction == true)
|
||||
upSections++
|
||||
break
|
||||
|
||||
case "Down":
|
||||
directionIsActive = (direction == false)
|
||||
downSections++
|
||||
break
|
||||
|
||||
case "StatementBegin":
|
||||
if directionIsActive {
|
||||
ignoreSemicolons = true
|
||||
}
|
||||
break
|
||||
|
||||
case "StatementEnd":
|
||||
if directionIsActive {
|
||||
statementEnded = (ignoreSemicolons == true)
|
||||
ignoreSemicolons = false
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !directionIsActive {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(line + "\n"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement
|
||||
// Lines that end with semicolon that are in a statement block
|
||||
// do not conclude statement.
|
||||
if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded {
|
||||
statementEnded = false
|
||||
stmts = append(stmts, buf.String())
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// diagnose likely migration script errors
|
||||
if ignoreSemicolons {
|
||||
return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'")
|
||||
}
|
||||
|
||||
if upSections == 0 && downSections == 0 {
|
||||
return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed.
|
||||
See https://github.com/rubenv/sql-migrate for details.`)
|
||||
}
|
||||
|
||||
return stmts, nil
|
||||
}
|
22
vendor/gopkg.in/gorp.v1/LICENSE
generated
vendored
22
vendor/gopkg.in/gorp.v1/LICENSE
generated
vendored
@ -1,22 +0,0 @@
|
||||
(The MIT License)
|
||||
|
||||
Copyright (c) 2012 James Cooper <james@bitmechanic.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
'Software'), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
6
vendor/gopkg.in/gorp.v1/Makefile
generated
vendored
6
vendor/gopkg.in/gorp.v1/Makefile
generated
vendored
@ -1,6 +0,0 @@
|
||||
include $(GOROOT)/src/Make.inc
|
||||
|
||||
TARG = github.com/coopernurse/gorp
|
||||
GOFILES = gorp.go dialect.go
|
||||
|
||||
include $(GOROOT)/src/Make.pkg
|
672
vendor/gopkg.in/gorp.v1/README.md
generated
vendored
672
vendor/gopkg.in/gorp.v1/README.md
generated
vendored
@ -1,672 +0,0 @@
|
||||
# Go Relational Persistence
|
||||
|
||||
[![build status](https://secure.travis-ci.org/go-gorp/gorp.png)](http://travis-ci.org/go-gorp/gorp)
|
||||
|
||||
I hesitate to call gorp an ORM. Go doesn't really have objects, at least
|
||||
not in the classic Smalltalk/Java sense. There goes the "O". gorp doesn't
|
||||
know anything about the relationships between your structs (at least not
|
||||
yet). So the "R" is questionable too (but I use it in the name because,
|
||||
well, it seemed more clever).
|
||||
|
||||
The "M" is alive and well. Given some Go structs and a database, gorp
|
||||
should remove a fair amount of boilerplate busy-work from your code.
|
||||
|
||||
I hope that gorp saves you time, minimizes the drudgery of getting data
|
||||
in and out of your database, and helps your code focus on algorithms,
|
||||
not infrastructure.
|
||||
|
||||
* Bind struct fields to table columns via API or tag
|
||||
* Support for embedded structs
|
||||
* Support for transactions
|
||||
* Forward engineer db schema from structs (great for unit tests)
|
||||
* Pre/post insert/update/delete hooks
|
||||
* Automatically generate insert/update/delete statements for a struct
|
||||
* Automatic binding of auto increment PKs back to struct after insert
|
||||
* Delete by primary key(s)
|
||||
* Select by primary key(s)
|
||||
* Optional trace sql logging
|
||||
* Bind arbitrary SQL queries to a struct
|
||||
* Bind slice to SELECT query results without type assertions
|
||||
* Use positional or named bind parameters in custom SELECT queries
|
||||
* Optional optimistic locking using a version column (for update/deletes)
|
||||
|
||||
## Installation
|
||||
|
||||
# install the library:
|
||||
go get gopkg.in/gorp.v1
|
||||
|
||||
// use in your .go code:
|
||||
import (
|
||||
"gopkg.in/gorp.v1"
|
||||
)
|
||||
|
||||
## Versioning
|
||||
|
||||
This project provides a stable release (v1.x tags) and a bleeding edge codebase (master).
|
||||
|
||||
`gopkg.in/gorp.v1` points to the latest v1.x tag. The API's for v1 are stable and shouldn't change. Development takes place at the master branch. Althought the code in master should always compile and test successfully, it might break API's. We aim to maintain backwards compatibility, but API's and behaviour might be changed to fix a bug. Also note that API's that are new in the master branch can change until released as v2.
|
||||
|
||||
If you want to use bleeding edge, use `github.com/go-gorp/gorp` as import path.
|
||||
|
||||
## API Documentation
|
||||
|
||||
Full godoc output from the latest v1 release is available here:
|
||||
|
||||
https://godoc.org/gopkg.in/gorp.v1
|
||||
|
||||
For the latest code in master:
|
||||
|
||||
https://godoc.org/github.com/go-gorp/gorp
|
||||
|
||||
## Quickstart
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"gopkg.in/gorp.v1"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// initialize the DbMap
|
||||
dbmap := initDb()
|
||||
defer dbmap.Db.Close()
|
||||
|
||||
// delete any existing rows
|
||||
err := dbmap.TruncateTables()
|
||||
checkErr(err, "TruncateTables failed")
|
||||
|
||||
// create two posts
|
||||
p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum")
|
||||
p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum")
|
||||
|
||||
// insert rows - auto increment PKs will be set properly after the insert
|
||||
err = dbmap.Insert(&p1, &p2)
|
||||
checkErr(err, "Insert failed")
|
||||
|
||||
// use convenience SelectInt
|
||||
count, err := dbmap.SelectInt("select count(*) from posts")
|
||||
checkErr(err, "select count(*) failed")
|
||||
log.Println("Rows after inserting:", count)
|
||||
|
||||
// update a row
|
||||
p2.Title = "Go 1.2 is better than ever"
|
||||
count, err = dbmap.Update(&p2)
|
||||
checkErr(err, "Update failed")
|
||||
log.Println("Rows updated:", count)
|
||||
|
||||
// fetch one row - note use of "post_id" instead of "Id" since column is aliased
|
||||
//
|
||||
// Postgres users should use $1 instead of ? placeholders
|
||||
// See 'Known Issues' below
|
||||
//
|
||||
err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id)
|
||||
checkErr(err, "SelectOne failed")
|
||||
log.Println("p2 row:", p2)
|
||||
|
||||
// fetch all rows
|
||||
var posts []Post
|
||||
_, err = dbmap.Select(&posts, "select * from posts order by post_id")
|
||||
checkErr(err, "Select failed")
|
||||
log.Println("All rows:")
|
||||
for x, p := range posts {
|
||||
log.Printf(" %d: %v\n", x, p)
|
||||
}
|
||||
|
||||
// delete row by PK
|
||||
count, err = dbmap.Delete(&p1)
|
||||
checkErr(err, "Delete failed")
|
||||
log.Println("Rows deleted:", count)
|
||||
|
||||
// delete row manually via Exec
|
||||
_, err = dbmap.Exec("delete from posts where post_id=?", p2.Id)
|
||||
checkErr(err, "Exec failed")
|
||||
|
||||
// confirm count is zero
|
||||
count, err = dbmap.SelectInt("select count(*) from posts")
|
||||
checkErr(err, "select count(*) failed")
|
||||
log.Println("Row count - should be zero:", count)
|
||||
|
||||
log.Println("Done!")
|
||||
}
|
||||
|
||||
type Post struct {
|
||||
// db tag lets you specify the column name if it differs from the struct field
|
||||
Id int64 `db:"post_id"`
|
||||
Created int64
|
||||
Title string
|
||||
Body string
|
||||
}
|
||||
|
||||
func newPost(title, body string) Post {
|
||||
return Post{
|
||||
Created: time.Now().UnixNano(),
|
||||
Title: title,
|
||||
Body: body,
|
||||
}
|
||||
}
|
||||
|
||||
func initDb() *gorp.DbMap {
|
||||
// connect to db using standard Go database/sql API
|
||||
// use whatever database/sql driver you wish
|
||||
db, err := sql.Open("sqlite3", "/tmp/post_db.bin")
|
||||
checkErr(err, "sql.Open failed")
|
||||
|
||||
// construct a gorp DbMap
|
||||
dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}
|
||||
|
||||
// add a table, setting the table name to 'posts' and
|
||||
// specifying that the Id property is an auto incrementing PK
|
||||
dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id")
|
||||
|
||||
// create the table. in a production system you'd generally
|
||||
// use a migration tool, or create the tables via scripts
|
||||
err = dbmap.CreateTablesIfNotExists()
|
||||
checkErr(err, "Create tables failed")
|
||||
|
||||
return dbmap
|
||||
}
|
||||
|
||||
func checkErr(err error, msg string) {
|
||||
if err != nil {
|
||||
log.Fatalln(msg, err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Mapping structs to tables
|
||||
|
||||
First define some types:
|
||||
|
||||
```go
|
||||
type Invoice struct {
|
||||
Id int64
|
||||
Created int64
|
||||
Updated int64
|
||||
Memo string
|
||||
PersonId int64
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Id int64
|
||||
Created int64
|
||||
Updated int64
|
||||
FName string
|
||||
LName string
|
||||
}
|
||||
|
||||
// Example of using tags to alias fields to column names
|
||||
// The 'db' value is the column name
|
||||
//
|
||||
// A hyphen will cause gorp to skip this field, similar to the
|
||||
// Go json package.
|
||||
//
|
||||
// This is equivalent to using the ColMap methods:
|
||||
//
|
||||
// table := dbmap.AddTableWithName(Product{}, "product")
|
||||
// table.ColMap("Id").Rename("product_id")
|
||||
// table.ColMap("Price").Rename("unit_price")
|
||||
// table.ColMap("IgnoreMe").SetTransient(true)
|
||||
//
|
||||
type Product struct {
|
||||
Id int64 `db:"product_id"`
|
||||
Price int64 `db:"unit_price"`
|
||||
IgnoreMe string `db:"-"`
|
||||
}
|
||||
```
|
||||
|
||||
Then create a mapper, typically you'd do this one time at app startup:
|
||||
|
||||
```go
|
||||
// connect to db using standard Go database/sql API
|
||||
// use whatever database/sql driver you wish
|
||||
db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword")
|
||||
|
||||
// construct a gorp DbMap
|
||||
dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}}
|
||||
|
||||
// register the structs you wish to use with gorp
|
||||
// you can also use the shorter dbmap.AddTable() if you
|
||||
// don't want to override the table name
|
||||
//
|
||||
// SetKeys(true) means we have a auto increment primary key, which
|
||||
// will get automatically bound to your struct post-insert
|
||||
//
|
||||
t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
|
||||
t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
|
||||
t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id")
|
||||
```
|
||||
|
||||
### Struct Embedding
|
||||
|
||||
gorp supports embedding structs. For example:
|
||||
|
||||
```go
|
||||
type Names struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
}
|
||||
|
||||
type WithEmbeddedStruct struct {
|
||||
Id int64
|
||||
Names
|
||||
}
|
||||
|
||||
es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}}
|
||||
err := dbmap.Insert(es)
|
||||
```
|
||||
|
||||
See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example.
|
||||
|
||||
### Create/Drop Tables ###
|
||||
|
||||
Automatically create / drop registered tables. This is useful for unit tests
|
||||
but is entirely optional. You can of course use gorp with tables created manually,
|
||||
or with a separate migration tool (like goose: https://bitbucket.org/liamstask/goose).
|
||||
|
||||
```go
|
||||
// create all registered tables
|
||||
dbmap.CreateTables()
|
||||
|
||||
// same as above, but uses "if not exists" clause to skip tables that are
|
||||
// already defined
|
||||
dbmap.CreateTablesIfNotExists()
|
||||
|
||||
// drop
|
||||
dbmap.DropTables()
|
||||
```
|
||||
|
||||
### SQL Logging
|
||||
|
||||
Optionally you can pass in a logger to trace all SQL statements.
|
||||
I recommend enabling this initially while you're getting the feel for what
|
||||
gorp is doing on your behalf.
|
||||
|
||||
Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies.
|
||||
However, you can write your own `GorpLogger` implementation, or use a package such
|
||||
as `glog` if you want more control over how statements are logged.
|
||||
|
||||
```go
|
||||
// Will log all SQL statements + args as they are run
|
||||
// The first arg is a string prefix to prepend to all log messages
|
||||
dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds))
|
||||
|
||||
// Turn off tracing
|
||||
dbmap.TraceOff()
|
||||
```
|
||||
|
||||
### Insert
|
||||
|
||||
```go
|
||||
// Must declare as pointers so optional callback hooks
|
||||
// can operate on your data, not copies
|
||||
inv1 := &Invoice{0, 100, 200, "first order", 0}
|
||||
inv2 := &Invoice{0, 100, 200, "second order", 0}
|
||||
|
||||
// Insert your rows
|
||||
err := dbmap.Insert(inv1, inv2)
|
||||
|
||||
// Because we called SetKeys(true) on Invoice, the Id field
|
||||
// will be populated after the Insert() automatically
|
||||
fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id)
|
||||
```
|
||||
|
||||
### Update
|
||||
|
||||
Continuing the above example, use the `Update` method to modify an Invoice:
|
||||
|
||||
```go
|
||||
// count is the # of rows updated, which should be 1 in this example
|
||||
count, err := dbmap.Update(inv1)
|
||||
```
|
||||
|
||||
### Delete
|
||||
|
||||
If you have primary key(s) defined for a struct, you can use the `Delete`
|
||||
method to remove rows:
|
||||
|
||||
```go
|
||||
count, err := dbmap.Delete(inv1)
|
||||
```
|
||||
|
||||
### Select by Key
|
||||
|
||||
Use the `Get` method to fetch a single row by primary key. It returns
|
||||
nil if no row is found.
|
||||
|
||||
```go
|
||||
// fetch Invoice with Id=99
|
||||
obj, err := dbmap.Get(Invoice{}, 99)
|
||||
inv := obj.(*Invoice)
|
||||
```
|
||||
|
||||
### Ad Hoc SQL
|
||||
|
||||
#### SELECT
|
||||
|
||||
`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice
|
||||
or a single struct.
|
||||
|
||||
```go
|
||||
// Select a slice - first return value is not needed when a slice pointer is passed to Select()
|
||||
var posts []Post
|
||||
_, err := dbmap.Select(&posts, "select * from post order by id")
|
||||
|
||||
// You can also use primitive types
|
||||
var ids []string
|
||||
_, err := dbmap.Select(&ids, "select id from post")
|
||||
|
||||
// Select a single row.
|
||||
// Returns an error if no row found, or if more than one row is found
|
||||
var post Post
|
||||
err := dbmap.SelectOne(&post, "select * from post where id=?", id)
|
||||
```
|
||||
|
||||
Want to do joins? Just write the SQL and the struct. gorp will bind them:
|
||||
|
||||
```go
|
||||
// Define a type for your join
|
||||
// It *must* contain all the columns in your SELECT statement
|
||||
//
|
||||
// The names here should match the aliased column names you specify
|
||||
// in your SQL - no additional binding work required. simple.
|
||||
//
|
||||
type InvoicePersonView struct {
|
||||
InvoiceId int64
|
||||
PersonId int64
|
||||
Memo string
|
||||
FName string
|
||||
}
|
||||
|
||||
// Create some rows
|
||||
p1 := &Person{0, 0, 0, "bob", "smith"}
|
||||
dbmap.Insert(p1)
|
||||
|
||||
// notice how we can wire up p1.Id to the invoice easily
|
||||
inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id}
|
||||
dbmap.Insert(inv1)
|
||||
|
||||
// Run your query
|
||||
query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " +
|
||||
"from invoice_test i, person_test p " +
|
||||
"where i.PersonId = p.Id"
|
||||
|
||||
// pass a slice to Select()
|
||||
var list []InvoicePersonView
|
||||
_, err := dbmap.Select(&list, query)
|
||||
|
||||
// this should test true
|
||||
expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName}
|
||||
if reflect.DeepEqual(list[0], expected) {
|
||||
fmt.Println("Woot! My join worked!")
|
||||
}
|
||||
```
|
||||
|
||||
#### SELECT string or int64
|
||||
|
||||
gorp provides a few convenience methods for selecting a single string or int64.
|
||||
|
||||
```go
|
||||
// select single int64 from db (use $1 instead of ? for postgresql)
|
||||
i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal)
|
||||
|
||||
// select single string from db:
|
||||
s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal)
|
||||
|
||||
```
|
||||
|
||||
#### Named bind parameters
|
||||
|
||||
You may use a map or struct to bind parameters by name. This is currently
|
||||
only supported in SELECT queries.
|
||||
|
||||
```go
|
||||
_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{
|
||||
"name": "Rob",
|
||||
"age": 31,
|
||||
})
|
||||
```
|
||||
|
||||
#### UPDATE / DELETE
|
||||
|
||||
You can execute raw SQL if you wish. Particularly good for batch operations.
|
||||
|
||||
```go
|
||||
res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10)
|
||||
```
|
||||
|
||||
### Transactions
|
||||
|
||||
You can batch operations into a transaction:
|
||||
|
||||
```go
|
||||
func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error {
|
||||
// Start a new transaction
|
||||
trans, err := dbmap.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trans.Insert(per)
|
||||
inv.PersonId = per.Id
|
||||
trans.Insert(inv)
|
||||
|
||||
// if the commit is successful, a nil error is returned
|
||||
return trans.Commit()
|
||||
}
|
||||
```
|
||||
|
||||
### Hooks
|
||||
|
||||
Use hooks to update data before/after saving to the db. Good for timestamps:
|
||||
|
||||
```go
|
||||
// implement the PreInsert and PreUpdate hooks
|
||||
func (i *Invoice) PreInsert(s gorp.SqlExecutor) error {
|
||||
i.Created = time.Now().UnixNano()
|
||||
i.Updated = i.Created
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error {
|
||||
i.Updated = time.Now().UnixNano()
|
||||
return nil
|
||||
}
|
||||
|
||||
// You can use the SqlExecutor to cascade additional SQL
|
||||
// Take care to avoid cycles. gorp won't prevent them.
|
||||
//
|
||||
// Here's an example of a cascading delete
|
||||
//
|
||||
func (p *Person) PreDelete(s gorp.SqlExecutor) error {
|
||||
query := "delete from invoice_test where PersonId=?"
|
||||
err := s.Exec(query, p.Id); if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
Full list of hooks that you can implement:
|
||||
|
||||
PostGet
|
||||
PreInsert
|
||||
PostInsert
|
||||
PreUpdate
|
||||
PostUpdate
|
||||
PreDelete
|
||||
PostDelete
|
||||
|
||||
All have the same signature. for example:
|
||||
|
||||
func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error
|
||||
|
||||
### Optimistic Locking
|
||||
|
||||
gorp provides a simple optimistic locking feature, similar to Java's JPA, that
|
||||
will raise an error if you try to update/delete a row whose `version` column
|
||||
has a value different than the one in memory. This provides a safe way to do
|
||||
"select then update" style operations without explicit read and write locks.
|
||||
|
||||
```go
|
||||
// Version is an auto-incremented number, managed by gorp
|
||||
// If this property is present on your struct, update
|
||||
// operations will be constrained
|
||||
//
|
||||
// For example, say we defined Person as:
|
||||
|
||||
type Person struct {
|
||||
Id int64
|
||||
Created int64
|
||||
Updated int64
|
||||
FName string
|
||||
LName string
|
||||
|
||||
// automatically used as the Version col
|
||||
// use table.SetVersionCol("columnName") to map a different
|
||||
// struct field as the version field
|
||||
Version int64
|
||||
}
|
||||
|
||||
p1 := &Person{0, 0, 0, "Bob", "Smith", 0}
|
||||
dbmap.Insert(p1) // Version is now 1
|
||||
|
||||
obj, err := dbmap.Get(Person{}, p1.Id)
|
||||
p2 := obj.(*Person)
|
||||
p2.LName = "Edwards"
|
||||
dbmap.Update(p2) // Version is now 2
|
||||
|
||||
p1.LName = "Howard"
|
||||
|
||||
// Raises error because p1.Version == 1, which is out of date
|
||||
count, err := dbmap.Update(p1)
|
||||
_, ok := err.(gorp.OptimisticLockError)
|
||||
if ok {
|
||||
// should reach this statement
|
||||
|
||||
// in a real app you might reload the row and retry, or
|
||||
// you might propegate this to the user, depending on the desired
|
||||
// semantics
|
||||
fmt.Printf("Tried to update row with stale data: %v\n", err)
|
||||
} else {
|
||||
// some other db error occurred - log or return up the stack
|
||||
fmt.Printf("Unknown db err: %v\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
## Database Drivers
|
||||
|
||||
gorp uses the Go 1 `database/sql` package. A full list of compliant drivers is available here:
|
||||
|
||||
http://code.google.com/p/go-wiki/wiki/SQLDrivers
|
||||
|
||||
Sadly, SQL databases differ on various issues. gorp provides a Dialect interface that should be
|
||||
implemented per database vendor. Dialects are provided for:
|
||||
|
||||
* MySQL
|
||||
* PostgreSQL
|
||||
* sqlite3
|
||||
|
||||
Each of these three databases pass the test suite. See `gorp_test.go` for example
|
||||
DSNs for these three databases.
|
||||
|
||||
Support is also provided for:
|
||||
|
||||
* Oracle (contributed by @klaidliadon)
|
||||
* SQL Server (contributed by @qrawl) - use driver: github.com/denisenkom/go-mssqldb
|
||||
|
||||
Note that these databases are not covered by CI and I (@coopernurse) have no good way to
|
||||
test them locally. So please try them and send patches as needed, but expect a bit more
|
||||
unpredicability.
|
||||
|
||||
## Known Issues
|
||||
|
||||
### SQL placeholder portability
|
||||
|
||||
Different databases use different strings to indicate variable placeholders in
|
||||
prepared SQL statements. Unlike some database abstraction layers (such as JDBC),
|
||||
Go's `database/sql` does not standardize this.
|
||||
|
||||
SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` methods delegates
|
||||
to a Dialect implementation for each database, and will generate portable SQL.
|
||||
|
||||
Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, etc will not be
|
||||
parsed. Consequently you may have portability issues if you write a query like this:
|
||||
|
||||
```go
|
||||
// works on MySQL and Sqlite3, but not with Postgresql
|
||||
err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
|
||||
```
|
||||
|
||||
In `Select` and `SelectOne` you can use named parameters to work around this.
|
||||
The following is portable:
|
||||
|
||||
```go
|
||||
err := dbmap.SelectOne(&val, "select * from foo where id = :id",
|
||||
map[string]interface{} { "id": 30})
|
||||
```
|
||||
|
||||
### time.Time and time zones
|
||||
|
||||
gorp will pass `time.Time` fields through to the `database/sql` driver, but note that
|
||||
the behavior of this type varies across database drivers.
|
||||
|
||||
MySQL users should be especially cautious. See: https://github.com/ziutek/mymysql/pull/77
|
||||
|
||||
To avoid any potential issues with timezone/DST, consider using an integer field for time
|
||||
data and storing UNIX time.
|
||||
|
||||
## Running the tests
|
||||
|
||||
The included tests may be run against MySQL, Postgresql, or sqlite3.
|
||||
You must set two environment variables so the test code knows which driver to
|
||||
use, and how to connect to your database.
|
||||
|
||||
```sh
|
||||
# MySQL example:
|
||||
export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
|
||||
export GORP_TEST_DIALECT=mysql
|
||||
|
||||
# run the tests
|
||||
go test
|
||||
|
||||
# run the tests and benchmarks
|
||||
go test -bench="Bench" -benchtime 10
|
||||
```
|
||||
|
||||
Valid `GORP_TEST_DIALECT` values are: "mysql", "postgres", "sqlite3"
|
||||
See the `test_all.sh` script for examples of all 3 databases. This is the script I run
|
||||
locally to test the library.
|
||||
|
||||
## Performance
|
||||
|
||||
gorp uses reflection to construct SQL queries and bind parameters. See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a simple perf test. On my MacBook Pro gorp is about 2-3% slower than hand written SQL.
|
||||
|
||||
## Help/Support
|
||||
|
||||
IRC: #gorp
|
||||
Mailing list: gorp-dev@googlegroups.com
|
||||
Bugs/Enhancements: Create a github issue
|
||||
|
||||
## Pull requests / Contributions
|
||||
|
||||
Contributions are very welcome. Please follow these guidelines:
|
||||
|
||||
* Fork the `master` branch and issue pull requests targeting the `master` branch
|
||||
* If you are adding an enhancement, please open an issue first with your proposed change.
|
||||
* Changes that break backwards compatibility in the public API are only accepted after we
|
||||
discuss on a GitHub issue for a while.
|
||||
|
||||
Thanks!
|
||||
|
||||
## Contributors
|
||||
|
||||
* matthias-margush - column aliasing via tags
|
||||
* Rob Figueiredo - @robfig
|
||||
* Quinn Slack - @sqs
|
692
vendor/gopkg.in/gorp.v1/dialect.go
generated
vendored
692
vendor/gopkg.in/gorp.v1/dialect.go
generated
vendored
@ -1,692 +0,0 @@
|
||||
package gorp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The Dialect interface encapsulates behaviors that differ across
|
||||
// SQL databases. At present the Dialect is only used by CreateTables()
|
||||
// but this could change in the future
|
||||
type Dialect interface {
|
||||
|
||||
// adds a suffix to any query, usually ";"
|
||||
QuerySuffix() string
|
||||
|
||||
// ToSqlType returns the SQL column type to use when creating a
|
||||
// table of the given Go Type. maxsize can be used to switch based on
|
||||
// size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB,
|
||||
// or LONGBLOB depending on the maxsize
|
||||
ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string
|
||||
|
||||
// string to append to primary key column definitions
|
||||
AutoIncrStr() string
|
||||
|
||||
// string to bind autoincrement columns to. Empty string will
|
||||
// remove reference to those columns in the INSERT statement.
|
||||
AutoIncrBindValue() string
|
||||
|
||||
AutoIncrInsertSuffix(col *ColumnMap) string
|
||||
|
||||
// string to append to "create table" statement for vendor specific
|
||||
// table attributes
|
||||
CreateTableSuffix() string
|
||||
|
||||
// string to truncate tables
|
||||
TruncateClause() string
|
||||
|
||||
// bind variable string to use when forming SQL statements
|
||||
// in many dbs it is "?", but Postgres appears to use $1
|
||||
//
|
||||
// i is a zero based index of the bind variable in this statement
|
||||
//
|
||||
BindVar(i int) string
|
||||
|
||||
// Handles quoting of a field name to ensure that it doesn't raise any
|
||||
// SQL parsing exceptions by using a reserved word as a field name.
|
||||
QuoteField(field string) string
|
||||
|
||||
// Handles building up of a schema.database string that is compatible with
|
||||
// the given dialect
|
||||
//
|
||||
// schema - The schema that <table> lives in
|
||||
// table - The table name
|
||||
QuotedTableForQuery(schema string, table string) string
|
||||
|
||||
// Existance clause for table creation / deletion
|
||||
IfSchemaNotExists(command, schema string) string
|
||||
IfTableExists(command, schema, table string) string
|
||||
IfTableNotExists(command, schema, table string) string
|
||||
}
|
||||
|
||||
// IntegerAutoIncrInserter is implemented by dialects that can perform
|
||||
// inserts with automatically incremented integer primary keys. If
|
||||
// the dialect can handle automatic assignment of more than just
|
||||
// integers, see TargetedAutoIncrInserter.
|
||||
type IntegerAutoIncrInserter interface {
|
||||
InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error)
|
||||
}
|
||||
|
||||
// TargetedAutoIncrInserter is implemented by dialects that can
|
||||
// perform automatic assignment of any primary key type (i.e. strings
|
||||
// for uuids, integers for serials, etc).
|
||||
type TargetedAutoIncrInserter interface {
|
||||
// InsertAutoIncrToTarget runs an insert operation and assigns the
|
||||
// automatically generated primary key directly to the passed in
|
||||
// target. The target should be a pointer to the primary key
|
||||
// field of the value being inserted.
|
||||
InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error
|
||||
}
|
||||
|
||||
func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
res, err := exec.Exec(insertSql, params...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return res.LastInsertId()
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// sqlite3 //
|
||||
/////////////
|
||||
|
||||
// SqliteDialect implements gorp's dialect hooks for SQLite (sqlite3).
type SqliteDialect struct {
	// suffix is appended verbatim to generated CREATE TABLE statements.
	suffix string
}
|
||||
|
||||
// QuerySuffix returns the terminator appended to generated statements.
func (d SqliteDialect) QuerySuffix() string { return ";" }

// ToSqlType maps a Go type to the SQLite column type used when creating
// tables. maxsize bounds varchar columns (defaulting to 255 when < 1);
// isAutoIncr is unused here because SQLite handles auto-increment via
// the column suffix, not the type.
func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.Ptr:
		// Pointers map to the type they point at.
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		// SQLite has no native boolean; stored as an integer.
		return "integer"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return "integer"
	case reflect.Float64, reflect.Float32:
		return "real"
	case reflect.Slice:
		// Only []byte gets a dedicated column type.
		if val.Elem().Kind() == reflect.Uint8 {
			return "blob"
		}
	}

	// Fall back on well-known named types (database/sql null wrappers,
	// time.Time) matched by name.
	switch val.Name() {
	case "NullInt64":
		return "integer"
	case "NullFloat64":
		return "real"
	case "NullBool":
		return "integer"
	case "Time":
		return "datetime"
	}

	// Everything else is stored as a bounded varchar.
	if maxsize < 1 {
		maxsize = 255
	}
	return fmt.Sprintf("varchar(%d)", maxsize)
}
|
||||
|
||||
// Returns autoincrement
|
||||
func (d SqliteDialect) AutoIncrStr() string {
|
||||
return "autoincrement"
|
||||
}
|
||||
|
||||
func (d SqliteDialect) AutoIncrBindValue() string {
|
||||
return "null"
|
||||
}
|
||||
|
||||
func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d SqliteDialect) CreateTableSuffix() string {
|
||||
return d.suffix
|
||||
}
|
||||
|
||||
// With sqlite, there technically isn't a TRUNCATE statement,
|
||||
// but a DELETE FROM uses a truncate optimization:
|
||||
// http://www.sqlite.org/lang_delete.html
|
||||
func (d SqliteDialect) TruncateClause() string {
|
||||
return "delete from"
|
||||
}
|
||||
|
||||
// Returns "?"
|
||||
func (d SqliteDialect) BindVar(i int) string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
return standardInsertAutoIncr(exec, insertSql, params...)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) QuoteField(f string) string {
|
||||
return `"` + f + `"`
|
||||
}
|
||||
|
||||
// sqlite does not have schemas like PostgreSQL does, so just escape it like normal
|
||||
func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d SqliteDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// PostgreSQL //
|
||||
////////////////
|
||||
|
||||
// PostgresDialect implements gorp's dialect hooks for PostgreSQL.
type PostgresDialect struct {
	// suffix is appended verbatim to generated CREATE TABLE statements.
	suffix string
}
|
||||
|
||||
// QuerySuffix returns the terminator appended to generated statements.
func (d PostgresDialect) QuerySuffix() string { return ";" }

// ToSqlType maps a Go type to the PostgreSQL column type used when
// creating tables. Auto-increment integer columns become serial /
// bigserial; unbounded strings become text.
func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.Ptr:
		// Pointers map to the type they point at.
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		return "boolean"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		if isAutoIncr {
			return "serial"
		}
		return "integer"
	case reflect.Int64, reflect.Uint64:
		if isAutoIncr {
			return "bigserial"
		}
		return "bigint"
	case reflect.Float64:
		return "double precision"
	case reflect.Float32:
		return "real"
	case reflect.Slice:
		// Only []byte gets a dedicated column type.
		if val.Elem().Kind() == reflect.Uint8 {
			return "bytea"
		}
	}

	// Fall back on well-known named types (database/sql null wrappers,
	// time.Time) matched by name.
	switch val.Name() {
	case "NullInt64":
		return "bigint"
	case "NullFloat64":
		return "double precision"
	case "NullBool":
		return "boolean"
	case "Time":
		return "timestamp with time zone"
	}

	// Strings: bounded varchar when a size was given, otherwise text.
	if maxsize > 0 {
		return fmt.Sprintf("varchar(%d)", maxsize)
	} else {
		return "text"
	}

}
|
||||
|
||||
// Returns empty string
|
||||
func (d PostgresDialect) AutoIncrStr() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d PostgresDialect) AutoIncrBindValue() string {
|
||||
return "default"
|
||||
}
|
||||
|
||||
func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return " returning " + col.ColumnName
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d PostgresDialect) CreateTableSuffix() string {
|
||||
return d.suffix
|
||||
}
|
||||
|
||||
func (d PostgresDialect) TruncateClause() string {
|
||||
return "truncate"
|
||||
}
|
||||
|
||||
// Returns "$(i+1)"
|
||||
func (d PostgresDialect) BindVar(i int) string {
|
||||
return fmt.Sprintf("$%d", i+1)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error {
|
||||
rows, err := exec.query(insertSql, params...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
err := rows.Scan(target)
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error())
|
||||
}
|
||||
|
||||
func (d PostgresDialect) QuoteField(f string) string {
|
||||
return `"` + strings.ToLower(f) + `"`
|
||||
}
|
||||
|
||||
func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
return schema + "." + d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d PostgresDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// MySQL //
|
||||
///////////
|
||||
|
||||
// Implementation of Dialect for MySQL databases.
type MySQLDialect struct {

	// Engine is the storage engine to use "InnoDB" vs "MyISAM" for example
	Engine string

	// Encoding is the character encoding to use for created tables
	Encoding string
}
|
||||
|
||||
// QuerySuffix returns the terminator appended to generated statements.
func (d MySQLDialect) QuerySuffix() string { return ";" }

// ToSqlType maps a Go type to the MySQL column type used when creating
// tables. Unsigned Go integers map to MySQL's unsigned variants;
// maxsize bounds varchar columns (defaulting to 255 when < 1).
func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.Ptr:
		// Pointers map to the type they point at.
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		return "boolean"
	case reflect.Int8:
		return "tinyint"
	case reflect.Uint8:
		return "tinyint unsigned"
	case reflect.Int16:
		return "smallint"
	case reflect.Uint16:
		return "smallint unsigned"
	case reflect.Int, reflect.Int32:
		return "int"
	case reflect.Uint, reflect.Uint32:
		return "int unsigned"
	case reflect.Int64:
		return "bigint"
	case reflect.Uint64:
		return "bigint unsigned"
	case reflect.Float64, reflect.Float32:
		return "double"
	case reflect.Slice:
		// Only []byte gets a dedicated column type.
		if val.Elem().Kind() == reflect.Uint8 {
			return "mediumblob"
		}
	}

	// Fall back on well-known named types (database/sql null wrappers,
	// time.Time) matched by name.
	switch val.Name() {
	case "NullInt64":
		return "bigint"
	case "NullFloat64":
		return "double"
	case "NullBool":
		return "tinyint"
	case "Time":
		return "datetime"
	}

	// Everything else is stored as a bounded varchar.
	if maxsize < 1 {
		maxsize = 255
	}
	return fmt.Sprintf("varchar(%d)", maxsize)
}
|
||||
|
||||
// Returns auto_increment
|
||||
func (d MySQLDialect) AutoIncrStr() string {
|
||||
return "auto_increment"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) AutoIncrBindValue() string {
|
||||
return "null"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns engine=%s charset=%s based on values stored on struct
|
||||
func (d MySQLDialect) CreateTableSuffix() string {
|
||||
if d.Engine == "" || d.Encoding == "" {
|
||||
msg := "gorp - undefined"
|
||||
|
||||
if d.Engine == "" {
|
||||
msg += " MySQLDialect.Engine"
|
||||
}
|
||||
if d.Engine == "" && d.Encoding == "" {
|
||||
msg += ","
|
||||
}
|
||||
if d.Encoding == "" {
|
||||
msg += " MySQLDialect.Encoding"
|
||||
}
|
||||
msg += ". Check that your MySQLDialect was correctly initialized when declared."
|
||||
panic(msg)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) TruncateClause() string {
|
||||
return "truncate"
|
||||
}
|
||||
|
||||
// Returns "?"
|
||||
func (d MySQLDialect) BindVar(i int) string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
return standardInsertAutoIncr(exec, insertSql, params...)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) QuoteField(f string) string {
|
||||
return "`" + f + "`"
|
||||
}
|
||||
|
||||
func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
return schema + "." + d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d MySQLDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Sql Server //
|
||||
////////////////
|
||||
|
||||
// Implementation of Dialect for Microsoft SQL Server databases.
// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb

type SqlServerDialect struct {
	// suffix is appended verbatim to generated CREATE TABLE statements.
	suffix string
}
|
||||
|
||||
// ToSqlType maps a Go type to the SQL Server column type used when
// creating tables. Unsigned Go integers are widened to the next larger
// signed SQL type (SQL Server has no unsigned integer columns).
func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.Ptr:
		// Pointers map to the type they point at.
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		return "bit"
	case reflect.Int8:
		return "tinyint"
	case reflect.Uint8:
		return "smallint"
	case reflect.Int16:
		return "smallint"
	case reflect.Uint16:
		return "int"
	case reflect.Int, reflect.Int32:
		return "int"
	case reflect.Uint, reflect.Uint32:
		return "bigint"
	case reflect.Int64:
		return "bigint"
	case reflect.Uint64:
		return "bigint"
	case reflect.Float32:
		return "real"
	case reflect.Float64:
		return "float(53)"
	case reflect.Slice:
		// Only []byte gets a dedicated column type.
		if val.Elem().Kind() == reflect.Uint8 {
			return "varbinary"
		}
	}

	// Fall back on well-known named types (database/sql null wrappers,
	// time.Time) matched by name.
	switch val.Name() {
	case "NullInt64":
		return "bigint"
	case "NullFloat64":
		return "float(53)"
	case "NullBool":
		return "tinyint"
	case "Time":
		return "datetime"
	}

	// Everything else is stored as a bounded varchar.
	if maxsize < 1 {
		maxsize = 255
	}
	return fmt.Sprintf("varchar(%d)", maxsize)
}
|
||||
|
||||
// AutoIncrStr returns the identity clause SQL Server uses for
// auto-increment columns, "identity(0,1)" (seed 0, increment 1) —
// not MySQL-style "auto_increment" as the old comment claimed.
func (d SqlServerDialect) AutoIncrStr() string {
	return "identity(0,1)"
}
|
||||
|
||||
// Empty string removes autoincrement columns from the INSERT statements.
|
||||
func (d SqlServerDialect) AutoIncrBindValue() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d SqlServerDialect) CreateTableSuffix() string {
|
||||
|
||||
return d.suffix
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) TruncateClause() string {
|
||||
return "delete from"
|
||||
}
|
||||
|
||||
// Returns "?"
|
||||
func (d SqlServerDialect) BindVar(i int) string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
return standardInsertAutoIncr(exec, insertSql, params...)
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) QuoteField(f string) string {
|
||||
return `"` + f + `"`
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return table
|
||||
}
|
||||
return schema + "." + table
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) QuerySuffix() string { return ";" }
|
||||
|
||||
func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string {
|
||||
s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command)
|
||||
return s
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) IfTableExists(command, schema, table string) string {
|
||||
var schema_clause string
|
||||
if strings.TrimSpace(schema) != "" {
|
||||
schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema)
|
||||
}
|
||||
s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command)
|
||||
return s
|
||||
}
|
||||
|
||||
func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string {
|
||||
var schema_clause string
|
||||
if strings.TrimSpace(schema) != "" {
|
||||
schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema)
|
||||
}
|
||||
s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command)
|
||||
return s
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Oracle //
|
||||
///////////
|
||||
|
||||
// Implementation of Dialect for Oracle databases.
type OracleDialect struct{}
|
||||
|
||||
// QuerySuffix: Oracle statements are issued without a trailing ";".
func (d OracleDialect) QuerySuffix() string { return "" }

// ToSqlType maps a Go type to the column type used when creating tables.
// NOTE(review): the names emitted here (serial, bigserial, bytea, text)
// mirror the Postgres mapping rather than native Oracle types — presumed
// intentional for the driver in use; verify before relying on DDL.
func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.Ptr:
		// Pointers map to the type they point at.
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		return "boolean"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		if isAutoIncr {
			return "serial"
		}
		return "integer"
	case reflect.Int64, reflect.Uint64:
		if isAutoIncr {
			return "bigserial"
		}
		return "bigint"
	case reflect.Float64:
		return "double precision"
	case reflect.Float32:
		return "real"
	case reflect.Slice:
		// Only []byte gets a dedicated column type.
		if val.Elem().Kind() == reflect.Uint8 {
			return "bytea"
		}
	}

	// Fall back on well-known named types (database/sql null wrappers,
	// time.Time) matched by name.
	switch val.Name() {
	case "NullInt64":
		return "bigint"
	case "NullFloat64":
		return "double precision"
	case "NullBool":
		return "boolean"
	case "NullTime", "Time":
		return "timestamp with time zone"
	}

	// Strings: bounded varchar when a size was given, otherwise text.
	if maxsize > 0 {
		return fmt.Sprintf("varchar(%d)", maxsize)
	} else {
		return "text"
	}

}
|
||||
|
||||
// Returns empty string
|
||||
func (d OracleDialect) AutoIncrStr() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d OracleDialect) AutoIncrBindValue() string {
|
||||
return "default"
|
||||
}
|
||||
|
||||
func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
|
||||
return " returning " + col.ColumnName
|
||||
}
|
||||
|
||||
// Returns suffix
|
||||
func (d OracleDialect) CreateTableSuffix() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d OracleDialect) TruncateClause() string {
|
||||
return "truncate"
|
||||
}
|
||||
|
||||
// BindVar returns Oracle's colon-numbered placeholder (":1", ":2", …)
// for the zero-based index i — not "$(i+1)" as the old (copy-pasted
// from Postgres) comment claimed.
func (d OracleDialect) BindVar(i int) string {
	return fmt.Sprintf(":%d", i+1)
}
|
||||
|
||||
func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
|
||||
rows, err := exec.query(insertSql, params...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
var id int64
|
||||
err := rows.Scan(&id)
|
||||
return id, err
|
||||
}
|
||||
|
||||
return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error())
|
||||
}
|
||||
|
||||
func (d OracleDialect) QuoteField(f string) string {
|
||||
return `"` + strings.ToUpper(f) + `"`
|
||||
}
|
||||
|
||||
func (d OracleDialect) QuotedTableForQuery(schema string, table string) string {
|
||||
if strings.TrimSpace(schema) == "" {
|
||||
return d.QuoteField(table)
|
||||
}
|
||||
|
||||
return schema + "." + d.QuoteField(table)
|
||||
}
|
||||
|
||||
func (d OracleDialect) IfSchemaNotExists(command, schema string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
||||
|
||||
func (d OracleDialect) IfTableExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if exists", command)
|
||||
}
|
||||
|
||||
func (d OracleDialect) IfTableNotExists(command, schema, table string) string {
|
||||
return fmt.Sprintf("%s if not exists", command)
|
||||
}
|
26
vendor/gopkg.in/gorp.v1/errors.go
generated
vendored
26
vendor/gopkg.in/gorp.v1/errors.go
generated
vendored
@ -1,26 +0,0 @@
|
||||
package gorp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A non-fatal error, when a select query returns columns that do not exist
// as fields in the struct it is being mapped to. The query's results are
// still usable; NonFatalError recognizes this type.
type NoFieldInTypeError struct {
	// TypeName is the name of the destination struct type.
	TypeName string
	// MissingColNames lists the selected columns with no matching field.
	MissingColNames []string
}
|
||||
|
||||
func (err *NoFieldInTypeError) Error() string {
|
||||
return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName)
|
||||
}
|
||||
|
||||
// returns true if the error is non-fatal (ie, we shouldn't immediately return)
|
||||
func NonFatalError(err error) bool {
|
||||
switch err.(type) {
|
||||
case *NoFieldInTypeError:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
2085
vendor/gopkg.in/gorp.v1/gorp.go
generated
vendored
2085
vendor/gopkg.in/gorp.v1/gorp.go
generated
vendored
File diff suppressed because it is too large
Load Diff
22
vendor/gopkg.in/gorp.v1/test_all.sh
generated
vendored
22
vendor/gopkg.in/gorp.v1/test_all.sh
generated
vendored
@ -1,22 +0,0 @@
|
||||
#!/bin/sh

# Runs the gorp test suite once per supported database dialect, selecting
# the target via the GORP_TEST_DSN / GORP_TEST_DIALECT environment
# variables that the tests read. Aborts on the first failure (set -e).

# on macs, you may need to:
# export GOBUILDFLAG=-ldflags -linkmode=external

set -e

# "mysql" dialect — NOTE(review): DSN is in dbname/user/password form,
# presumably for the mymysql driver; verify against the test harness.
export GORP_TEST_DSN=gorptest/gorptest/gorptest
export GORP_TEST_DIALECT=mysql
go test $GOBUILDFLAG .

# "gomysql" dialect — user:password@/dbname DSN form.
export GORP_TEST_DSN=gorptest:gorptest@/gorptest
export GORP_TEST_DIALECT=gomysql
go test $GOBUILDFLAG .

# PostgreSQL.
export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable"
export GORP_TEST_DIALECT=postgres
go test $GOBUILDFLAG .

# SQLite, backed by a throwaway file.
export GORP_TEST_DSN=/tmp/gorptest.bin
export GORP_TEST_DIALECT=sqlite
go test $GOBUILDFLAG .
|
15
vendor/vendor.json
vendored
15
vendor/vendor.json
vendored
@ -636,16 +636,6 @@
|
||||
"revision": "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad",
|
||||
"revisionTime": "2017-04-24T20:45:52Z"
|
||||
},
|
||||
{
|
||||
"path": "github.com/rubenv/sql-migrate",
|
||||
"revision": "53184e1edfb4f9655b0fa8dd2c23e7763f452bda",
|
||||
"revisionTime": "2015-07-13T16:07:51+02:00"
|
||||
},
|
||||
{
|
||||
"path": "github.com/rubenv/sql-migrate/sqlparse",
|
||||
"revision": "53184e1edfb4f9655b0fa8dd2c23e7763f452bda",
|
||||
"revisionTime": "2015-07-13T16:07:51+02:00"
|
||||
},
|
||||
{
|
||||
"path": "github.com/russross/meddler",
|
||||
"revision": "308c3d0e5e45f543a2eb6c787cbfe0db3880e220",
|
||||
@ -851,11 +841,6 @@
|
||||
"revision": "014792cf3e266caff1e916876be12282b33059e0",
|
||||
"revisionTime": "2016-02-23T15:26:51-05:00"
|
||||
},
|
||||
{
|
||||
"path": "gopkg.in/gorp.v1",
|
||||
"revision": "c87af80f3cc5036b55b83d77171e156791085e2e",
|
||||
"revisionTime": "2015-02-04T09:55:30+01:00"
|
||||
},
|
||||
{
|
||||
"path": "gopkg.in/yaml.v2",
|
||||
"revision": "5d6f7e02b7cdad63b06ab3877915532cd30073b4",
|
||||
|
Loading…
Reference in New Issue
Block a user