feat: Phase 1 - Core API server with authentication

- Added database schema for users, api_keys, sync_state, change_log, and sync_config
- Implemented API key generation and validation with bcrypt hashing
- Created Chi-based REST API server with endpoints for:
  - Task CRUD operations (create, read, update, delete)
  - Task actions (complete, start, stop)
  - Tag management (list, add, remove)
  - Projects listing
  - Health check endpoint
- Added middleware for authentication and CORS
- Implemented change log tracking with triggers (key:value format)
- Added configurable change log retention (default 30 days)
- Created server CLI commands (opal server start, opal server keygen)
- Dependencies added: golang.org/x/crypto/bcrypt, github.com/go-chi/chi/v5
Committed: 2026-01-05 16:14:49 +01:00
parent 9bde1aefea
commit ba0cfc08e3
16 changed files with 1423 additions and 7 deletions
+156
View File
@@ -0,0 +1,156 @@
package engine
import (
"crypto/rand"
"encoding/base64"
"fmt"
"time"
"golang.org/x/crypto/bcrypt"
)
// APIKey represents an API key in the database.
// The plaintext key value is intentionally absent: only its bcrypt hash is
// stored, and the hash is never exposed through this struct.
type APIKey struct {
	ID        int        // primary key in the api_keys table
	Name      string     // human-readable label chosen at key creation
	UserID    int        // owning user; defaults to the shared user (id 1)
	CreatedAt time.Time  // creation time (stored as Unix seconds)
	LastUsed  *time.Time // last successful validation; nil if never used
	Revoked   bool       // true once the key has been revoked
}
// GenerateAPIKey creates a new API key labelled with the given name, persists
// its bcrypt hash, and returns the plaintext key. The plaintext is returned
// exactly once here and is never stored.
func GenerateAPIKey(name string) (string, error) {
	db := GetDB()
	if db == nil {
		return "", fmt.Errorf("database not initialized")
	}

	// Key format: "oak_" prefix + 32 cryptographically random bytes, base64.
	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		return "", fmt.Errorf("failed to generate random key: %w", err)
	}
	key := "oak_" + base64.URLEncoding.EncodeToString(raw)

	// Persist only the bcrypt hash of the key.
	hash, err := bcrypt.GenerateFromPassword([]byte(key), bcrypt.DefaultCost)
	if err != nil {
		return "", fmt.Errorf("failed to hash key: %w", err)
	}

	// user_id 1 is the default shared household user.
	if _, err = db.Exec(`
INSERT INTO api_keys (key, name, user_id, created_at)
VALUES (?, ?, 1, ?)
`, string(hash), name, timeNow().Unix()); err != nil {
		return "", fmt.Errorf("failed to store API key: %w", err)
	}
	return key, nil
}
// ValidateAPIKey checks if an API key is valid. On success it updates the
// key's last_used timestamp (best effort) and returns (true, userID, nil).
//
// Because only bcrypt hashes are stored, the key cannot be looked up
// directly: every non-revoked hash must be compared in turn, which is O(n)
// in the number of active keys.
func ValidateAPIKey(key string) (bool, int, error) {
	db := GetDB()
	if db == nil {
		return false, 0, fmt.Errorf("database not initialized")
	}
	// Fetch all candidate (non-revoked) keys. The revoked column is already
	// filtered by the WHERE clause, so it is not selected.
	rows, err := db.Query(`
SELECT id, key, user_id
FROM api_keys
WHERE revoked = 0
`)
	if err != nil {
		return false, 0, fmt.Errorf("failed to query API keys: %w", err)
	}
	defer rows.Close()
	// Check each key (bcrypt comparison).
	for rows.Next() {
		var id, userID int
		var hashedKey string
		if err := rows.Scan(&id, &hashedKey, &userID); err != nil {
			// Skip a malformed row rather than failing the whole check.
			continue
		}
		if err := bcrypt.CompareHashAndPassword([]byte(hashedKey), []byte(key)); err == nil {
			// Valid key found - record usage time; failure to update
			// last_used must not reject an otherwise valid key.
			now := timeNow().Unix()
			_, _ = db.Exec("UPDATE api_keys SET last_used = ? WHERE id = ?", now, id)
			return true, userID, nil
		}
	}
	// Distinguish "no key matched" from "iteration failed part-way".
	if err := rows.Err(); err != nil {
		return false, 0, fmt.Errorf("failed to iterate API keys: %w", err)
	}
	return false, 0, nil
}
// ListAPIKeys returns all API keys for a user, newest first. The plaintext
// key value is never included (only its bcrypt hash exists in the database).
func ListAPIKeys(userID int) ([]*APIKey, error) {
	db := GetDB()
	if db == nil {
		return nil, fmt.Errorf("database not initialized")
	}
	rows, err := db.Query(`
SELECT id, name, user_id, created_at, last_used, revoked
FROM api_keys
WHERE user_id = ?
ORDER BY created_at DESC
`, userID)
	if err != nil {
		return nil, fmt.Errorf("failed to query API keys: %w", err)
	}
	defer rows.Close()
	var keys []*APIKey
	for rows.Next() {
		key := &APIKey{}
		var createdAt int64
		var lastUsed *int64 // nullable column: nil when the key was never used
		if err := rows.Scan(&key.ID, &key.Name, &key.UserID, &createdAt, &lastUsed, &key.Revoked); err != nil {
			return nil, fmt.Errorf("failed to scan API key: %w", err)
		}
		// Timestamps are stored as Unix seconds.
		key.CreatedAt = time.Unix(createdAt, 0)
		if lastUsed != nil {
			t := time.Unix(*lastUsed, 0)
			key.LastUsed = &t
		}
		keys = append(keys, key)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate API keys: %w", err)
	}
	return keys, nil
}
// RevokeAPIKey marks an API key as revoked. It returns an error if the key
// does not exist or the update cannot be confirmed.
func RevokeAPIKey(keyID int) error {
	db := GetDB()
	if db == nil {
		return fmt.Errorf("database not initialized")
	}
	result, err := db.Exec("UPDATE api_keys SET revoked = 1 WHERE id = ?", keyID)
	if err != nil {
		return fmt.Errorf("failed to revoke API key: %w", err)
	}
	// Check the error explicitly: a driver failure here must not be
	// misreported as "API key not found".
	rows, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to confirm revocation: %w", err)
	}
	if rows == 0 {
		return fmt.Errorf("API key not found")
	}
	return nil
}
+194
View File
@@ -126,6 +126,137 @@ func runMigrations() error {
task_uuid TEXT NOT NULL,
FOREIGN KEY (task_uuid) REFERENCES tasks(uuid) ON DELETE CASCADE
);
-- Users table (minimal for now, expandable later)
CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT UNIQUE NOT NULL,
email TEXT,
created_at INTEGER NOT NULL
);
-- Default shared user for household
INSERT INTO users (id, username, created_at) VALUES (1, 'shared', unixepoch());
-- API Keys for authentication
CREATE TABLE api_keys (
id INTEGER PRIMARY KEY AUTOINCREMENT,
key TEXT UNIQUE NOT NULL,
name TEXT NOT NULL,
user_id INTEGER NOT NULL DEFAULT 1,
created_at INTEGER NOT NULL,
last_used INTEGER,
revoked INTEGER DEFAULT 0,
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
CREATE INDEX idx_api_keys_key ON api_keys(key);
CREATE INDEX idx_api_keys_user ON api_keys(user_id);
-- Sync state (per client device)
CREATE TABLE sync_state (
client_id TEXT PRIMARY KEY,
last_sync INTEGER NOT NULL,
last_change_id INTEGER DEFAULT 0
);
-- Change log (key:value format like edit command)
CREATE TABLE change_log (
id INTEGER PRIMARY KEY AUTOINCREMENT,
task_uuid TEXT NOT NULL,
change_type TEXT NOT NULL,
changed_at INTEGER NOT NULL,
data TEXT NOT NULL,
FOREIGN KEY (task_uuid) REFERENCES tasks(uuid) ON DELETE CASCADE
);
CREATE INDEX idx_change_log_timestamp ON change_log(changed_at);
CREATE INDEX idx_change_log_task ON change_log(task_uuid);
CREATE INDEX idx_change_log_id ON change_log(id);
-- Sync configuration
CREATE TABLE sync_config (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
);
-- Default: keep change log for 30 days
INSERT INTO sync_config (key, value) VALUES ('change_log_retention_days', '30');
-- Triggers to populate change_log
CREATE TRIGGER track_task_create AFTER INSERT ON tasks
BEGIN
INSERT INTO change_log (task_uuid, change_type, changed_at, data)
VALUES (NEW.uuid, 'create', NEW.modified,
'uuid: ' || NEW.uuid || CHAR(10) ||
'description: ' || NEW.description || CHAR(10) ||
'status: ' || CASE NEW.status
WHEN 80 THEN 'pending'
WHEN 67 THEN 'completed'
WHEN 68 THEN 'deleted'
WHEN 82 THEN 'recurring'
ELSE 'pending'
END || CHAR(10) ||
'priority: ' || CASE NEW.priority
WHEN 0 THEN 'L'
WHEN 1 THEN 'D'
WHEN 2 THEN 'M'
WHEN 3 THEN 'H'
ELSE 'D'
END || CHAR(10) ||
CASE WHEN NEW.project IS NOT NULL THEN 'project: ' || NEW.project || CHAR(10) ELSE '' END ||
'created: ' || NEW.created || CHAR(10) ||
'modified: ' || NEW.modified || CHAR(10) ||
CASE WHEN NEW.start IS NOT NULL THEN 'start: ' || NEW.start || CHAR(10) ELSE '' END ||
CASE WHEN NEW.end IS NOT NULL THEN 'end: ' || NEW.end || CHAR(10) ELSE '' END ||
CASE WHEN NEW.due IS NOT NULL THEN 'due: ' || NEW.due || CHAR(10) ELSE '' END ||
CASE WHEN NEW.scheduled IS NOT NULL THEN 'scheduled: ' || NEW.scheduled || CHAR(10) ELSE '' END ||
CASE WHEN NEW.wait IS NOT NULL THEN 'wait: ' || NEW.wait || CHAR(10) ELSE '' END ||
CASE WHEN NEW.until_date IS NOT NULL THEN 'until: ' || NEW.until_date || CHAR(10) ELSE '' END ||
CASE WHEN NEW.recurrence_duration IS NOT NULL THEN 'recurrence: ' || NEW.recurrence_duration || CHAR(10) ELSE '' END ||
CASE WHEN NEW.parent_uuid IS NOT NULL THEN 'parent_uuid: ' || NEW.parent_uuid || CHAR(10) ELSE '' END
);
END;
CREATE TRIGGER track_task_update AFTER UPDATE ON tasks
BEGIN
INSERT INTO change_log (task_uuid, change_type, changed_at, data)
VALUES (NEW.uuid, 'update', NEW.modified,
'uuid: ' || NEW.uuid || CHAR(10) ||
'description: ' || NEW.description || CHAR(10) ||
'status: ' || CASE NEW.status
WHEN 80 THEN 'pending'
WHEN 67 THEN 'completed'
WHEN 68 THEN 'deleted'
WHEN 82 THEN 'recurring'
ELSE 'pending'
END || CHAR(10) ||
'priority: ' || CASE NEW.priority
WHEN 0 THEN 'L'
WHEN 1 THEN 'D'
WHEN 2 THEN 'M'
WHEN 3 THEN 'H'
ELSE 'D'
END || CHAR(10) ||
CASE WHEN NEW.project IS NOT NULL THEN 'project: ' || NEW.project || CHAR(10) ELSE '' END ||
'created: ' || NEW.created || CHAR(10) ||
'modified: ' || NEW.modified || CHAR(10) ||
CASE WHEN NEW.start IS NOT NULL THEN 'start: ' || NEW.start || CHAR(10) ELSE '' END ||
CASE WHEN NEW.end IS NOT NULL THEN 'end: ' || NEW.end || CHAR(10) ELSE '' END ||
CASE WHEN NEW.due IS NOT NULL THEN 'due: ' || NEW.due || CHAR(10) ELSE '' END ||
CASE WHEN NEW.scheduled IS NOT NULL THEN 'scheduled: ' || NEW.scheduled || CHAR(10) ELSE '' END ||
CASE WHEN NEW.wait IS NOT NULL THEN 'wait: ' || NEW.wait || CHAR(10) ELSE '' END ||
CASE WHEN NEW.until_date IS NOT NULL THEN 'until: ' || NEW.until_date || CHAR(10) ELSE '' END ||
CASE WHEN NEW.recurrence_duration IS NOT NULL THEN 'recurrence: ' || NEW.recurrence_duration || CHAR(10) ELSE '' END ||
CASE WHEN NEW.parent_uuid IS NOT NULL THEN 'parent_uuid: ' || NEW.parent_uuid || CHAR(10) ELSE '' END
);
END;
CREATE TRIGGER track_task_delete AFTER DELETE ON tasks
BEGIN
INSERT INTO change_log (task_uuid, change_type, changed_at, data)
VALUES (OLD.uuid, 'delete', unixepoch(), 'uuid: ' || OLD.uuid);
END;
`,
},
}
@@ -167,3 +298,66 @@ func runMigrations() error {
// getCurrentTimestamp returns the current Unix timestamp in seconds,
// obtained via the package-level timeNow helper.
func getCurrentTimestamp() int64 {
	return timeNow().Unix()
}
// GetCurrentTimestamp returns the current Unix timestamp (exported for API use)
func GetCurrentTimestamp() int64 {
return getCurrentTimestamp()
}
// CleanupChangeLog removes change log entries older than the configured
// retention period (see GetChangeLogRetentionDays; default 30 days).
func CleanupChangeLog() error {
	db := GetDB()
	if db == nil {
		return fmt.Errorf("database not initialized")
	}
	// Reuse the single accessor for the retention setting instead of
	// duplicating the sync_config query; guard against nonsensical values.
	retentionDays, err := GetChangeLogRetentionDays()
	if err != nil || retentionDays <= 0 {
		retentionDays = 30 // fall back to the default on error or bad config
	}
	// Entries strictly older than the cutoff are deleted.
	cutoffTime := timeNow().AddDate(0, 0, -retentionDays).Unix()
	result, err := db.Exec("DELETE FROM change_log WHERE changed_at < ?", cutoffTime)
	if err != nil {
		return fmt.Errorf("failed to cleanup change log: %w", err)
	}
	// Reporting is best effort; a RowsAffected error only suppresses the log line.
	if rows, err := result.RowsAffected(); err == nil && rows > 0 {
		fmt.Printf("Cleaned up %d old change log entries\n", rows)
	}
	return nil
}
// GetChangeLogRetentionDays returns the configured change-log retention
// period in days, falling back to 30 when the setting cannot be read.
func GetChangeLogRetentionDays() (int, error) {
	db := GetDB()
	if db == nil {
		return 0, fmt.Errorf("database not initialized")
	}
	const query = "SELECT value FROM sync_config WHERE key = 'change_log_retention_days'"
	var days int
	if err := db.QueryRow(query).Scan(&days); err != nil {
		// Missing or unreadable setting: use the default rather than failing.
		return 30, nil
	}
	return days, nil
}
// SetChangeLogRetentionDays sets the change-log retention period in days,
// creating or replacing the sync_config entry.
func SetChangeLogRetentionDays(days int) error {
	db := GetDB()
	if db == nil {
		return fmt.Errorf("database not initialized")
	}
	_, err := db.Exec("INSERT OR REPLACE INTO sync_config (key, value) VALUES ('change_log_retention_days', ?)", days)
	if err != nil {
		// Wrap with context, consistent with the rest of the package.
		return fmt.Errorf("failed to set change log retention: %w", err)
	}
	return nil
}
+55
View File
@@ -0,0 +1,55 @@
package engine
import (
"fmt"
)
// GetAllTags returns all unique tags from the database, sorted alphabetically.
// Returns a nil slice when no tags exist.
func GetAllTags() ([]string, error) {
	db := GetDB()
	if db == nil {
		return nil, fmt.Errorf("database not initialized")
	}
	rows, err := db.Query("SELECT DISTINCT tag FROM tags ORDER BY tag")
	if err != nil {
		return nil, fmt.Errorf("failed to query tags: %w", err)
	}
	defer rows.Close()
	var tags []string
	for rows.Next() {
		var tag string
		if err := rows.Scan(&tag); err != nil {
			return nil, fmt.Errorf("failed to scan tag: %w", err)
		}
		tags = append(tags, tag)
	}
	// A mid-iteration failure would otherwise be silently truncated results.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate tags: %w", err)
	}
	return tags, nil
}
// GetAllProjects returns all unique non-NULL projects from the tasks table,
// sorted alphabetically. Returns a nil slice when no projects exist.
func GetAllProjects() ([]string, error) {
	db := GetDB()
	if db == nil {
		return nil, fmt.Errorf("database not initialized")
	}
	rows, err := db.Query("SELECT DISTINCT project FROM tasks WHERE project IS NOT NULL ORDER BY project")
	if err != nil {
		return nil, fmt.Errorf("failed to query projects: %w", err)
	}
	defer rows.Close()
	var projects []string
	for rows.Next() {
		var project string
		if err := rows.Scan(&project); err != nil {
			return nil, fmt.Errorf("failed to scan project: %w", err)
		}
		projects = append(projects, project)
	}
	// A mid-iteration failure would otherwise be silently truncated results.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate projects: %w", err)
	}
	return projects, nil
}