diff --git a/README.md b/README.md index 85039f8..ba0fb08 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,13 @@ solactl senderid list # 발송 내역 조회 solactl messages list +# 발송 내역 export (CSV/JSON/JSONL) +solactl messages export --output messages.csv + +# 일별 통계 export (CSV/JSON/JSONL) +solactl statistics export-daily --output stats.csv \ + --start-date 2026-05-04 --end-date 2026-05-11 + # 잔액 조회 solactl balance @@ -120,3 +127,52 @@ solactl quota list-requests --status PENDING # 검토 대기 중인 요청만 > **주의** — 동일 계정에 PENDING 요청이 이미 있을 때 새 요청을 제출하면 이전 요청은 자동으로 REJECTED 처리됩니다. +## 발송 내역 / 통계 Export + +대량 export는 messages-v4 부하를 줄이기 위해 다음 가드를 자동 적용합니다. + +- **6개월(180일) 이전 데이터는 조회 불가** — 사내 DB에서 자동 삭제됩니다. +- **7일 초과 범위는 1일 단위 윈도우로 자동 분할** — UTC 자정 기준으로 잘게 호출. +- **`--throttle` (기본 500ms)** — 페이지/윈도우 호출 사이 sleep. 최소 100ms 강제. +- **`--page-size` 강제 상한** — `messages export` 200, `statistics export-daily` 100. +- **Ctrl+C 시 부분 결과 보존** — stderr에 `--resume-token` 안내. 다음 명령에 `--append --resume-token <토큰>`을 붙여 이어받을 수 있습니다. + +### 메시지 내역 export + +```bash +# 기본: 최근 7일, CSV, page-size 50, throttle 500ms +solactl messages export --output messages.csv + +# 31일 범위 — 자동으로 31개 1일 윈도우로 분할 +solactl messages export --output messages.csv \ + --start-date 2026-04-02 --end-date 2026-05-03 + +# JSONL 포맷 + 필터 +solactl messages export --output messages.jsonl --format jsonl \ + --type SMS --status-code 4000 --from 029302266 + +# 중단 후 재개 +solactl messages export --output messages.csv --append \ + --resume-token eyJ2IjoxLCJ3IjoiMjAyNi0wNS0wMSJ9 +``` + +CSV 컬럼: `messageId, type, status, statusCode, to, from, country, subject, dateCreated, dateUpdated, groupId, accountId, text, customFields`. 
+ +### 일별 통계 export + +```bash +# 7일 범위 +solactl statistics export-daily --output stats.csv \ + --start-date 2026-05-04 --end-date 2026-05-11 + +# 31일 범위 — 자동 일별 분할 +solactl statistics export-daily --output stats.csv \ + --start-date 2026-04-02 --end-date 2026-05-03 + +# Windows Excel 한글 호환 (UTF-8 BOM) +solactl statistics export-daily --output stats.csv \ + --start-date 2026-05-04 --end-date 2026-05-11 --bom +``` + +CSV는 고정 prefix (`date, accountId, prepaid, balance, point, profit, refundBalance, refundPoint`) + 응답에서 발견된 모든 `count.*` 키를 정렬해 컬럼화 (`count_SMS, count_LMS, count_MMS, ...`). + diff --git a/cmd/balance.go b/cmd/balance.go index d0bd1e2..89c7280 100644 --- a/cmd/balance.go +++ b/cmd/balance.go @@ -3,10 +3,10 @@ package cmd import ( "encoding/json" "fmt" - "strconv" - "strings" "github.com/spf13/cobra" + + "github.com/solapi/solactl/pkg/types" ) var balanceCmd = &cobra.Command{ @@ -52,35 +52,4 @@ func runBalance(cmd *cobra.Command, args []string) error { return nil } -// formatNumber formats an integer with thousand separators. 
-func formatNumber(n int) string { - negative := n < 0 - if negative { - n = -n - } - - s := strconv.Itoa(n) - if len(s) <= 3 { - if negative { - return "-" + s - } - return s - } - - var b strings.Builder - offset := len(s) % 3 - if offset > 0 { - b.WriteString(s[:offset]) - } - for i := offset; i < len(s); i += 3 { - if b.Len() > 0 { - b.WriteByte(',') - } - b.WriteString(s[i : i+3]) - } - - if negative { - return "-" + b.String() - } - return b.String() -} +func formatNumber(n int) string { return types.FormatThousands(n) } diff --git a/cmd/balance_test.go b/cmd/balance_test.go index c743b5b..97295f3 100644 --- a/cmd/balance_test.go +++ b/cmd/balance_test.go @@ -142,7 +142,7 @@ func TestBalance_JSON(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - var parsed map[string]interface{} + var parsed map[string]any if err := json.Unmarshal([]byte(strings.TrimSpace(buf.String())), &parsed); err != nil { t.Fatalf("output is not valid JSON: %v", err) } diff --git a/cmd/export_common.go b/cmd/export_common.go new file mode 100644 index 0000000..f9a0655 --- /dev/null +++ b/cmd/export_common.go @@ -0,0 +1,66 @@ +package cmd + +import ( + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/solapi/solactl/pkg/progress" +) + +// resolveProgressMode maps --progress / --no-progress flags to progress.Mode. +func resolveProgressMode(flag string, noProgress bool) (progress.Mode, error) { + if noProgress { + flag = "off" + } + switch flag { + case "auto": + return progress.ModeAuto, nil + case "on": + return progress.ModeOn, nil + case "off": + return progress.ModeOff, nil + } + return 0, fmt.Errorf("잘못된 --progress 값: %s (auto|on|off)", flag) +} + +// parseExportDate accepts "2006-01-02", "2006-01-02T15:04:05Z", or RFC3339. 
+func parseExportDate(s string) (time.Time, error) { + for _, layout := range []string{time.RFC3339, "2006-01-02T15:04:05Z", "2006-01-02"} { + if t, err := time.Parse(layout, s); err == nil { + return t.UTC(), nil + } + } + return time.Time{}, fmt.Errorf("지원되는 날짜 형식: 2006-01-02, 2006-01-02T15:04:05Z, RFC3339") +} + +// nopWriteCloser wraps stdout-like writers that must not be closed by the caller. +type nopWriteCloser struct{} + +func (nopWriteCloser) Close() error { return nil } + +// openExportOutput resolves the --output flag to (writer, closer). path == "-" +// returns the global stdout writer plus a no-op closer. append=false rejects an +// existing file via O_EXCL so users do not silently overwrite previous exports. +func openExportOutput(path string, appendMode bool) (io.Writer, io.Closer, error) { + if path == "-" { + return out(), nopWriteCloser{}, nil + } + if appendMode { + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) + if err != nil { + return nil, nil, err + } + return f, f, nil + } + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o644) + if err != nil { + if errors.Is(err, os.ErrExist) { + return nil, nil, fmt.Errorf("출력 파일이 이미 존재: %s (--append를 사용하거나 파일을 삭제하세요)", path) + } + return nil, nil, err + } + return f, f, nil +} diff --git a/cmd/messages_export.go b/cmd/messages_export.go new file mode 100644 index 0000000..4e45618 --- /dev/null +++ b/cmd/messages_export.go @@ -0,0 +1,422 @@ +package cmd + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "maps" + "net/url" + "os" + "sort" + "strconv" + "time" + + "github.com/spf13/cobra" + + "github.com/solapi/solactl/pkg/client" + "github.com/solapi/solactl/pkg/clock" + "github.com/solapi/solactl/pkg/exporter" + "github.com/solapi/solactl/pkg/output" + "github.com/solapi/solactl/pkg/progress" +) + +// 상한과 기본값. messages-v4 부하 보호를 위해 보수적으로 설정. 
+const ( + messagesExportPageSizeMax = 200 + messagesExportPageSizeDefault = 50 + messagesExportThrottleDefault = 500 * time.Millisecond +) + +var ( + msgExportFlagOutput string + msgExportFlagFormat string // csv|json|jsonl + msgExportFlagThrottle time.Duration + msgExportFlagPageSize int + msgExportFlagMaxPages int + msgExportFlagAppend bool + msgExportFlagBOM bool + msgExportFlagProgress string // auto|on|off + msgExportFlagNoProgress bool + msgExportFlagResumeToken string + + // messages 전용 필터. + msgExportFlagStartDate string + msgExportFlagEndDate string + msgExportFlagType string + msgExportFlagStatus string + msgExportFlagTo string + msgExportFlagFrom string + msgExportFlagGroupID string + msgExportFlagStartKey string +) + +var messagesExportCmd = &cobra.Command{ + Use: "export", + Short: "발송 내역을 CSV/JSON/JSONL로 export합니다", + Long: `발송 내역을 파일로 export합니다. + +7일을 초과하는 기간은 자동으로 1일 단위 윈도우로 분할되어 streaming됩니다. +페이지 사이 --throttle 만큼 sleep을 두어 messages-v4 부하를 줄입니다. +6개월(180일) 이전 데이터는 자동 삭제되므로 조회할 수 없습니다. 
+Ctrl+C로 중단하면 stderr에 재개용 --resume-token이 출력됩니다.`, + RunE: runMessagesExport, +} + +func init() { + f := messagesExportCmd.Flags() + f.StringVar(&msgExportFlagOutput, "output", "", "출력 파일 경로 (필수, '-'는 stdout)") + f.StringVar(&msgExportFlagFormat, "format", "csv", "출력 포맷 (csv|json|jsonl)") + f.DurationVar(&msgExportFlagThrottle, "throttle", messagesExportThrottleDefault, "페이지/윈도우 호출 사이 sleep (최소 100ms)") + f.IntVar(&msgExportFlagPageSize, "page-size", messagesExportPageSizeDefault, fmt.Sprintf("페이지당 건수 (최대 %d)", messagesExportPageSizeMax)) + f.IntVar(&msgExportFlagMaxPages, "max-pages", 0, "전체 페이지 상한 (0=무제한)") + f.BoolVar(&msgExportFlagAppend, "append", false, "기존 파일에 이어쓰기 (헤더 검증)") + f.BoolVar(&msgExportFlagBOM, "bom", false, "UTF-8 BOM 추가 (Windows Excel 한글)") + f.StringVar(&msgExportFlagProgress, "progress", "auto", "진행률 표시 모드 (auto|on|off)") + f.BoolVar(&msgExportFlagNoProgress, "no-progress", false, "--progress=off와 동일") + f.StringVar(&msgExportFlagResumeToken, "resume-token", "", "중단된 export 재개 토큰") + f.StringVar(&msgExportFlagStartDate, "start-date", "", "시작 날짜 (ISO 8601, 6개월 이내)") + f.StringVar(&msgExportFlagEndDate, "end-date", "", "종료 날짜 (ISO 8601)") + f.StringVar(&msgExportFlagType, "type", "", "메시지 타입 (SMS, LMS, MMS, ATA, ...)") + f.StringVar(&msgExportFlagStatus, "status-code", "", "상태 코드") + f.StringVar(&msgExportFlagTo, "to", "", "수신 번호") + f.StringVar(&msgExportFlagFrom, "from", "", "발신 번호") + f.StringVar(&msgExportFlagGroupID, "group-id", "", "그룹 ID") + f.StringVar(&msgExportFlagStartKey, "start-key", "", "단일 윈도우 내 재개용 (resume-token 권장)") + _ = messagesExportCmd.MarkFlagRequired("output") + messagesCmd.AddCommand(messagesExportCmd) +} + +// 14개 고정 컬럼. customFields는 JSON 문자열로 한 컬럼에 직렬화. +var messagesCSVHeaders = []string{ + "messageId", "type", "status", "statusCode", "to", "from", "country", + "subject", "dateCreated", "dateUpdated", "groupId", "accountId", "text", "customFields", +} + +// messageRecord는 CSV row 구성을 위한 부분 디코딩. 누락 필드는 빈 문자열. 
+type messageRecord struct { + MessageID string `json:"messageId"` + Type string `json:"type"` + Status string `json:"status"` + StatusCode string `json:"statusCode"` + To string `json:"to"` + From string `json:"from"` + Country string `json:"country"` + Subject string `json:"subject"` + DateCreated string `json:"dateCreated"` + DateUpdated string `json:"dateUpdated"` + GroupID string `json:"groupId"` + AccountID string `json:"accountId"` + Text string `json:"text"` + CustomFields map[string]any `json:"customFields,omitempty"` +} + +func (m *messageRecord) row() []string { + customFields := "" + if len(m.CustomFields) > 0 { + if b, err := json.Marshal(m.CustomFields); err == nil { + customFields = string(b) + } + } + return []string{ + m.MessageID, m.Type, m.Status, m.StatusCode, m.To, m.From, m.Country, + m.Subject, m.DateCreated, m.DateUpdated, m.GroupID, m.AccountID, m.Text, customFields, + } +} + +func runMessagesExport(_ *cobra.Command, _ []string) error { + // 1. progress 모드. + mode, err := resolveProgressMode(msgExportFlagProgress, msgExportFlagNoProgress) + if err != nil { + return err + } + + // 2. resume-token / start-key 결정. + startWindowDate, initialState, err := resolveResumeState(msgExportFlagResumeToken, msgExportFlagStartKey) + if err != nil { + return err + } + + // 3. 날짜 파싱 (기본값 [now-7d, now]). + now := time.Now().UTC() + startDate, endDate, err := resolveDateRange(msgExportFlagStartDate, msgExportFlagEndDate, now) + if err != nil { + return err + } + + // 4. format 검증. + format := msgExportFlagFormat + if format != "csv" && format != "json" && format != "jsonl" { + return fmt.Errorf("잘못된 --format 값: %s (csv|json|jsonl)", format) + } + if format == "json" && msgExportFlagAppend { + return errors.New("--format json은 --append와 함께 사용할 수 없습니다 (json array는 append-safe 불가)") + } + + // 5. page-size / throttle / 날짜 범위 사전 검증. + // exporter.Run도 다시 검증하지만, 파일을 열기 전에 거부해야 zero-byte 파일을 남기지 않는다. 
+ if err := exporter.ValidatePageSize("messages export --page-size", msgExportFlagPageSize, messagesExportPageSizeMax); err != nil { + return err + } + if err := exporter.ValidateThrottle(msgExportFlagThrottle, exporter.MinThrottle); err != nil { + return err + } + if _, err := exporter.ValidateDateRange(startDate, endDate, now, exporter.DefaultMaxLookbackDays); err != nil { + return err + } + + // 6. 클라이언트 준비. + c, err := newClient() + if err != nil { + return err + } + + // 7. 출력 파일/스트림 오픈. (validation 통과 후에만 — 실패 시 빈 파일 남기지 않음) + w, closer, err := openExportOutput(msgExportFlagOutput, msgExportFlagAppend) + if err != nil { + return err + } + defer func() { _ = closer.Close() }() + + // 8. RowWriter 생성 (format별). + rw, err := newMessagesRowWriter(format, w, msgExportFlagOutput, msgExportFlagAppend, msgExportFlagBOM) + if err != nil { + return err + } + + // 9. progress reporter (항상 stderr). + rep := progress.New(progress.Options{Writer: errOut(), Mode: mode}) + + // 10. fetcher 구성. + fetcher := newMessagesFetcher(c, msgExportFlagPageSize, buildMessagesFilters()) + + // 11. exporter.Run. + opts := exporter.Options{ + Now: now, + StartDate: startDate, + EndDate: endDate, + PageSize: msgExportFlagPageSize, + MaxPages: msgExportFlagMaxPages, + Throttle: msgExportFlagThrottle, + Fetcher: fetcher, + Writer: rw, + Reporter: rep, + Clock: clock.Real(), + StartWindowDate: startWindowDate, + InitialState: initialState, + } + result, runErr := exporter.Run(ctx(), opts) + + // 12. JSON array 종결. 부분 결과여도 [] 또는 [..]는 닫아 valid JSON 유지. + if jw, ok := rw.(*output.JSONArrayWriter); ok { + if closeErr := jw.Close(); closeErr != nil && runErr == nil { + runErr = closeErr + } + } + + // 13. 부분 결과 + 재개 안내. 토큰은 stderr로. + if result.ResumeToken != "" { + _, _ = fmt.Fprintf(errOut(), "\n중단됨. 
누적 %d건 처리.\n", result.RecordsWritten) + _, _ = fmt.Fprintf(errOut(), "재개:\n solactl messages export --output %s --append --resume-token %s\n", msgExportFlagOutput, result.ResumeToken) + } + return runErr +} + +func resolveResumeState(token, startKey string) (string, exporter.PageState, error) { + if token != "" { + tok, err := exporter.DecodeToken(token) + if err != nil { + return "", nil, fmt.Errorf("--resume-token 디코드 실패: %w", err) + } + return tok.Window, tok.State, nil + } + if startKey != "" { + b, err := json.Marshal(map[string]string{"startKey": startKey}) + if err != nil { + return "", nil, err + } + return "", b, nil + } + return "", nil, nil +} + +func resolveDateRange(startStr, endStr string, now time.Time) (time.Time, time.Time, error) { + var start, end time.Time + if startStr == "" { + start = now.AddDate(0, 0, -7) + } else { + s, err := parseExportDate(startStr) + if err != nil { + return time.Time{}, time.Time{}, fmt.Errorf("--start-date 파싱 실패: %w", err) + } + start = s + } + if endStr == "" { + end = now + } else { + e, err := parseExportDate(endStr) + if err != nil { + return time.Time{}, time.Time{}, fmt.Errorf("--end-date 파싱 실패: %w", err) + } + end = e + } + return start, end, nil +} + + +// buildMessagesFilters는 flag → query 파라미터 매핑. limit/날짜/startKey는 fetcher에서 윈도우별로 채움. +func buildMessagesFilters() url.Values { + p := url.Values{} + if msgExportFlagType != "" { + p.Set("type", msgExportFlagType) + } + if msgExportFlagStatus != "" { + p.Set("statusCode", msgExportFlagStatus) + } + if msgExportFlagTo != "" { + p.Set("to", msgExportFlagTo) + } + if msgExportFlagFrom != "" { + p.Set("from", msgExportFlagFrom) + } + if msgExportFlagGroupID != "" { + p.Set("groupId", msgExportFlagGroupID) + } + return p +} + +// newMessagesFetcher는 GET /messages/v4/list 호출 PageFetcher를 만든다. +// messages-v4 응답 포맷은 두 가지: {messageList:{id:rec,...}} 또는 {messages:[...]}. +// 양쪽을 모두 처리하되, map 응답의 경우 키를 messageId로 백필한다. 
+func newMessagesFetcher(c *client.Client, pageSize int, baseFilters url.Values) exporter.PageFetcher { + return func(ctx context.Context, w exporter.Window, state exporter.PageState) (exporter.Page, error) { + params := url.Values{} + maps.Copy(params, baseFilters) + params.Set("limit", strconv.Itoa(pageSize)) + params.Set("startDate", w.Start.UTC().Format(time.RFC3339)) + params.Set("endDate", w.End.UTC().Format(time.RFC3339)) + if len(state) > 0 { + var st struct { + StartKey string `json:"startKey"` + } + if err := json.Unmarshal(state, &st); err == nil && st.StartKey != "" { + params.Set("startKey", st.StartKey) + } + } + raw, err := c.Get(ctx, "messages/v4/list", params) + if err != nil { + return exporter.Page{}, err + } + + var page struct { + MessageList map[string]json.RawMessage `json:"messageList"` + Messages []json.RawMessage `json:"messages"` + NextKey string `json:"nextKey"` + } + if err := json.Unmarshal(raw, &page); err != nil { + return exporter.Page{}, fmt.Errorf("응답 파싱 실패: %w", err) + } + + var records []json.RawMessage + switch { + case len(page.Messages) > 0: + records = page.Messages + case len(page.MessageList) > 0: + keys := make([]string, 0, len(page.MessageList)) + for k := range page.MessageList { + keys = append(keys, k) + } + sort.Strings(keys) + records = make([]json.RawMessage, 0, len(keys)) + for _, k := range keys { + rec := page.MessageList[k] + rec = ensureMessageID(rec, k) + records = append(records, rec) + } + } + var next exporter.PageState + if page.NextKey != "" { + nb, err := json.Marshal(map[string]string{"startKey": page.NextKey}) + if err != nil { + return exporter.Page{}, err + } + next = nb + } + return exporter.Page{Records: records, Next: next}, nil + } +} + +// ensureMessageID는 record에 messageId 키가 없으면 map key로 백필한다. +// 파싱 실패 시 원본을 그대로 반환 (응답이 비정형이어도 export는 계속 진행). 
+func ensureMessageID(rec json.RawMessage, id string) json.RawMessage { + var probe map[string]json.RawMessage + if err := json.Unmarshal(rec, &probe); err != nil { + return rec + } + if _, ok := probe["messageId"]; ok { + return rec + } + idJSON, _ := json.Marshal(id) + probe["messageId"] = idJSON + b, err := json.Marshal(probe) + if err != nil { + return rec + } + return b +} + +// --- RowWriter 구현 --- + +type messagesCSVRowWriter struct{ cw *output.CSVWriter } + +func (m *messagesCSVRowWriter) WriteRecord(rec json.RawMessage) error { + var mr messageRecord + if err := json.Unmarshal(rec, &mr); err != nil { + return fmt.Errorf("메시지 레코드 디코드 실패: %w", err) + } + return m.cw.WriteRow(mr.row()) +} +func (m *messagesCSVRowWriter) Flush() error { return m.cw.Flush() } + +func newMessagesRowWriter(format string, w io.Writer, path string, appendMode, bom bool) (exporter.RowWriter, error) { + switch format { + case "csv": + var reader io.Reader + if appendMode && path != "-" { + // Append 모드 헤더 검증을 위해 별도 reader 핸들로 open. + // write 핸들과 분리 (writer는 O_APPEND, reader는 read-only). + r, err := os.Open(path) + if err != nil { + // 빈/없는 파일에 대한 append는 CSVWriter.verifyAppendHeader에서 명확히 거부. 
+ return nil, fmt.Errorf("--append: 기존 파일 열기 실패: %w", err) + } + defer func() { _ = r.Close() }() + cw, err := output.NewCSVWriter(w, r, output.CSVOptions{ + Headers: messagesCSVHeaders, + AddBOM: bom, + Append: true, + StripCtrl: true, + }) + if err != nil { + return nil, err + } + return &messagesCSVRowWriter{cw: cw}, nil + } + cw, err := output.NewCSVWriter(w, reader, output.CSVOptions{ + Headers: messagesCSVHeaders, + AddBOM: bom, + Append: false, + StripCtrl: true, + }) + if err != nil { + return nil, err + } + return &messagesCSVRowWriter{cw: cw}, nil + case "jsonl": + return output.NewJSONLWriter(w), nil + case "json": + return output.NewJSONArrayWriter(w), nil + } + return nil, fmt.Errorf("지원되지 않는 format: %s", format) +} diff --git a/cmd/messages_export_test.go b/cmd/messages_export_test.go new file mode 100644 index 0000000..8bc12fe --- /dev/null +++ b/cmd/messages_export_test.go @@ -0,0 +1,700 @@ +package cmd + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/solapi/solactl/pkg/client" + "github.com/solapi/solactl/pkg/exporter" +) + +// setupMessagesExportTest wires up an httptest server, capture buffers, and a +// fresh tmp dir. flag 변수 reset만으로도 cobra Flags가 가리키는 포인터를 공유하지만, +// pflag의 Changed 비트는 별도로 리셋해야 MarkFlagRequired가 정확히 동작한다. 
+func setupMessagesExportTest(t *testing.T, handler http.HandlerFunc) (stderr *bytes.Buffer, tmpDir string) { + t.Helper() + resetFlags() + resetMessagesExportFlags() + resetPflagChanged(messagesExportCmd) + + ts := httptest.NewServer(handler) + t.Cleanup(ts.Close) + + c := &client.Client{ + HTTPClient: ts.Client(), + APIKey: "testkey", + APISecret: "testsecret", + MaxRetries: 0, + BaseDelay: time.Millisecond, + BaseURLOverride: ts.URL, + } + clientOverride = c + + var outBuf, errBuf bytes.Buffer + outWriter = &outBuf + errWriter = &errBuf + + tmpDir = t.TempDir() + + t.Cleanup(func() { + clientOverride = nil + outWriter = nil + errWriter = nil + resetMessagesExportFlags() + resetPflagChanged(messagesExportCmd) + }) + return &errBuf, tmpDir +} + +// runExport executes the export command with the given args and returns the error. +func runExport(args ...string) error { + full := append([]string{"messages", "export"}, args...) + rootCmd.SetArgs(full) + return rootCmd.Execute() +} + +// recentDate returns an ISO date N days before today (UTC) — within lookback. 
+func recentDate(daysAgo int) string { + return time.Now().UTC().AddDate(0, 0, -daysAgo).Format("2006-01-02") +} + +func TestMessagesExport_CSV_SingleWindow_LegacyMap(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + t.Errorf("method=%s want GET", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/messages/v4/list") { + t.Errorf("path=%s", r.URL.Path) + } + resp := `{"messageList":{ + "MSG001":{"type":"SMS","statusCode":"2000","to":"01011112222","from":"01099998888","dateCreated":"2026-05-10T10:00:00"}, + "MSG002":{"type":"LMS","statusCode":"2000","to":"01033334444","from":"01099998888","dateCreated":"2026-05-10T11:00:00"} + }}` + _, _ = io.WriteString(w, resp) + }) + + outPath := filepath.Join(tmpDir, "out.csv") + if err := runExport("--output", outPath, "--throttle", "100ms", "--progress", "off"); err != nil { + t.Fatalf("execute: %v", err) + } + + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("read: %v", err) + } + r := csv.NewReader(bytes.NewReader(data)) + rows, err := r.ReadAll() + if err != nil { + t.Fatalf("csv parse: %v", err) + } + if len(rows) != 3 { // header + 2 records + t.Fatalf("rows=%d want 3", len(rows)) + } + // 헤더 검증. + for i, h := range messagesCSVHeaders { + if rows[0][i] != h { + t.Errorf("header[%d]=%q want %q", i, rows[0][i], h) + } + } + // 정렬은 키 사전순: MSG001 → MSG002. 
+ if rows[1][0] != "MSG001" || rows[2][0] != "MSG002" { + t.Errorf("messageId order=%q,%q want MSG001,MSG002", rows[1][0], rows[2][0]) + } +} + +func TestMessagesExport_CSV_SingleWindow_ArrayResponse(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + resp := `{"messages":[ + {"messageId":"M-A","type":"SMS","statusCode":"2000","to":"010"}, + {"messageId":"M-B","type":"LMS","statusCode":"2000","to":"011"} + ]}` + _, _ = io.WriteString(w, resp) + }) + + outPath := filepath.Join(tmpDir, "out.csv") + if err := runExport("--output", outPath, "--throttle", "100ms", "--progress", "off"); err != nil { + t.Fatalf("execute: %v", err) + } + rows := mustReadCSV(t, outPath) + if len(rows) != 3 { + t.Fatalf("rows=%d", len(rows)) + } + if rows[1][0] != "M-A" || rows[2][0] != "M-B" { + t.Errorf("ids=%q,%q", rows[1][0], rows[2][0]) + } +} + +func TestMessagesExport_JSONL(t *testing.T) { + // 8일 범위 → 8개 1일 윈도우. + var calls atomic.Int64 + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + i := calls.Add(1) + resp := fmt.Sprintf(`{"messageList":{"M%d":{"type":"SMS","statusCode":"2000","to":"010"}}}`, i) + _, _ = io.WriteString(w, resp) + }) + + outPath := filepath.Join(tmpDir, "out.jsonl") + startDate := time.Now().UTC().AddDate(0, 0, -8).Format("2006-01-02") + endDate := time.Now().UTC().Format("2006-01-02") + err := runExport( + "--output", outPath, + "--format", "jsonl", + "--start-date", startDate, + "--end-date", endDate, + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("read: %v", err) + } + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + if len(lines) < 2 { + t.Fatalf("expected multiple jsonl lines, got %d", len(lines)) + } + for i, l := range lines { + var probe map[string]any + if err := json.Unmarshal([]byte(l), &probe); err != nil { + 
t.Fatalf("line %d not valid JSON: %v", i, err) + } + } +} + +func TestMessagesExport_JSON(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[{"messageId":"X","type":"SMS"},{"messageId":"Y","type":"LMS"}]}`) + }) + outPath := filepath.Join(tmpDir, "out.json") + if err := runExport("--output", outPath, "--format", "json", "--throttle", "100ms", "--progress", "off"); err != nil { + t.Fatalf("execute: %v", err) + } + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("read: %v", err) + } + var arr []map[string]any + if err := json.Unmarshal(data, &arr); err != nil { + t.Fatalf("invalid JSON array: %v (%s)", err, string(data)) + } + if len(arr) != 2 { + t.Errorf("array len=%d want 2", len(arr)) + } +} + +func TestMessagesExport_JSON_AppendRejected(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "out.json") + if err := os.WriteFile(outPath, []byte("[]"), 0o644); err != nil { + t.Fatalf("seed: %v", err) + } + err := runExport("--output", outPath, "--format", "json", "--append", "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected error: json + append not allowed") + } + if !strings.Contains(err.Error(), "--append") { + t.Errorf("error should mention --append: %v", err) + } +} + +func TestMessagesExport_MultiWindowAutoSplit(t *testing.T) { + // 31일 범위 → 31개 윈도우. 각 호출의 startDate/endDate가 1일 간격이며 UTC 자정 정렬. 
+ var ( + mu sync.Mutex + startQs []string + endQs []string + callPath []string + ) + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + callPath = append(callPath, r.URL.Path) + startQs = append(startQs, r.URL.Query().Get("startDate")) + endQs = append(endQs, r.URL.Query().Get("endDate")) + mu.Unlock() + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + + outPath := filepath.Join(tmpDir, "multi.jsonl") + err := runExport( + "--output", outPath, + "--format", "jsonl", + "--start-date", "2026-04-02", + "--end-date", "2026-05-03", + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + + mu.Lock() + defer mu.Unlock() + if len(callPath) != 31 { + t.Fatalf("calls=%d want 31", len(callPath)) + } + for _, p := range callPath { + if strings.Contains(p, "messages-internal") { + t.Fatalf("internal endpoint used: %s", p) + } + if !strings.HasSuffix(p, "/messages/v4/list") { + t.Errorf("unexpected path: %s", p) + } + } + + prevEnd := "" + for i, s := range startQs { + sTime, err := time.Parse(time.RFC3339, s) + if err != nil { + t.Fatalf("start[%d] parse: %v", i, err) + } + if !sTime.Equal(sTime.Truncate(24 * time.Hour).UTC()) { + t.Errorf("start[%d]=%s not at UTC midnight", i, s) + } + eTime, err := time.Parse(time.RFC3339, endQs[i]) + if err != nil { + t.Fatalf("end[%d] parse: %v", i, err) + } + if d := eTime.Sub(sTime); d != 24*time.Hour { + t.Errorf("window[%d] span=%v want 24h", i, d) + } + if i > 0 && endQs[i-1] != s { + t.Errorf("window[%d] start=%s does not chain from prev end=%s", i, s, prevEnd) + } + prevEnd = endQs[i] + } +} + +func TestMessagesExport_LookbackExceeded(t *testing.T) { + called := atomic.Int64{} + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + called.Add(1) + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "no.csv") + oldDate := time.Now().UTC().AddDate(0, 0, 
-200).Format("2006-01-02") + err := runExport("--output", outPath, "--start-date", oldDate, "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected lookback error") + } + if !errors.Is(err, exporter.ErrLookbackExceeded) { + t.Errorf("err=%v want ErrLookbackExceeded", err) + } + if called.Load() != 0 { + t.Errorf("API called %d times before validation", called.Load()) + } + if _, err := os.Stat(outPath); !errors.Is(err, os.ErrNotExist) { + // 파일이 생기지 말아야 — 단, openExportOutput는 운영적으로 검증 전에 호출되지 않으므로 + // 검증 실패 → file 미생성 보장. + t.Errorf("file should not be created on validation error: %v", err) + } +} + +func TestMessagesExport_PageSizeUpperBound(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "nope.csv") + err := runExport("--output", outPath, "--page-size", "500", "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected page-size error") + } + if !strings.Contains(err.Error(), "exceeds max") { + t.Errorf("unexpected error: %v", err) + } +} + +func TestMessagesExport_ThrottleMinimum(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "nope.csv") + err := runExport("--output", outPath, "--throttle", "50ms", "--progress", "off") + if err == nil { + t.Fatal("expected throttle error") + } + if !strings.Contains(err.Error(), "throttle") { + t.Errorf("unexpected error: %v", err) + } +} + +func TestMessagesExport_RequiredOutput(t *testing.T) { + setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + // --output 미지정 — cobra가 required flag 에러 반환. 
+ err := runExport("--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected required flag error") + } + if !strings.Contains(err.Error(), "output") { + t.Errorf("error should mention output: %v", err) + } +} + +func TestMessagesExport_FileExistsWithoutAppend(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "exists.csv") + if err := os.WriteFile(outPath, []byte("existing\n"), 0o644); err != nil { + t.Fatalf("seed: %v", err) + } + err := runExport("--output", outPath, "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected file-exists error") + } + if !strings.Contains(err.Error(), "이미 존재") { + t.Errorf("error should mention file exists: %v", err) + } +} + +func TestMessagesExport_Append_HeaderMismatch(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "bad.csv") + if err := os.WriteFile(outPath, []byte("wrong,header,here\n"), 0o644); err != nil { + t.Fatalf("seed: %v", err) + } + err := runExport("--output", outPath, "--append", "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected header mismatch error") + } +} + +func TestMessagesExport_Append_Success(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[{"messageId":"NEW","type":"SMS"}]}`) + }) + outPath := filepath.Join(tmpDir, "ok.csv") + // 기존 파일: 정확한 헤더 + 1행. 
+ existing := strings.Join(messagesCSVHeaders, ",") + "\nOLD,SMS,,,,,,,,,,,,\n" + if err := os.WriteFile(outPath, []byte(existing), 0o644); err != nil { + t.Fatalf("seed: %v", err) + } + err := runExport("--output", outPath, "--append", "--throttle", "100ms", "--progress", "off") + if err != nil { + t.Fatalf("execute: %v", err) + } + rows := mustReadCSV(t, outPath) + if len(rows) != 3 { // 헤더 + OLD + NEW + t.Fatalf("rows=%d want 3", len(rows)) + } + if rows[1][0] != "OLD" || rows[2][0] != "NEW" { + t.Errorf("ids=%q,%q", rows[1][0], rows[2][0]) + } + // 헤더가 다시 쓰이지 않았는지 검증: 헤더 라인이 단 1회. + data, _ := os.ReadFile(outPath) + if c := strings.Count(string(data), "messageId,type,status,statusCode"); c != 1 { + t.Errorf("header appears %d times, want 1", c) + } +} + +func TestMessagesExport_ResumeToken(t *testing.T) { + // 첫 라운드: cancel을 통해 토큰을 받고, 두번째 라운드에서 재개. + var calls atomic.Int64 + stderr, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + n := calls.Add(1) + if n == 1 { + _, _ = io.WriteString(w, `{"messages":[{"messageId":"P1","type":"SMS"}],"nextKey":"K2"}`) + return + } + if n == 2 { + // 두번째 호출에서 5xx — fetcher 에러로 토큰 발급. + w.WriteHeader(500) + _, _ = w.Write([]byte(`{"errorCode":"X","errorMessage":"boom"}`)) + return + } + _, _ = io.WriteString(w, `{"messages":[{"messageId":"P3","type":"SMS"}]}`) + }) + + outPath := filepath.Join(tmpDir, "resume.csv") + startDate := recentDate(1) + endDate := time.Now().UTC().Format("2006-01-02") + err := runExport("--output", outPath, "--start-date", startDate, "--end-date", endDate, "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected error from second-call 500") + } + se := stderr.String() + if !strings.Contains(se, "--resume-token") { + t.Fatalf("stderr should contain resume token hint:\n%s", se) + } + // 두번째 라운드 — append + resume-token. 토큰 추출. 
+ tok := extractResumeToken(se) + if tok == "" { + t.Fatalf("no token in stderr:\n%s", se) + } + + // 두번째 호출 사이클 — 동일 date range 유지해야 windows split 결과가 일치. + resetMessagesExportFlags() + resetPflagChanged(messagesExportCmd) + err = runExport( + "--output", outPath, "--append", + "--start-date", startDate, "--end-date", endDate, + "--resume-token", tok, + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("resume execute: %v", err) + } + rows := mustReadCSV(t, outPath) + // 헤더 + P1 (첫 라운드) + P3 (두번째 라운드). + if len(rows) < 2 { + t.Fatalf("rows=%d want >=2", len(rows)) + } +} + +func TestMessagesExport_StartKey(t *testing.T) { + var capturedKey atomic.Value + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, r *http.Request) { + // 첫 호출의 startKey만 캡처. 이후 호출은 nextKey 없는 응답으로 종료. + if capturedKey.Load() == nil { + capturedKey.Store(r.URL.Query().Get("startKey")) + } + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "k.csv") + err := runExport("--output", outPath, "--start-key", "MSG999", "--throttle", "100ms", "--progress", "off") + if err != nil { + t.Fatalf("execute: %v", err) + } + got, _ := capturedKey.Load().(string) + if got != "MSG999" { + t.Errorf("startKey=%q want MSG999", got) + } +} + +func TestMessagesExport_NoInternalEndpoint(t *testing.T) { + var paths []string + var mu sync.Mutex + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + paths = append(paths, r.URL.Path) + mu.Unlock() + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "p.csv") + err := runExport( + "--output", outPath, + "--start-date", "2026-04-02", + "--end-date", "2026-04-15", + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + mu.Lock() + defer mu.Unlock() + for _, p := range paths { + if strings.Contains(p, "messages-internal") { + t.Fatalf("internal endpoint used: %s", p) 
+ } + } +} + +func TestMessagesExport_FiltersInQuery(t *testing.T) { + var captured atomic.Value + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, r *http.Request) { + if captured.Load() == nil { + captured.Store(r.URL.Query()) + } + _, _ = io.WriteString(w, `{"messages":[]}`) + }) + outPath := filepath.Join(tmpDir, "f.csv") + err := runExport( + "--output", outPath, + "--type", "SMS", + "--status-code", "4000", + "--to", "01011112222", + "--from", "01033334444", + "--group-id", "GRP1", + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + q, _ := captured.Load().(url.Values) + if q.Get("type") != "SMS" { + t.Errorf("type=%q", q.Get("type")) + } + if q.Get("statusCode") != "4000" { + t.Errorf("statusCode=%q", q.Get("statusCode")) + } + if q.Get("to") != "01011112222" { + t.Errorf("to=%q", q.Get("to")) + } + if q.Get("from") != "01033334444" { + t.Errorf("from=%q", q.Get("from")) + } + if q.Get("groupId") != "GRP1" { + t.Errorf("groupId=%q", q.Get("groupId")) + } +} + +func TestMessagesExport_StdoutOutput(t *testing.T) { + stderr, _ := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[{"messageId":"X1","type":"SMS"}]}`) + }) + // progress=on을 의도적으로 켜서 stderr에만 쓰이는지 검증. + err := runExport("--output", "-", "--format", "jsonl", "--throttle", "100ms", "--progress", "on") + if err != nil { + t.Fatalf("execute: %v", err) + } + if outWriter == nil { + t.Fatal("outWriter nil") + } + stdout := outWriter.(*bytes.Buffer).String() + if !strings.Contains(stdout, `"messageId":"X1"`) { + t.Errorf("stdout missing JSONL output: %q", stdout) + } + // progress는 stderr로만 가야 함 — stdout에는 진행률 텍스트 없어야. + if strings.Contains(stdout, "다운로드 현황") { + t.Errorf("progress leaked into stdout: %q", stdout) + } + // stderr에는 적어도 finalize 한 줄이 들어가야. 
+ if !strings.Contains(stderr.String(), "다운로드") { + t.Errorf("progress not on stderr: %q", stderr.String()) + } +} + +func TestMessagesExport_BOM(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `{"messages":[{"messageId":"K","type":"SMS"}]}`) + }) + outPath := filepath.Join(tmpDir, "bom.csv") + err := runExport("--output", outPath, "--bom", "--throttle", "100ms", "--progress", "off") + if err != nil { + t.Fatalf("execute: %v", err) + } + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("read: %v", err) + } + if len(data) < 3 || data[0] != 0xEF || data[1] != 0xBB || data[2] != 0xBF { + t.Errorf("missing UTF-8 BOM prefix: % X", data[:min(3, len(data))]) + } +} + +func TestMessagesExport_5xxError(t *testing.T) { + var calls atomic.Int64 + stderr, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + n := calls.Add(1) + if n == 1 { + _, _ = io.WriteString(w, `{"messages":[{"messageId":"A","type":"SMS"}],"nextKey":"K2"}`) + return + } + w.WriteHeader(500) + _, _ = w.Write([]byte(`{"errorCode":"X","errorMessage":"boom"}`)) + }) + + outPath := filepath.Join(tmpDir, "err.csv") + err := runExport("--output", outPath, "--throttle", "100ms", "--progress", "off") + if err == nil { + t.Fatal("expected error from 500") + } + // 첫 페이지 데이터는 디스크에 보존되어야 함. + rows := mustReadCSV(t, outPath) + if len(rows) < 2 { + t.Fatalf("rows=%d want >=2 (header + first page)", len(rows)) + } + if rows[1][0] != "A" { + t.Errorf("first row id=%q want A", rows[1][0]) + } + // stderr에 resume-token 안내. + if !strings.Contains(stderr.String(), "--resume-token") { + t.Errorf("stderr missing resume-token hint: %q", stderr.String()) + } +} + +func TestMessagesExport_EnsureMessageID_Backfill(t *testing.T) { + _, tmpDir := setupMessagesExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + // map 응답인데 record에 messageId 없음 → key로 채워져야. 
+ _, _ = io.WriteString(w, `{"messageList":{"BACKFILL-ID":{"type":"SMS","statusCode":"2000"}}}`)
+ })
+ outPath := filepath.Join(tmpDir, "bf.csv")
+ if err := runExport("--output", outPath, "--throttle", "100ms", "--progress", "off"); err != nil {
+ t.Fatalf("execute: %v", err)
+ }
+ rows := mustReadCSV(t, outPath)
+ if len(rows) != 2 {
+ t.Fatalf("rows=%d", len(rows))
+ }
+ if rows[1][0] != "BACKFILL-ID" {
+ t.Errorf("messageId=%q want BACKFILL-ID", rows[1][0])
+ }
+}
+
+// --- helpers ---
+
+func mustReadCSV(t *testing.T, path string) [][]string { // reads the whole CSV at path, failing the test on any error; variable field counts allowed
+ t.Helper()
+ f, err := os.Open(path)
+ if err != nil {
+ t.Fatalf("open: %v", err)
+ }
+ defer func() { _ = f.Close() }()
+ r := csv.NewReader(f)
+ r.FieldsPerRecord = -1
+ rows, err := r.ReadAll()
+ if err != nil {
+ t.Fatalf("csv read: %v", err)
+ }
+ return rows
+}
+
+// extractResumeToken parses the "--resume-token TOKEN" hint from stderr.
+func extractResumeToken(stderr string) string {
+ const marker = "--resume-token "
+ _, rest, ok := strings.Cut(stderr, marker)
+ if !ok {
+ return ""
+ }
+ // The token ends at the first whitespace or line break.
+ end := strings.IndexAny(rest, " \n\t\r")
+ if end < 0 {
+ return strings.TrimSpace(rest)
+ }
+ return strings.TrimSpace(rest[:end])
+}
+
+func resetMessagesExportFlags() { // restores every messages-export flag var to its registered default between tests
+ msgExportFlagOutput = ""
+ msgExportFlagFormat = "csv"
+ msgExportFlagThrottle = messagesExportThrottleDefault
+ msgExportFlagPageSize = messagesExportPageSizeDefault
+ msgExportFlagMaxPages = 0
+ msgExportFlagAppend = false
+ msgExportFlagBOM = false
+ msgExportFlagProgress = "auto"
+ msgExportFlagNoProgress = false
+ msgExportFlagResumeToken = ""
+ msgExportFlagStartDate = ""
+ msgExportFlagEndDate = ""
+ msgExportFlagType = ""
+ msgExportFlagStatus = ""
+ msgExportFlagTo = ""
+ msgExportFlagFrom = ""
+ msgExportFlagGroupID = ""
+ msgExportFlagStartKey = ""
+}
diff --git a/cmd/messages_test.go b/cmd/messages_test.go
index 52995aa..74826a1 100644
--- a/cmd/messages_test.go
+++ b/cmd/messages_test.go
@@ -163,7 +163,7 @@ func TestMessagesList_JSON(t *testing.T) {
 t.Fatalf("unexpected error: %v", err)
 }
 
- var parsed map[string]interface{}
+ var parsed map[string]any
 if err := json.Unmarshal([]byte(strings.TrimSpace(buf.String())), &parsed); err != nil {
 t.Fatalf("output is not valid JSON: %v", err)
 }
diff --git a/cmd/send.go b/cmd/send.go
index dd063b2..4b8bb7b 100644
--- a/cmd/send.go
+++ b/cmd/send.go
@@ -23,10 +23,10 @@ var sendCmd = &cobra.Command{
 // Shared flags available to all send subcommands.
 var (
- sendFlagTo string
- sendFlagFrom string
- sendFlagText string
- sendFlagScheduled string
+ sendFlagTo string
+ sendFlagFrom string
+ sendFlagText string
+ sendFlagScheduled string
 sendFlagFile string // CSV file for bulk sending
 sendFlagSkipValidation bool
 sendFlagStrict bool
@@ -95,10 +95,7 @@ func sendMessages(c *client.Client, msgs []types.Message) error {
 batchNum := 0
 for start := 0; start < len(msgs); start += maxBatchSize {
- end := start + maxBatchSize
- if end > len(msgs) {
- end = len(msgs)
- }
+ end := min(start+maxBatchSize, len(msgs))
 batch := msgs[start:end]
 batchNum++
@@ -271,6 +268,3 @@ func resolveFrom(c *client.Client) (string, error) {
 len(phones), strings.Join(lines, "\n"))
 }
 }
-
-// boolPtr returns a pointer to the given bool value.
-func boolPtr(v bool) *bool { return &v }
diff --git a/cmd/send_ata.go b/cmd/send_ata.go
index 171f4cd..a187f99 100644
--- a/cmd/send_ata.go
+++ b/cmd/send_ata.go
@@ -74,7 +74,7 @@ func runSendATA(cmd *cobra.Command, args []string) error {
 Title: sendATAFlagTitle,
 }
 if sendATAFlagDisableSms {
- kakaoOpts.DisableSms = boolPtr(true)
+ kakaoOpts.DisableSms = new(true) // NOTE(review): new(expr) requires Go 1.26+ — confirm go.mod's go directive before removing boolPtr
 }
 var msgs []types.Message
diff --git a/cmd/send_bms.go b/cmd/send_bms.go
index e6b2260..5938934 100644
--- a/cmd/send_bms.go
+++ b/cmd/send_bms.go
@@ -100,7 +100,7 @@ func runSendBMS(cmd *cobra.Command, args []string) error {
 },
 }
 if sendBMSFlagAd {
- kakaoOpts.AdFlag = boolPtr(true)
+ kakaoOpts.AdFlag = new(true) // NOTE(review): new(expr) requires Go 1.26+ — confirm go.mod
 }
 if sendBMSFlagFree {
@@ -123,7 +123,7 @@
 }
 if sendBMSFlagAdult {
- kakaoOpts.BMS.Adult = boolPtr(true)
+ kakaoOpts.BMS.Adult = new(true) // NOTE(review): new(expr) requires Go 1.26+ — confirm go.mod
 }
 buttons, err := buildBMSButtons()
diff --git a/cmd/send_rcs.go b/cmd/send_rcs.go
index 16a30bb..6a4b98a 100644
--- a/cmd/send_rcs.go
+++ b/cmd/send_rcs.go
@@ -70,7 +70,7 @@ func runSendRCS(cmd *cobra.Command, args []string) error {
 rcsOpts.TemplateID = sendRCSFlagTemplateID
 }
 if sendRCSFlagCopyAllowed {
- rcsOpts.CopyAllowed = boolPtr(true)
+ rcsOpts.CopyAllowed = new(true) // NOTE(review): new(expr) requires Go 1.26+ — confirm go.mod's go directive before removing boolPtr
 }
 // Handle image upload for RCS
diff --git a/cmd/send_test.go b/cmd/send_test.go
index 9d0aecd..dd9084f 100644
--- a/cmd/send_test.go
+++ b/cmd/send_test.go
@@ -474,7 +474,7 @@ func TestSendSMS_JSONOutput(t *testing.T) {
 output := buf.String()
 
 // JSON output should be valid JSON
- var parsed map[string]interface{}
+ var parsed map[string]any
 if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil {
 t.Fatalf("output is not valid JSON: %v\noutput: %s", err, output)
 }
@@ -907,7 +907,7 @@ func TestSendMessages_BatchSplit(t *testing.T) {
 // Build 5 messages and call sendMessages directly.
 var msgs []types.Message
- for i := 0; i < 5; i++ {
+ for i := range 5 {
 msgs = append(msgs, types.Message{
 To: fmt.Sprintf("0101111%04d", i),
 From: "01012345678",
@@ -1417,7 +1417,7 @@ func TestSendMessages_ExactlyMaxBatch(t *testing.T) {
 // Exactly maxBatchSize (5) messages: should result in exactly 1 API call.
 var msgs []types.Message
- for i := 0; i < 5; i++ {
+ for i := range 5 {
 msgs = append(msgs, types.Message{
 To: fmt.Sprintf("0101111%04d", i),
 From: "01012345678",
@@ -1489,7 +1489,7 @@ func TestSendMessages_MultipleBatches(t *testing.T) {
 // 7 messages with maxBatchSize=3 should yield 3 API calls: 3 + 3 + 1.
var msgs []types.Message - for i := 0; i < 7; i++ { + for i := range 7 { msgs = append(msgs, types.Message{ To: fmt.Sprintf("0101111%04d", i), From: "01012345678", @@ -1979,7 +1979,7 @@ func TestSendATA_JSONOutput(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - var parsed map[string]interface{} + var parsed map[string]any if err := json.Unmarshal([]byte(strings.TrimSpace(buf.String())), &parsed); err != nil { t.Fatalf("output is not valid JSON: %v", err) } diff --git a/cmd/senderid_test.go b/cmd/senderid_test.go index 346d16f..74945e9 100644 --- a/cmd/senderid_test.go +++ b/cmd/senderid_test.go @@ -645,8 +645,8 @@ func TestSenderIDList_EmptyActive(t *testing.T) { t.Errorf("expected PHONE NUMBER header even for empty list, got:\n%s", output) } // Should not contain any phone number data - lines := strings.Split(strings.TrimSpace(output), "\n") - for _, line := range lines { + lines := strings.SplitSeq(strings.TrimSpace(output), "\n") + for line := range lines { trimmed := strings.TrimSpace(line) if trimmed == "" { continue @@ -770,8 +770,8 @@ func TestSenderIDList_AllFieldsEmpty(t *testing.T) { // Empty method and expireAt should show "-" // Count occurrences of "-" that are not part of table borders // The phone number row should have "-" for status, method, and expireAt - lines := strings.Split(output, "\n") - for _, line := range lines { + lines := strings.SplitSeq(output, "\n") + for line := range lines { if strings.Contains(line, "01000000000") { // This data row should contain "-" for empty fields dashCount := strings.Count(line, "-") diff --git a/cmd/statistics.go b/cmd/statistics.go new file mode 100644 index 0000000..3dae03b --- /dev/null +++ b/cmd/statistics.go @@ -0,0 +1,16 @@ +package cmd + +import "github.com/spf13/cobra" + +var statisticsCmd = &cobra.Command{ + Use: "statistics", + Aliases: []string{"stats"}, + Short: "발송 통계를 조회하고 export합니다", + Long: `발송 통계 관련 명령 그룹입니다. 
+
+ solactl statistics export-daily 일별 통계 내역을 CSV/JSON/JSONL로 export`,
+}
+
+func init() {
+ rootCmd.AddCommand(statisticsCmd)
+}
diff --git a/cmd/statistics_export_daily.go b/cmd/statistics_export_daily.go
new file mode 100644
index 0000000..a3a64d6
--- /dev/null
+++ b/cmd/statistics_export_daily.go
@@ -0,0 +1,448 @@
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/solapi/solactl/pkg/client"
+ "github.com/solapi/solactl/pkg/clock"
+ "github.com/solapi/solactl/pkg/exporter"
+ "github.com/solapi/solactl/pkg/output"
+ "github.com/solapi/solactl/pkg/progress"
+)
+
+// Caps and defaults. The statistics/daily endpoint is tuned more conservatively than messages.
+const (
+ statisticsExportPageSizeMax = 100
+ statisticsExportPageSizeDefault = 30
+ statisticsExportThrottleDefault = 500 * time.Millisecond
+)
+
+var (
+ statsExportFlagOutput string
+ statsExportFlagFormat string
+ statsExportFlagThrottle time.Duration
+ statsExportFlagPageSize int
+ statsExportFlagMaxPages int
+ statsExportFlagAppend bool
+ statsExportFlagBOM bool
+ statsExportFlagProgress string
+ statsExportFlagNoProgress bool
+ statsExportFlagResumeToken string
+
+ statsExportFlagStartDate string
+ statsExportFlagEndDate string
+ statsExportFlagPrepaid string
+ statsExportFlagOffset int
+)
+
+var statisticsExportDailyCmd = &cobra.Command{
+ Use: "export-daily",
+ Short: "일별 발송 통계를 CSV/JSON/JSONL로 export합니다",
+ Long: `일별 발송 통계를 파일로 export합니다.
+
+7일을 초과하는 기간은 자동으로 1일 단위 윈도우로 분할되어 streaming됩니다.
+페이지 사이 --throttle 만큼 sleep을 두어 통계 서버 부하를 줄입니다.
+6개월(180일) 이전 데이터는 자동 삭제되므로 조회할 수 없습니다.
+
+CSV는 count.* 키의 합집합을 헤더로 사용합니다(union-header).
+모든 윈도우/페이지의 record를 수집한 뒤 단일 헤더로 일괄 출력합니다.
+Ctrl+C로 중단하면 stderr에 재개용 --resume-token이 출력됩니다.`,
+ RunE: runStatisticsExportDaily,
+}
+
+func init() {
+ f := statisticsExportDailyCmd.Flags()
+ f.StringVar(&statsExportFlagOutput, "output", "", "출력 파일 경로 (필수, '-'는 stdout)")
+ f.StringVar(&statsExportFlagFormat, "format", "csv", "출력 포맷 (csv|json|jsonl)")
+ f.DurationVar(&statsExportFlagThrottle, "throttle", statisticsExportThrottleDefault, "페이지/윈도우 호출 사이 sleep (최소 100ms)")
+ f.IntVar(&statsExportFlagPageSize, "page-size", statisticsExportPageSizeDefault, fmt.Sprintf("페이지당 건수 (최대 %d)", statisticsExportPageSizeMax))
+ f.IntVar(&statsExportFlagMaxPages, "max-pages", 0, "전체 페이지 상한 (0=무제한)")
+ f.BoolVar(&statsExportFlagAppend, "append", false, "기존 파일에 이어쓰기 (헤더 검증)")
+ f.BoolVar(&statsExportFlagBOM, "bom", false, "UTF-8 BOM 추가 (Windows Excel 한글)")
+ f.StringVar(&statsExportFlagProgress, "progress", "auto", "진행률 표시 모드 (auto|on|off)")
+ f.BoolVar(&statsExportFlagNoProgress, "no-progress", false, "--progress=off와 동일")
+ f.StringVar(&statsExportFlagResumeToken, "resume-token", "", "중단된 export 재개 토큰")
+ f.StringVar(&statsExportFlagStartDate, "start-date", "", "시작 날짜 (ISO 8601, 6개월 이내, 필수)")
+ f.StringVar(&statsExportFlagEndDate, "end-date", "", "종료 날짜 (ISO 8601, 필수)")
+ f.StringVar(&statsExportFlagPrepaid, "prepaid", "", "선불/후불 필터 (true|false). 미지정 시 전체")
+ f.IntVar(&statsExportFlagOffset, "offset", 0, "단일 윈도우 내 재개용 offset (resume-token 권장)")
+ _ = statisticsExportDailyCmd.MarkFlagRequired("output")
+ statisticsCmd.AddCommand(statisticsExportDailyCmd)
+}
+
+// statisticsCSVFixedHeaders lists the fixed prefix columns guaranteed in every response.
+// The count.* keys are appended dynamically as the union-header suffix.
+var statisticsCSVFixedHeaders = []string{
+ "date", "accountId", "prepaid", "balance", "point", "profit", "refundBalance", "refundPoint",
+}
+
+// dailyStatRecord is a partial decoding of one statistics/daily response item.
+type dailyStatRecord struct {
+ Date string `json:"date"`
+ AccountID string `json:"accountId"`
+ Prepaid *bool `json:"prepaid,omitempty"`
+ Balance float64 `json:"balance"`
+ Point float64 `json:"point"`
+ Profit float64 `json:"profit"`
+ Count map[string]any `json:"count"`
+ Refund *struct {
+ Balance float64 `json:"balance"`
+ Point float64 `json:"point"`
+ } `json:"refund,omitempty"`
+}
+
+func (r *dailyStatRecord) fixedRow() []string { // renders the fixed prefix columns as CSV cell text
+ prepaid := ""
+ if r.Prepaid != nil {
+ if *r.Prepaid {
+ prepaid = "true"
+ } else {
+ prepaid = "false"
+ }
+ }
+ refBal, refPt := "", ""
+ if r.Refund != nil {
+ refBal = strconv.FormatFloat(r.Refund.Balance, 'f', -1, 64)
+ refPt = strconv.FormatFloat(r.Refund.Point, 'f', -1, 64)
+ }
+ return []string{
+ r.Date, r.AccountID, prepaid,
+ strconv.FormatFloat(r.Balance, 'f', -1, 64),
+ strconv.FormatFloat(r.Point, 'f', -1, 64),
+ strconv.FormatFloat(r.Profit, 'f', -1, 64),
+ refBal, refPt,
+ }
+}
+
+func runStatisticsExportDaily(_ *cobra.Command, _ []string) error {
+ // 1. Resolve the progress display mode.
+ mode, err := resolveProgressMode(statsExportFlagProgress, statsExportFlagNoProgress)
+ if err != nil {
+ return err
+ }
+
+ // 2. start/end are required (unlike messages export — no defaults here).
+ if statsExportFlagStartDate == "" {
+ return errors.New("--start-date는 필수입니다")
+ }
+ if statsExportFlagEndDate == "" {
+ return errors.New("--end-date는 필수입니다")
+ }
+
+ // 3. Validate the prepaid filter.
+ if err := validatePrepaidFlag(statsExportFlagPrepaid); err != nil {
+ return err
+ }
+
+ // 4. Validate the output format.
+ format := statsExportFlagFormat
+ if format != "csv" && format != "json" && format != "jsonl" {
+ return fmt.Errorf("잘못된 --format 값: %s (csv|json|jsonl)", format)
+ }
+ if format == "json" && statsExportFlagAppend {
+ return errors.New("--format json은 --append와 함께 사용할 수 없습니다 (json array는 append-safe 불가)")
+ }
+
+ // 5. Decide resume state from --resume-token / --offset.
+ startWindowDate, initialState, err := resolveStatisticsResumeState(statsExportFlagResumeToken, statsExportFlagOffset)
+ if err != nil {
+ return err
+ }
+
+ // 6. Parse the dates.
+ startDate, err := parseExportDate(statsExportFlagStartDate)
+ if err != nil {
+ return fmt.Errorf("--start-date 파싱 실패: %w", err)
+ }
+ endDate, err := parseExportDate(statsExportFlagEndDate)
+ if err != nil {
+ return fmt.Errorf("--end-date 파싱 실패: %w", err)
+ }
+
+ now := time.Now().UTC()
+
+ // 7. Pre-validate page-size / throttle / date range.
+ if err := exporter.ValidatePageSize("statistics export-daily --page-size", statsExportFlagPageSize, statisticsExportPageSizeMax); err != nil {
+ return err
+ }
+ if err := exporter.ValidateThrottle(statsExportFlagThrottle, exporter.MinThrottle); err != nil {
+ return err
+ }
+ if _, err := exporter.ValidateDateRange(startDate, endDate, now, exporter.DefaultMaxLookbackDays); err != nil {
+ return err
+ }
+
+ // 8. Prepare the API client.
+ c, err := newClient()
+ if err != nil {
+ return err
+ }
+
+ // 9. Open the output file/stream (only after validation passes).
+ w, closer, err := openExportOutput(statsExportFlagOutput, statsExportFlagAppend)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = closer.Close() }()
+
+ // 10. Build the per-format RowWriter.
+ rw, err := newStatisticsRowWriter(format, w, statsExportFlagOutput, statsExportFlagAppend, statsExportFlagBOM)
+ if err != nil {
+ return err
+ }
+
+ // 11. Progress reporter (always writes to stderr).
+ rep := progress.New(progress.Options{Writer: errOut(), Mode: mode})
+
+ // 12. Build the page fetcher.
+ fetcher := newStatisticsFetcher(c, statsExportFlagPageSize, statsExportFlagPrepaid)
+
+ // 13. Run the exporter.
+ opts := exporter.Options{
+ Now: now,
+ StartDate: startDate,
+ EndDate: endDate,
+ PageSize: statsExportFlagPageSize,
+ MaxPages: statsExportFlagMaxPages,
+ Throttle: statsExportFlagThrottle,
+ Fetcher: fetcher,
+ Writer: rw,
+ Reporter: rep,
+ Clock: clock.Real(),
+ StartWindowDate: startWindowDate,
+ InitialState: initialState,
+ }
+ result, runErr := exporter.Run(ctx(), opts)
+
+ // 14. Per-format finalization.
+ // - CSV: call FinalizeWrite for the union-header batch write (regardless of runErr)
+ // - JSON: close the array
+ var finalizeErr error
+ switch v := rw.(type) {
+ case *statisticsCSVRowWriter:
+ finalizeErr = v.FinalizeWrite()
+ case *output.JSONArrayWriter:
+ finalizeErr = v.Close()
+ }
+ if runErr == nil && finalizeErr != nil {
+ runErr = finalizeErr
+ }
+
+ // 15. Partial-result notice + resume hint.
+ if result.ResumeToken != "" {
+ _, _ = fmt.Fprintf(errOut(), "\n중단됨. 누적 %d건 처리.\n", result.RecordsWritten)
+ _, _ = fmt.Fprintf(errOut(), "재개:\n solactl statistics export-daily --output %s --append --resume-token %s\n",
+ statsExportFlagOutput, result.ResumeToken)
+ }
+ return runErr
+}
+
+// resolveStatisticsResumeState prefers --resume-token, then --offset (within a single window).
+func resolveStatisticsResumeState(token string, offset int) (string, exporter.PageState, error) {
+ if token != "" {
+ tok, err := exporter.DecodeToken(token)
+ if err != nil {
+ return "", nil, fmt.Errorf("--resume-token 디코드 실패: %w", err)
+ }
+ return tok.Window, tok.State, nil
+ }
+ if offset > 0 {
+ b, err := json.Marshal(map[string]int{"offset": offset})
+ if err != nil {
+ return "", nil, err
+ }
+ return "", b, nil
+ }
+ return "", nil, nil
+}
+
+// validatePrepaidFlag allows only the empty string or "true"/"false".
+func validatePrepaidFlag(v string) error {
+ switch v {
+ case "", "true", "false":
+ return nil
+ }
+ return fmt.Errorf("잘못된 --prepaid 값: %s (true|false 또는 미지정)", v)
+}
+
+// newStatisticsFetcher builds a GET /messages/v4/statistics/daily PageFetcher.
+// offset/limit pagination; len(arr) < pageSize is treated as end of window.
+func newStatisticsFetcher(c *client.Client, pageSize int, prepaid string) exporter.PageFetcher {
+ return func(ctx context.Context, w exporter.Window, state exporter.PageState) (exporter.Page, error) {
+ params := url.Values{}
+ params.Set("limit", strconv.Itoa(pageSize))
+ params.Set("startDate", w.Start.UTC().Format(time.RFC3339))
+ params.Set("endDate", w.End.UTC().Format(time.RFC3339))
+ if prepaid != "" {
+ params.Set("prepaid", prepaid)
+ }
+ offset := 0
+ if len(state) > 0 {
+ var st struct {
+ Offset int `json:"offset"`
+ }
+ if err := json.Unmarshal(state, &st); err == nil {
+ offset = st.Offset
+ }
+ }
+ if offset > 0 {
+ params.Set("offset", strconv.Itoa(offset))
+ }
+
+ raw, err := c.Get(ctx, "messages/v4/statistics/daily", params)
+ if err != nil {
+ return exporter.Page{}, err
+ }
+
+ var arr []json.RawMessage
+ if err := json.Unmarshal(raw, &arr); err != nil {
+ return exporter.Page{}, fmt.Errorf("응답 파싱 실패: %w", err)
+ }
+
+ // End-of-window signal: empty array or len < pageSize.
+ // Fetch the next page only when len == pageSize — avoids one extra empty call.
+ var next exporter.PageState
+ if len(arr) >= pageSize {
+ nextOffset := offset + len(arr)
+ nb, mErr := json.Marshal(map[string]int{"offset": nextOffset})
+ if mErr != nil {
+ return exporter.Page{}, mErr
+ }
+ next = nb
+ }
+ return exporter.Page{Records: arr, Next: next}, nil
+ }
+}
+
+// --- RowWriter implementations ---
+
+// statisticsCSVRowWriter accumulates every record (union-header strategy) and emits them in
+// FinalizeWrite under a single header; streaming is impossible while the header is undecided.
+type statisticsCSVRowWriter struct {
+ w io.Writer
+ appendMode bool
+ bom bool
+ appendReader io.ReadCloser
+
+ records []*dailyStatRecord
+ countKeys map[string]struct{}
+}
+
+func (s *statisticsCSVRowWriter) WriteRecord(rec json.RawMessage) error {
+ var r dailyStatRecord
+ if err := json.Unmarshal(rec, &r); err != nil {
+ return fmt.Errorf("통계 레코드 디코드 실패: %w", err)
+ }
+ s.records = append(s.records, &r)
+ for k := range r.Count {
+ s.countKeys[k] = struct{}{}
+ }
+ return nil
+}
+
+// Flush is intentionally a no-op: CSV output is written in one batch by FinalizeWrite.
+func (s *statisticsCSVRowWriter) Flush() error { return nil }
+
+// FinalizeWrite writes the accumulated records in one pass under the union header.
+// It is also called after a runErr so that partial results are preserved.
+func (s *statisticsCSVRowWriter) FinalizeWrite() error {
+ if s.appendReader != nil {
+ defer func() { _ = s.appendReader.Close() }()
+ }
+ countKeys := make([]string, 0, len(s.countKeys))
+ for k := range s.countKeys {
+ countKeys = append(countKeys, k)
+ }
+ sort.Strings(countKeys)
+ headers := make([]string, 0, len(statisticsCSVFixedHeaders)+len(countKeys))
+ headers = append(headers, statisticsCSVFixedHeaders...)
+ for _, k := range countKeys {
+ headers = append(headers, "count_"+k)
+ }
+
+ cw, err := output.NewCSVWriter(s.w, s.appendReader, output.CSVOptions{
+ Headers: headers,
+ AddBOM: s.bom,
+ Append: s.appendMode,
+ StripCtrl: true,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, r := range s.records {
+ row := append(r.fixedRow(), countCellValues(r.Count, countKeys)...)
+ if err := cw.WriteRow(row); err != nil {
+ return err
+ }
+ }
+ return cw.Flush()
+}
+
+// countCellValues returns the cell text for each key from r.Count.
+// Missing keys become empty cells.
+func countCellValues(count map[string]any, keys []string) []string {
+ cells := make([]string, len(keys))
+ for i, k := range keys {
+ if v, ok := count[k]; ok {
+ cells[i] = formatNumericCell(v)
+ }
+ }
+ return cells
+}
+
+// formatNumericCell normalizes the various JSON numeric/string forms of a count cell to string.
+func formatNumericCell(v any) string {
+ switch n := v.(type) {
+ case float64:
+ return strconv.FormatFloat(n, 'f', -1, 64)
+ case int:
+ return strconv.Itoa(n)
+ case int64:
+ return strconv.FormatInt(n, 10)
+ case json.Number:
+ return n.String()
+ case string:
+ return n
+ default:
+ b, _ := json.Marshal(v)
+ return string(b)
+ }
+}
+
+func newStatisticsRowWriter(format string, w io.Writer, path string, appendMode, bom bool) (exporter.RowWriter, error) { // picks the RowWriter for csv/jsonl/json
+ switch format {
+ case "csv":
+ rw := &statisticsCSVRowWriter{
+ w: w,
+ appendMode: appendMode,
+ bom: bom,
+ countKeys: make(map[string]struct{}),
+ }
+ if appendMode && path != "-" {
+ // Append mode: the existing header is validated at FinalizeWrite — comparable only once the union is decided.
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("--append: 기존 파일 열기 실패: %w", err)
+ }
+ rw.appendReader = r
+ }
+ return rw, nil
+ case "jsonl":
+ return output.NewJSONLWriter(w), nil
+ case "json":
+ return output.NewJSONArrayWriter(w), nil
+ }
+ return nil, fmt.Errorf("지원되지 않는 format: %s", format)
+}
diff --git a/cmd/statistics_test.go b/cmd/statistics_test.go
new file mode 100644
index 0000000..8c13fa9
--- /dev/null
+++ b/cmd/statistics_test.go
@@ -0,0 +1,794 @@
+package cmd
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/solapi/solactl/pkg/client"
+ "github.com/solapi/solactl/pkg/exporter"
+)
+
+// setupStatisticsExportTest wires up an httptest server, capture buffers, tmp dir.
+func setupStatisticsExportTest(t *testing.T, handler http.HandlerFunc) (stderr *bytes.Buffer, tmpDir string) {
+ t.Helper()
+ resetFlags()
+ resetStatisticsExportFlags()
+ resetPflagChanged(statisticsExportDailyCmd)
+
+ ts := httptest.NewServer(handler)
+ t.Cleanup(ts.Close)
+
+ c := &client.Client{
+ HTTPClient: ts.Client(),
+ APIKey: "testkey",
+ APISecret: "testsecret",
+ MaxRetries: 0,
+ BaseDelay: time.Millisecond,
+ BaseURLOverride: ts.URL,
+ }
+ clientOverride = c
+
+ var outBuf, errBuf bytes.Buffer
+ outWriter = &outBuf
+ errWriter = &errBuf
+
+ tmpDir = t.TempDir()
+
+ t.Cleanup(func() {
+ clientOverride = nil
+ outWriter = nil
+ errWriter = nil
+ resetStatisticsExportFlags()
+ resetPflagChanged(statisticsExportDailyCmd)
+ })
+ return &errBuf, tmpDir
+}
+
+// runStatsExport executes the statistics export-daily command.
+func runStatsExport(args ...string) error {
+ full := append([]string{"statistics", "export-daily"}, args...)
+ rootCmd.SetArgs(full)
+ return rootCmd.Execute()
+}
+
+// recentDateUTC: ISO date N days before today (UTC) — stays within the queryable range.
+func recentDateUTC(daysAgo int) string { + return time.Now().UTC().AddDate(0, 0, -daysAgo).Format("2006-01-02") +} + +func TestStatisticsExportDaily_CSV_SingleWindow(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + t.Errorf("method=%s want GET", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/messages/v4/statistics/daily") { + t.Errorf("path=%s", r.URL.Path) + } + resp := `[{ + "accountId":"AC1","date":"2026-05-09T00:00:00.000Z", + "count":{"SMS":10,"LMS":5}, + "prepaid":true,"balance":100.5,"point":50,"profit":25, + "refund":{"balance":0,"point":0} + }]` + _, _ = io.WriteString(w, resp) + }) + + outPath := filepath.Join(tmpDir, "out.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), + "--end-date", recentDateUTC(0), + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + + rows := mustReadCSV(t, outPath) + if len(rows) != 2 { // header + 1 record + t.Fatalf("rows=%d want 2", len(rows)) + } + wantHeaders := append(slices.Clone(statisticsCSVFixedHeaders), "count_LMS", "count_SMS") + if len(rows[0]) != len(wantHeaders) { + t.Fatalf("header len=%d want %d (%v)", len(rows[0]), len(wantHeaders), rows[0]) + } + for i, h := range wantHeaders { + if rows[0][i] != h { + t.Errorf("header[%d]=%q want %q", i, rows[0][i], h) + } + } + // Row 검증: count_LMS=5, count_SMS=10 등. 
+ rec := rows[1] + headerToIdx := make(map[string]int, len(rows[0])) + for i, h := range rows[0] { + headerToIdx[h] = i + } + if rec[headerToIdx["accountId"]] != "AC1" { + t.Errorf("accountId=%q", rec[headerToIdx["accountId"]]) + } + if rec[headerToIdx["prepaid"]] != "true" { + t.Errorf("prepaid=%q", rec[headerToIdx["prepaid"]]) + } + if rec[headerToIdx["count_SMS"]] != "10" { + t.Errorf("count_SMS=%q want 10", rec[headerToIdx["count_SMS"]]) + } + if rec[headerToIdx["count_LMS"]] != "5" { + t.Errorf("count_LMS=%q want 5", rec[headerToIdx["count_LMS"]]) + } +} + +func TestStatisticsExportDaily_UnionHeader(t *testing.T) { + // 다중 윈도우: 9일 범위 → 9개 윈도우. 호출별로 다른 count 키 셋을 반환하여 union을 검증. + var calls atomic.Int64 + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + n := calls.Add(1) + var body string + switch n { + case 1: + body = `[{"accountId":"A","date":"2026-04-24T00:00:00.000Z","count":{"SMS":10},"balance":0,"point":0,"profit":0}]` + case 5: + body = `[{"accountId":"A","date":"2026-04-28T00:00:00.000Z","count":{"SMS":1,"RCS_SMS":2},"balance":0,"point":0,"profit":0}]` + default: + body = `[]` + } + _, _ = io.WriteString(w, body) + }) + + outPath := filepath.Join(tmpDir, "union.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", "2026-04-24", + "--end-date", "2026-05-03", + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + + rows := mustReadCSV(t, outPath) + if len(rows) < 3 { // header + 2 records + t.Fatalf("rows=%d want >=3 (%v)", len(rows), rows) + } + // 헤더 marshal: count_RCS_SMS, count_SMS 둘 다 존재. 
+ header := rows[0] + headerSet := make(map[string]int) + for i, h := range header { + headerSet[h] = i + } + if _, ok := headerSet["count_RCS_SMS"]; !ok { + t.Fatalf("union header missing count_RCS_SMS: %v", header) + } + if _, ok := headerSet["count_SMS"]; !ok { + t.Fatalf("union header missing count_SMS: %v", header) + } + // 첫 윈도우 record (count_SMS:10만 있는)의 count_RCS_SMS는 빈 셀. + var firstRecord []string + for i := 1; i < len(rows); i++ { + if rows[i][headerSet["count_SMS"]] == "10" { + firstRecord = rows[i] + break + } + } + if firstRecord == nil { + t.Fatalf("could not find record with count_SMS=10: %v", rows) + } + if firstRecord[headerSet["count_RCS_SMS"]] != "" { + t.Errorf("first window count_RCS_SMS=%q want empty", firstRecord[headerSet["count_RCS_SMS"]]) + } +} + +func TestStatisticsExportDaily_OperationalLogScenario(t *testing.T) { + // 31일 윈도우 split + 각 호출의 path/startDate/endDate 검증. + var ( + mu sync.Mutex + startQs []string + endQs []string + callPath []string + ) + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + callPath = append(callPath, r.URL.Path) + startQs = append(startQs, r.URL.Query().Get("startDate")) + endQs = append(endQs, r.URL.Query().Get("endDate")) + mu.Unlock() + _, _ = io.WriteString(w, `[]`) + }) + + outPath := filepath.Join(tmpDir, "multi.jsonl") + err := runStatsExport( + "--output", outPath, + "--format", "jsonl", + "--start-date", "2026-04-02", + "--end-date", "2026-05-03", + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + + mu.Lock() + defer mu.Unlock() + if len(callPath) != 31 { + t.Fatalf("calls=%d want 31", len(callPath)) + } + for _, p := range callPath { + if strings.Contains(p, "messages-internal") { + t.Fatalf("internal endpoint used: %s", p) + } + if !strings.HasSuffix(p, "/messages/v4/statistics/daily") { + t.Errorf("unexpected path: %s", p) + } + } + + for i, s := range startQs { + sTime, err := 
time.Parse(time.RFC3339, s) + if err != nil { + t.Fatalf("start[%d] parse: %v", i, err) + } + if !sTime.Equal(sTime.Truncate(24 * time.Hour).UTC()) { + t.Errorf("start[%d]=%s not at UTC midnight", i, s) + } + eTime, err := time.Parse(time.RFC3339, endQs[i]) + if err != nil { + t.Fatalf("end[%d] parse: %v", i, err) + } + if d := eTime.Sub(sTime); d != 24*time.Hour { + t.Errorf("window[%d] span=%v want 24h", i, d) + } + if i > 0 && endQs[i-1] != s { + t.Errorf("window[%d] start=%s does not chain from prev end=%s", i, s, endQs[i-1]) + } + } +} + +func TestStatisticsExportDaily_LookbackExceeded(t *testing.T) { + called := atomic.Int64{} + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + called.Add(1) + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "no.csv") + oldDate := time.Now().UTC().AddDate(0, 0, -200).Format("2006-01-02") + err := runStatsExport( + "--output", outPath, + "--start-date", oldDate, + "--end-date", recentDateUTC(0), + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected lookback error") + } + if !errors.Is(err, exporter.ErrLookbackExceeded) { + t.Errorf("err=%v want ErrLookbackExceeded", err) + } + if called.Load() != 0 { + t.Errorf("API called %d times before validation", called.Load()) + } + if _, err := os.Stat(outPath); !errors.Is(err, os.ErrNotExist) { + t.Errorf("file should not be created on validation error: %v", err) + } +} + +func TestStatisticsExportDaily_RequiredDates(t *testing.T) { + // --start-date 누락. 
+ t.Run("missing_start", func(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "ns.csv") + err := runStatsExport( + "--output", outPath, + "--end-date", recentDateUTC(0), + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected --start-date required error") + } + if !strings.Contains(err.Error(), "start-date") { + t.Errorf("error should mention start-date: %v", err) + } + }) + // --end-date 누락. + t.Run("missing_end", func(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "ne.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(2), + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected --end-date required error") + } + if !strings.Contains(err.Error(), "end-date") { + t.Errorf("error should mention end-date: %v", err) + } + }) +} + +func TestStatisticsExportDaily_PageSizeUpperBound(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "nope.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--page-size", "500", + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected page-size error") + } + if !strings.Contains(err.Error(), "exceeds max") { + t.Errorf("unexpected error: %v", err) + } +} + +func TestStatisticsExportDaily_ThrottleMinimum(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "nope.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", 
recentDateUTC(1), "--end-date", recentDateUTC(0), + "--throttle", "50ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected throttle error") + } + if !strings.Contains(err.Error(), "throttle") { + t.Errorf("unexpected error: %v", err) + } +} + +func TestStatisticsExportDaily_JSONL(t *testing.T) { + // 8일 범위 → 8개 윈도우 → 호출 8회 → 적어도 2 라인 이상. + var calls atomic.Int64 + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + i := calls.Add(1) + body := fmt.Sprintf(`[{"accountId":"A%d","date":"2026-05-09T00:00:00.000Z","count":{"SMS":%d},"balance":0,"point":0,"profit":0}]`, i, i) + _, _ = io.WriteString(w, body) + }) + + outPath := filepath.Join(tmpDir, "out.jsonl") + startDate := time.Now().UTC().AddDate(0, 0, -8).Format("2006-01-02") + endDate := time.Now().UTC().Format("2006-01-02") + err := runStatsExport( + "--output", outPath, + "--format", "jsonl", + "--start-date", startDate, + "--end-date", endDate, + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("read: %v", err) + } + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + if len(lines) < 2 { + t.Fatalf("expected multiple jsonl lines, got %d", len(lines)) + } + for i, l := range lines { + var probe map[string]any + if err := json.Unmarshal([]byte(l), &probe); err != nil { + t.Fatalf("line %d not valid JSON: %v", i, err) + } + } +} + +func TestStatisticsExportDaily_JSONAppendRejected(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "out.json") + if err := os.WriteFile(outPath, []byte("[]"), 0o644); err != nil { + t.Fatalf("seed: %v", err) + } + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--format", "json", "--append", + 
"--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected error: json + append not allowed") + } + if !strings.Contains(err.Error(), "--append") { + t.Errorf("error should mention --append: %v", err) + } +} + +func TestStatisticsExportDaily_OffsetPagination(t *testing.T) { + // 단일 윈도우, page-size 2. offset 0: 2개 (full), offset 2: 1개 (partial → 종료). + var calls atomic.Int64 + var capturedOffsets []string + var mu sync.Mutex + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + n := calls.Add(1) + mu.Lock() + capturedOffsets = append(capturedOffsets, r.URL.Query().Get("offset")) + mu.Unlock() + switch n { + case 1: + _, _ = io.WriteString(w, `[ + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":1},"balance":0,"point":0,"profit":0}, + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":2},"balance":0,"point":0,"profit":0} + ]`) + case 2: + _, _ = io.WriteString(w, `[ + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":3},"balance":0,"point":0,"profit":0} + ]`) + default: + t.Errorf("unexpected call %d", n) + _, _ = io.WriteString(w, `[]`) + } + }) + + outPath := filepath.Join(tmpDir, "offset.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--page-size", "2", + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + if got := calls.Load(); got != 2 { + t.Fatalf("calls=%d want 2", got) + } + mu.Lock() + defer mu.Unlock() + if len(capturedOffsets) != 2 { + t.Fatalf("captured offsets=%v", capturedOffsets) + } + // 첫 호출은 offset 없음 ("" because 0 is skipped). + if capturedOffsets[0] != "" { + t.Errorf("first call offset=%q want empty", capturedOffsets[0]) + } + // 두번째 호출은 offset=2. + if capturedOffsets[1] != "2" { + t.Errorf("second call offset=%q want 2", capturedOffsets[1]) + } + rows := mustReadCSV(t, outPath) + // 헤더 + 3 records. 
+ if len(rows) != 4 { + t.Fatalf("rows=%d want 4", len(rows)) + } +} + +func TestStatisticsExportDaily_EmptyResultEndsWindow(t *testing.T) { + // page-size 30, 첫 응답에 2개 (< page-size) → 다음 페이지 호출 없음. + var calls atomic.Int64 + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + calls.Add(1) + _, _ = io.WriteString(w, `[ + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":1},"balance":0,"point":0,"profit":0}, + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":2},"balance":0,"point":0,"profit":0} + ]`) + }) + + outPath := filepath.Join(tmpDir, "empty.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + if got := calls.Load(); got != 1 { + t.Fatalf("calls=%d want 1 (partial page should end window)", got) + } +} + +func TestStatisticsExportDaily_PrepaidFilter(t *testing.T) { + t.Run("valid_true", func(t *testing.T) { + var captured atomic.Value + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + if captured.Load() == nil { + captured.Store(r.URL.Query().Get("prepaid")) + } + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "p.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--prepaid", "true", + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + got, _ := captured.Load().(string) + if got != "true" { + t.Errorf("prepaid query=%q want true", got) + } + }) + t.Run("valid_false", func(t *testing.T) { + var captured atomic.Value + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + if captured.Load() == nil { + captured.Store(r.URL.Query().Get("prepaid")) + } + _, _ = io.WriteString(w, `[]`) + }) + outPath 
:= filepath.Join(tmpDir, "p2.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--prepaid", "false", + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + got, _ := captured.Load().(string) + if got != "false" { + t.Errorf("prepaid query=%q want false", got) + } + }) + t.Run("invalid", func(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "p3.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--prepaid", "bogus", + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected --prepaid invalid value error") + } + if !strings.Contains(err.Error(), "prepaid") { + t.Errorf("error should mention prepaid: %v", err) + } + }) + t.Run("empty_omitted_from_query", func(t *testing.T) { + var hasPrepaid atomic.Bool + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + if r.URL.Query().Has("prepaid") { + hasPrepaid.Store(true) + } + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "p4.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + if hasPrepaid.Load() { + t.Errorf("prepaid query should be absent when flag is empty") + } + }) +} + +func TestStatisticsExportDaily_NoInternalEndpoint(t *testing.T) { + var paths []string + var mu sync.Mutex + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + paths = append(paths, r.URL.Path) + mu.Unlock() + _, _ = io.WriteString(w, `[]`) + }) + outPath := filepath.Join(tmpDir, "p.csv") + err := runStatsExport( + 
"--output", outPath, + "--start-date", "2026-04-15", + "--end-date", "2026-04-22", + "--throttle", "100ms", + "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + mu.Lock() + defer mu.Unlock() + if len(paths) == 0 { + t.Fatal("no API calls made") + } + for _, p := range paths { + if strings.Contains(p, "messages-internal") { + t.Fatalf("internal endpoint used: %s", p) + } + if !strings.HasSuffix(p, "/messages/v4/statistics/daily") { + t.Errorf("unexpected path: %s", p) + } + } +} + +func TestStatisticsExportDaily_ResumeToken(t *testing.T) { + // 첫 라운드: 두번째 호출에서 5xx로 중단 → resume-token 발급. + // 재호출: 동일 윈도우/offset에서 진행. + var calls atomic.Int64 + stderr, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + n := calls.Add(1) + switch n { + case 1: + // page-size=2짜리 full page → next offset=2. + _, _ = io.WriteString(w, `[ + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":1},"balance":0,"point":0,"profit":0}, + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":2},"balance":0,"point":0,"profit":0} + ]`) + case 2: + w.WriteHeader(500) + _, _ = w.Write([]byte(`{"errorCode":"X","errorMessage":"boom"}`)) + default: + // 재개 단계: 종결. 
+ _, _ = io.WriteString(w, `[{"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":3},"balance":0,"point":0,"profit":0}]`) + } + }) + + outPath := filepath.Join(tmpDir, "resume.csv") + startDate := recentDateUTC(1) + endDate := recentDateUTC(0) + err := runStatsExport( + "--output", outPath, + "--start-date", startDate, "--end-date", endDate, + "--page-size", "2", + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected error from second-call 500") + } + se := stderr.String() + if !strings.Contains(se, "--resume-token") { + t.Fatalf("stderr should contain resume token hint:\n%s", se) + } + tok := extractResumeToken(se) + if tok == "" { + t.Fatalf("no token in stderr:\n%s", se) + } + + // 두번째 라운드 - append + resume-token. + resetStatisticsExportFlags() + resetPflagChanged(statisticsExportDailyCmd) + err = runStatsExport( + "--output", outPath, "--append", + "--start-date", startDate, "--end-date", endDate, + "--page-size", "2", + "--resume-token", tok, + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("resume execute: %v", err) + } +} + +func TestStatisticsExportDaily_StdoutOutput(t *testing.T) { + stderr, _ := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[{"accountId":"X","date":"2026-05-09T00:00:00.000Z","count":{"SMS":1},"balance":0,"point":0,"profit":0}]`) + }) + err := runStatsExport( + "--output", "-", + "--format", "jsonl", + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--throttle", "100ms", "--progress", "on", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + if outWriter == nil { + t.Fatal("outWriter nil") + } + stdout := outWriter.(*bytes.Buffer).String() + if !strings.Contains(stdout, `"accountId":"X"`) { + t.Errorf("stdout missing JSONL output: %q", stdout) + } + // progress는 stderr로만. 
+ if strings.Contains(stdout, "다운로드 현황") { + t.Errorf("progress leaked into stdout: %q", stdout) + } + if !strings.Contains(stderr.String(), "다운로드") { + t.Errorf("progress not on stderr: %q", stderr.String()) + } +} + +func TestStatisticsExportDaily_BOM(t *testing.T) { + _, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + _, _ = io.WriteString(w, `[{"accountId":"X","date":"2026-05-09T00:00:00.000Z","count":{"SMS":1},"balance":0,"point":0,"profit":0}]`) + }) + outPath := filepath.Join(tmpDir, "bom.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--bom", + "--throttle", "100ms", "--progress", "off", + ) + if err != nil { + t.Fatalf("execute: %v", err) + } + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("read: %v", err) + } + if len(data) < 3 || data[0] != 0xEF || data[1] != 0xBB || data[2] != 0xBF { + t.Errorf("missing UTF-8 BOM prefix: % X", data[:min(3, len(data))]) + } +} + +func TestStatisticsExportDaily_5xxError_PartialDataPreserved(t *testing.T) { + // 한 윈도우에서 첫 페이지 성공 + 두번째 페이지 5xx → 첫 페이지 데이터는 디스크에 보존. 
+ var calls atomic.Int64 + stderr, tmpDir := setupStatisticsExportTest(t, func(w http.ResponseWriter, _ *http.Request) { + n := calls.Add(1) + if n == 1 { + _, _ = io.WriteString(w, `[ + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":1},"balance":0,"point":0,"profit":0}, + {"accountId":"A","date":"2026-05-09T00:00:00.000Z","count":{"SMS":2},"balance":0,"point":0,"profit":0} + ]`) + return + } + w.WriteHeader(500) + _, _ = w.Write([]byte(`{"errorCode":"X","errorMessage":"boom"}`)) + }) + + outPath := filepath.Join(tmpDir, "err.csv") + err := runStatsExport( + "--output", outPath, + "--start-date", recentDateUTC(1), "--end-date", recentDateUTC(0), + "--page-size", "2", + "--throttle", "100ms", "--progress", "off", + ) + if err == nil { + t.Fatal("expected error from 500") + } + // 첫 페이지 데이터는 디스크에 (FinalizeWrite가 runErr 후에도 호출됨). + rows := mustReadCSV(t, outPath) + if len(rows) < 3 { // header + 2 records + t.Fatalf("rows=%d want >=3 (header + first page records preserved)", len(rows)) + } + if !strings.Contains(stderr.String(), "--resume-token") { + t.Errorf("stderr missing resume-token hint: %q", stderr.String()) + } +} + +func resetStatisticsExportFlags() { + statsExportFlagOutput = "" + statsExportFlagFormat = "csv" + statsExportFlagThrottle = statisticsExportThrottleDefault + statsExportFlagPageSize = statisticsExportPageSizeDefault + statsExportFlagMaxPages = 0 + statsExportFlagAppend = false + statsExportFlagBOM = false + statsExportFlagProgress = "auto" + statsExportFlagNoProgress = false + statsExportFlagResumeToken = "" + statsExportFlagStartDate = "" + statsExportFlagEndDate = "" + statsExportFlagPrepaid = "" + statsExportFlagOffset = 0 +} diff --git a/internal/version/semver.go b/internal/version/semver.go index 28c3978..a3d65dd 100644 --- a/internal/version/semver.go +++ b/internal/version/semver.go @@ -95,10 +95,7 @@ func comparePrereleaseIdentifiers(a, b string) int { aParts := strings.Split(a, ".") bParts := strings.Split(b, ".") - 
limit := len(aParts) - if len(bParts) < limit { - limit = len(bParts) - } + limit := min(len(bParts), len(aParts)) for i := 0; i < limit; i++ { aNum, aIsNum := strconv.Atoi(aParts[i]) diff --git a/pkg/apierror/apierror_test.go b/pkg/apierror/apierror_test.go index e7f26ea..1e0e705 100644 --- a/pkg/apierror/apierror_test.go +++ b/pkg/apierror/apierror_test.go @@ -460,7 +460,7 @@ func TestClassify_Concurrent(t *testing.T) { wg.Add(goroutines) results := make([]*ClassifiedError, goroutines) - for i := 0; i < goroutines; i++ { + for i := range goroutines { go func(idx int) { defer wg.Done() results[idx] = Classify(apiErr) diff --git a/pkg/auth/hmac_test.go b/pkg/auth/hmac_test.go index 5069312..97c5f34 100644 --- a/pkg/auth/hmac_test.go +++ b/pkg/auth/hmac_test.go @@ -238,7 +238,7 @@ func TestGenerateAuthorization_Concurrent(t *testing.T) { wg.Add(goroutines) errs := make(chan error, goroutines) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer wg.Done() result, err := GenerateAuthorization("NCS1234567890ABC", "01234567890123456789012345678901") diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go index 492091e..98cefd3 100644 --- a/pkg/client/client_test.go +++ b/pkg/client/client_test.go @@ -673,7 +673,7 @@ func TestGet_Concurrent(t *testing.T) { results := make([]json.RawMessage, goroutines) wg.Add(goroutines) - for i := 0; i < goroutines; i++ { + for i := range goroutines { go func(idx int) { defer wg.Done() results[idx], errs[idx] = directGet(context.Background(), c, ts.URL+"/test", nil) diff --git a/pkg/clock/clock.go b/pkg/clock/clock.go new file mode 100644 index 0000000..fa86f66 --- /dev/null +++ b/pkg/clock/clock.go @@ -0,0 +1,104 @@ +// Package clock abstracts wall-clock access so throttling and retry logic +// can be exercised deterministically in tests. +package clock + +import ( + "context" + "sync" + "time" +) + +// Clock abstracts time so exporter throttling can be tested deterministically. 
+type Clock interface { + // Now returns the current time. + Now() time.Time + // Sleep blocks for d or until ctx is done. Returns ctx.Err() if canceled, + // nil if the full duration elapsed. d <= 0 returns immediately (nil). + Sleep(ctx context.Context, d time.Duration) error +} + +// realClock is the stdlib-backed Clock implementation. +type realClock struct{} + +// Real returns a Clock backed by time.Now and time.NewTimer. +func Real() Clock { + return realClock{} +} + +// Now returns the current wall-clock time. +func (realClock) Now() time.Time { + return time.Now() +} + +// Sleep blocks for d or until ctx is done. +func (realClock) Sleep(ctx context.Context, d time.Duration) error { + if d <= 0 { + return nil + } + // 이미 취소된 컨텍스트는 타이머 생성을 건너뛰고 즉시 반환한다. + if err := ctx.Err(); err != nil { + return err + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +// Fake is a controllable clock for tests. +// Fake methods are safe for concurrent use. +type Fake struct { + mu sync.Mutex + now time.Time + sleeps []time.Duration +} + +// NewFake creates a Fake clock at the given instant. +func NewFake(now time.Time) *Fake { + return &Fake{now: now} +} + +// Now returns the current fake time. +func (f *Fake) Now() time.Time { + f.mu.Lock() + defer f.mu.Unlock() + return f.now +} + +// Sleep records the duration in Sleeps() and advances the clock by d. +// If ctx is already canceled, returns ctx.Err() without recording or advancing. +// d <= 0 returns immediately, no record. +func (f *Fake) Sleep(ctx context.Context, d time.Duration) error { + // 컨텍스트 취소를 먼저 검사하여 d<=0과의 우선순위를 분명히 한다. + if err := ctx.Err(); err != nil { + return err + } + if d <= 0 { + return nil + } + f.mu.Lock() + f.sleeps = append(f.sleeps, d) + f.now = f.now.Add(d) + f.mu.Unlock() + return nil +} + +// Sleeps returns a copy of the recorded sleep durations in call order. 
+func (f *Fake) Sleeps() []time.Duration { + f.mu.Lock() + defer f.mu.Unlock() + out := make([]time.Duration, len(f.sleeps)) + copy(out, f.sleeps) + return out +} + +// Advance moves the fake clock forward without recording a sleep. +func (f *Fake) Advance(d time.Duration) { + f.mu.Lock() + f.now = f.now.Add(d) + f.mu.Unlock() +} diff --git a/pkg/clock/clock_test.go b/pkg/clock/clock_test.go new file mode 100644 index 0000000..4b5d272 --- /dev/null +++ b/pkg/clock/clock_test.go @@ -0,0 +1,239 @@ +package clock + +import ( + "context" + "errors" + "sync" + "testing" + "time" +) + +func TestReal_Sleep_ZeroOrNegative(t *testing.T) { + t.Parallel() + c := Real() + cases := []struct { + name string + d time.Duration + }{ + {"zero", 0}, + {"negative", -1 * time.Second}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + start := time.Now() + if err := c.Sleep(context.Background(), tc.d); err != nil { + t.Fatalf("Sleep returned err: %v", err) + } + if elapsed := time.Since(start); elapsed > 50*time.Millisecond { + t.Fatalf("Sleep with d=%v took too long: %v", tc.d, elapsed) + } + }) + } +} + +func TestReal_Sleep_Completes(t *testing.T) { + t.Parallel() + c := Real() + const target = 10 * time.Millisecond + start := time.Now() + if err := c.Sleep(context.Background(), target); err != nil { + t.Fatalf("Sleep returned err: %v", err) + } + elapsed := time.Since(start) + // flake 방지를 위해 넓은 범위(5~100ms)를 허용한다. 
+ if elapsed < 5*time.Millisecond || elapsed > 100*time.Millisecond { + t.Fatalf("Sleep elapsed=%v outside [5ms, 100ms]", elapsed) + } +} + +func TestReal_Sleep_CtxCancel_Immediate(t *testing.T) { + t.Parallel() + c := Real() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + start := time.Now() + err := c.Sleep(ctx, 1*time.Second) + elapsed := time.Since(start) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } + if elapsed > 100*time.Millisecond { + t.Fatalf("immediate-cancel Sleep too slow: %v", elapsed) + } +} + +func TestReal_Sleep_CtxCancel_DuringSleep(t *testing.T) { + t.Parallel() + c := Real() + ctx, cancel := context.WithCancel(context.Background()) + const cancelAfter = 50 * time.Millisecond + go func() { + time.Sleep(cancelAfter) + cancel() + }() + start := time.Now() + err := c.Sleep(ctx, 1*time.Second) + elapsed := time.Since(start) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } + // 취소 시점(50ms) 근처에서 반환되어야 한다. 넉넉히 5~500ms 허용. 
+ if elapsed < 5*time.Millisecond || elapsed > 500*time.Millisecond { + t.Fatalf("cancel-during Sleep elapsed=%v outside [5ms, 500ms]", elapsed) + } +} + +func TestFake_Now(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + f := NewFake(t0) + if got := f.Now(); !got.Equal(t0) { + t.Fatalf("Now()=%v, want %v", got, t0) + } + f.Advance(1 * time.Second) + if got, want := f.Now(), t0.Add(1*time.Second); !got.Equal(want) { + t.Fatalf("Now() after Advance=%v, want %v", got, want) + } +} + +func TestFake_Sleep_ZeroOrNegative(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + cases := []struct { + name string + d time.Duration + }{ + {"zero", 0}, + {"negative", -1 * time.Second}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + f := NewFake(t0) + if err := f.Sleep(context.Background(), tc.d); err != nil { + t.Fatalf("Sleep returned err: %v", err) + } + if got := f.Sleeps(); len(got) != 0 { + t.Fatalf("Sleeps() should be empty, got %v", got) + } + if got := f.Now(); !got.Equal(t0) { + t.Fatalf("Now() must not advance, got %v want %v", got, t0) + } + }) + } +} + +func TestFake_Sleep_Records(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + f := NewFake(t0) + const d = 500 * time.Millisecond + for range 3 { + if err := f.Sleep(context.Background(), d); err != nil { + t.Fatalf("Sleep returned err: %v", err) + } + } + got := f.Sleeps() + want := []time.Duration{d, d, d} + if len(got) != len(want) { + t.Fatalf("Sleeps len=%d, want %d", len(got), len(want)) + } + for i := range got { + if got[i] != want[i] { + t.Fatalf("Sleeps[%d]=%v, want %v", i, got[i], want[i]) + } + } + if got, want := f.Now(), t0.Add(1500*time.Millisecond); !got.Equal(want) { + t.Fatalf("Now()=%v, want %v", got, want) + } +} + +func TestFake_Sleep_CtxCancel(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + f := NewFake(t0) + ctx, 
cancel := context.WithCancel(context.Background()) + cancel() + err := f.Sleep(ctx, 1*time.Second) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } + if got := f.Sleeps(); len(got) != 0 { + t.Fatalf("Sleeps() must be empty, got %v", got) + } + if got := f.Now(); !got.Equal(t0) { + t.Fatalf("Now() must not advance, got %v want %v", got, t0) + } +} + +func TestFake_Advance(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + f := NewFake(t0) + f.Advance(2 * time.Second) + if got, want := f.Now(), t0.Add(2*time.Second); !got.Equal(want) { + t.Fatalf("Now()=%v, want %v", got, want) + } + if got := f.Sleeps(); len(got) != 0 { + t.Fatalf("Advance must not record sleep, got %v", got) + } +} + +func TestFake_Concurrent(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + f := NewFake(t0) + const n = 100 + var wg sync.WaitGroup + wg.Add(n * 3) + for range n { + go func() { + defer wg.Done() + _ = f.Sleep(context.Background(), 1*time.Millisecond) + }() + go func() { + defer wg.Done() + _ = f.Now() + }() + go func() { + defer wg.Done() + f.Advance(1 * time.Millisecond) + }() + } + wg.Wait() + // Sleep 호출 횟수만 카운트한다. + if got := len(f.Sleeps()); got != n { + t.Fatalf("Sleeps len=%d, want %d", got, n) + } +} + +func TestFake_SleepsCopy(t *testing.T) { + t.Parallel() + t0 := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + f := NewFake(t0) + if err := f.Sleep(context.Background(), 1*time.Second); err != nil { + t.Fatalf("Sleep returned err: %v", err) + } + got := f.Sleeps() + if len(got) != 1 { + t.Fatalf("initial Sleeps len=%d, want 1", len(got)) + } + // 반환된 슬라이스를 변조해도 내부 상태에 영향이 없어야 한다. 
+ got[0] = 999 * time.Hour + got = append(got, 7*time.Hour) + if len(got) != 2 { + t.Fatalf("local append failed: len=%d", len(got)) + } + again := f.Sleeps() + if len(again) != 1 { + t.Fatalf("after mutation Sleeps len=%d, want 1", len(again)) + } + if again[0] != 1*time.Second { + t.Fatalf("after mutation Sleeps[0]=%v, want 1s", again[0]) + } +} + +// Clock 인터페이스 만족 여부를 컴파일 타임에 검증한다. +var _ Clock = realClock{} +var _ Clock = (*Fake)(nil) diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 0fe575b..c6c7e3b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -420,7 +420,7 @@ func TestConcurrent_SaveLoad(t *testing.T) { wg.Add(goroutines) panicCh := make(chan string, goroutines) - for i := 0; i < goroutines; i++ { + for i := range goroutines { if i%2 == 0 { go func(n int) { defer wg.Done() diff --git a/pkg/exporter/exporter.go b/pkg/exporter/exporter.go new file mode 100644 index 0000000..6965ede --- /dev/null +++ b/pkg/exporter/exporter.go @@ -0,0 +1,288 @@ +package exporter + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/solapi/solactl/pkg/clock" + "github.com/solapi/solactl/pkg/progress" +) + +// Defaults applied when the corresponding Options field is zero/empty. +const ( + DefaultMaxLookbackDays = 180 + DefaultMaxWindowDays = 7 + DefaultPageSize = 50 + DefaultThrottle = 500 * time.Millisecond + MinThrottle = 100 * time.Millisecond +) + +// PageState is per-fetcher opaque cursor state. nil = start of window. +type PageState = json.RawMessage + +// Page is one fetched page. Next==nil signals end-of-window. +type Page struct { + Records []json.RawMessage + Next PageState +} + +// PageFetcher fetches one page within w. state==nil means first page in window. +type PageFetcher func(ctx context.Context, w Window, state PageState) (Page, error) + +// RowWriter accepts raw JSON records. Implementations are not required to be +// goroutine-safe — Run serializes all calls. 
+type RowWriter interface { + WriteRecord(record json.RawMessage) error + Flush() error +} + +// Options configures Run. Required fields: Now, StartDate, EndDate, Fetcher, Writer. +type Options struct { + Now time.Time + StartDate time.Time + EndDate time.Time + PageSize int + MaxPages int + Throttle time.Duration + MaxLookbackDays int + MaxWindowDays int + Fetcher PageFetcher + Writer RowWriter + Reporter progress.Reporter + Clock clock.Clock + + // Resume from a specific window. Empty = start from the first window. + StartWindowDate string + // State for the first page of the resumed window. Used only on the first + // iteration when StartWindowDate matches. + InitialState PageState +} + +// Result is returned by Run. ResumeToken!="" indicates a recoverable interruption. +type Result struct { + RecordsWritten int + ResumeToken string +} + +// ValidatePageSize fails if value > hardMax. value<=0 means "use default" and +// is left for the caller to fill in; the validator only rejects over-limit. +func ValidatePageSize(name string, value, hardMax int) error { + if hardMax <= 0 { + return fmt.Errorf("%s: hardMax must be positive, got %d", name, hardMax) + } + if value > hardMax { + return fmt.Errorf("%s: %d exceeds max %d", name, value, hardMax) + } + return nil +} + +// ValidateThrottle fails if d < minD. minD<=0 → MinThrottle. +// d<=0 is treated as "use default" by Run, so we permit it here as well. +func ValidateThrottle(d, minD time.Duration) error { + if minD <= 0 { + minD = MinThrottle + } + if d <= 0 { + return nil + } + if d < minD { + return fmt.Errorf("throttle %s below minimum %s", d, minD) + } + return nil +} + +// Run drives the windowed pagination loop. See package docs and Options +// fields for semantics. Reporter.Finalize is always called (except on panic). 
func Run(ctx context.Context, opts Options) (Result, error) {
	if opts.Fetcher == nil {
		return Result{}, errors.New("exporter: Fetcher is required")
	}
	if opts.Writer == nil {
		return Result{}, errors.New("exporter: Writer is required")
	}

	// Default-fill before any validation that depends on these values.
	if opts.MaxLookbackDays <= 0 {
		opts.MaxLookbackDays = DefaultMaxLookbackDays
	}
	if opts.MaxWindowDays <= 0 {
		opts.MaxWindowDays = DefaultMaxWindowDays
	}
	if opts.Throttle <= 0 {
		opts.Throttle = DefaultThrottle
	}
	if opts.Clock == nil {
		opts.Clock = clock.Real()
	}
	if opts.Reporter == nil {
		opts.Reporter = progress.New(progress.Options{Mode: progress.ModeOff})
	}

	// Validation (post-default). Note: PageSize<=0 is *allowed* — caller may rely
	// on server-side default. ValidatePageSize is a callable helper for callers.
	if err := ValidateThrottle(opts.Throttle, MinThrottle); err != nil {
		return Result{}, err
	}

	effectiveEnd, err := ValidateDateRange(opts.StartDate, opts.EndDate, opts.Now, opts.MaxLookbackDays)
	if err != nil {
		return Result{}, err
	}

	windows := SplitWindows(opts.StartDate, effectiveEnd, opts.MaxWindowDays)
	if len(windows) == 0 {
		// Validation passed but the split produced no windows: internal
		// invariant violation.
		return Result{}, errors.New("exporter: no windows after split (internal invariant)")
	}

	// Resume: jump to the window with matching Label().
	skipIdx := 0
	if opts.StartWindowDate != "" {
		found := -1
		for i, w := range windows {
			if w.Label() == opts.StartWindowDate {
				found = i
				break
			}
		}
		if found < 0 {
			return Result{}, fmt.Errorf("exporter: resume window %q not in split range", opts.StartWindowDate)
		}
		skipIdx = found
	}

	totalWindows := len(windows)
	startTime := opts.Clock.Now()

	var (
		recordsWritten int
		totalPages     int
		runErr         error
		// resumeWindowIdx / resumeState describe the window/state to resume from.
		// resumeWindowIdx==-1 ⇒ no resume token (successful completion).
		resumeWindowIdx = -1
		resumeState     PageState
	)

	// finalize is invoked exactly once, from the final return statement. It is
	// NOT deferred, so it does not run on panic (consistent with Run's doc
	// comment). If token encoding fails it promotes encErr into runErr; Go
	// evaluates return operands left to right, so that mutation is visible in
	// `return finalize(), runErr`. NOTE(review): Reporter.Finalize has already
	// observed the pre-encode runErr by then — confirm that mismatch is acceptable.
	finalize := func() Result {
		elapsed := opts.Clock.Now().Sub(startTime)
		opts.Reporter.Finalize(runErr, recordsWritten, elapsed)
		res := Result{RecordsWritten: recordsWritten}
		if resumeWindowIdx >= 0 && resumeWindowIdx < totalWindows {
			tok, encErr := EncodeToken(ResumeToken{
				Version: resumeTokenVersion,
				Window:  windows[resumeWindowIdx].Label(),
				State:   resumeState,
			})
			if encErr == nil {
				res.ResumeToken = tok
			} else if runErr == nil {
				runErr = encErr
			}
		}
		return res
	}

windowLoop:
	for i := skipIdx; i < totalWindows; i++ {
		w := windows[i]

		// Check for cancellation before entering the window.
		// NOTE(review): resumeState is reset to nil here even when i == skipIdx,
		// which drops opts.InitialState from the emitted token — a resume after
		// this cancellation restarts the window from its first page. Confirm
		// that is intended (it can duplicate records with --append).
		if cerr := ctx.Err(); cerr != nil {
			runErr = cerr
			resumeWindowIdx = i
			resumeState = nil
			break
		}

		opts.Reporter.WindowStart(i+1, totalWindows, w.Label())

		// First (possibly resumed) window starts from InitialState; all later
		// windows start from the zero state.
		var state PageState
		if i == skipIdx {
			state = opts.InitialState
		}

		pageIdx := 0
		for {
			if cerr := ctx.Err(); cerr != nil {
				runErr = cerr
				resumeWindowIdx = i
				resumeState = state
				break windowLoop
			}

			page, ferr := opts.Fetcher(ctx, w, state)
			if ferr != nil {
				runErr = ferr
				resumeWindowIdx = i
				resumeState = state
				break windowLoop
			}

			// NOTE(review): on a write/flush failure the token re-points at the
			// *current* page state, so a resume re-fetches the page and may
			// rewrite records already emitted — confirm downstream dedupes.
			for _, rec := range page.Records {
				if werr := opts.Writer.WriteRecord(rec); werr != nil {
					runErr = werr
					resumeWindowIdx = i
					resumeState = state
					break windowLoop
				}
				recordsWritten++
			}
			if ferr := opts.Writer.Flush(); ferr != nil {
				runErr = ferr
				resumeWindowIdx = i
				resumeState = state
				break windowLoop
			}

			pageIdx++
			totalPages++
			opts.Reporter.PageProgress(i+1, totalWindows, pageIdx, recordsWritten)

			// MaxPages reached: encode a token so the caller can continue
			// later. Prefer the next-page state within the same window.
			if opts.MaxPages > 0 && totalPages >= opts.MaxPages {
				if len(page.Next) > 0 {
					resumeWindowIdx = i
					resumeState = page.Next
				} else if i+1 < totalWindows {
					// Current window is exhausted; resume from the next window.
					resumeWindowIdx = i + 1
					resumeState = nil
				}
				break windowLoop
			}

			if len(page.Next) == 0 {
				// End of window: treat nil and empty RawMessage alike as the
				// stop signal to avoid an infinite loop.
				break
			}
			state = page.Next

			// Throttle between pages; bail out on context cancellation.
			if serr := opts.Clock.Sleep(ctx, opts.Throttle); serr != nil {
				runErr = serr
				resumeWindowIdx = i
				resumeState = state
				break windowLoop
			}
		}

		opts.Reporter.WindowDone(i+1, totalWindows, w.Label(), recordsWritten)

		// Throttle between windows (no sleep after the last window).
		if i+1 < totalWindows {
			if serr := opts.Clock.Sleep(ctx, opts.Throttle); serr != nil {
				runErr = serr
				resumeWindowIdx = i + 1
				resumeState = nil
				break
			}
		}
	}

	return finalize(), runErr
}
diff --git a/pkg/exporter/exporter_test.go b/pkg/exporter/exporter_test.go
new file mode 100644
index 0000000..cb74a41
--- /dev/null
+++ b/pkg/exporter/exporter_test.go
@@ -0,0 +1,1014 @@
package exporter

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/solapi/solactl/pkg/clock"
	"github.com/solapi/solactl/pkg/progress"
)

// --- helpers --------------------------------------------------------------

// recordingWriter captures all records written.
type recordingWriter struct {
	mu       sync.Mutex
	records  []json.RawMessage
	flushed  int
	writeErr error // if set, returned from every WriteRecord call
	flushErr error // if set, returned from every Flush call
}

// WriteRecord appends a defensive copy of rec (or fails with writeErr).
func (w *recordingWriter) WriteRecord(rec json.RawMessage) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.writeErr != nil {
		return w.writeErr
	}
	cp := make(json.RawMessage, len(rec))
	copy(cp, rec)
	w.records = append(w.records, cp)
	return nil
}

// Flush counts successful flushes (or fails with flushErr).
func (w *recordingWriter) Flush() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.flushErr != nil {
		return w.flushErr
	}
	w.flushed++
	return nil
}

// recordingReporter captures all reporter callbacks.
type recordingReporter struct {
	mu            sync.Mutex
	starts        []reporterEvent
	pages         []reporterEvent
	dones         []reporterEvent
	finalizeCalls []finalizeEvent
}

// reporterEvent is a flattened capture of WindowStart / PageProgress /
// WindowDone arguments; fields not supplied by a given callback stay zero.
type reporterEvent struct {
	WindowIndex int
	Total       int
	Label       string
	PageIndex   int
	Cumulative  int
}

// finalizeEvent captures the arguments of one Finalize call.
type finalizeEvent struct {
	Err        error
	Cumulative int
	Elapsed    time.Duration
}

// WindowStart records the callback under the mutex (reporters may be invoked
// from test goroutines).
func (r *recordingReporter) WindowStart(wi, total int, label string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.starts = append(r.starts, reporterEvent{WindowIndex: wi, Total: total, Label: label})
}

// PageProgress records the per-page callback.
func (r *recordingReporter) PageProgress(wi, total, pi, cum int) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.pages = append(r.pages, reporterEvent{WindowIndex: wi, Total: total, PageIndex: pi, Cumulative: cum})
}

// WindowDone records the window-completion callback.
func (r *recordingReporter) WindowDone(wi, total int, label string, cum int) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.dones = append(r.dones, reporterEvent{WindowIndex: wi, Total: total, Label: label, Cumulative: cum})
}

// Finalize records the terminal callback; tests assert it fires exactly once.
func (r *recordingReporter) Finalize(err error, cum int, elapsed time.Duration) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.finalizeCalls = append(r.finalizeCalls, finalizeEvent{Err: err, Cumulative: cum, Elapsed: elapsed})
}

// fakeFetcher returns canned pages per (window-label, state-key) pair.
//
// pages 키: "