diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 404f9595..915629dc 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -2,7 +2,8 @@ name: CI
on:
pull_request_target:
- branches: [master]
+ branches:
+ - '**'
push:
branches:
- master
@@ -12,17 +13,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Set up Go 1.x
- uses: actions/setup-go@v2
- with:
- go-version: ^1.18
-
- name: Check out code into the Go module directory
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 1
ref: ${{ github.event.pull_request.head.sha }}
+ - name: Set up Go 1.x
+ uses: actions/setup-go@v3
+ with:
+ go-version: ^1.18
+
+
- name: Build the binary
run: make build
@@ -30,11 +32,14 @@ jobs:
run: make test_setup
env:
CODE_PATH: /home/runner/code
+ APP_PATH: /home/runner/app
- name: Run tests
run: make test
env:
CODE_PATH: /home/runner/code
+ APP_PATH: /home/runner/app
+ TOOLBOX_PATH: /home/runner/toolbox
- name: Report test coverage to DeepSource
uses: deepsourcelabs/test-coverage-action@master
diff --git a/.gitignore b/.gitignore
index d9e0a2ed..eecd835d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,9 @@ dist
coverage.out
cover.out
+# node
+node_modules
+
+# Editor workspaces
+.idea/
+.vscode/
diff --git a/Makefile b/Makefile
index 35929b8d..263c3bcc 100644
--- a/Makefile
+++ b/Makefile
@@ -5,18 +5,16 @@ build_local:
cd cmd/deepsource && go build -tags static_all -o /tmp/deepsource .
test:
- CGO_ENABLED=0 go test -v ./command/report/tests/... -run TestReportKeyValueWorkflow -count=1
- CGO_ENABLED=0 go test -v ./command/report/tests/... -run TestReportKeyValueFileWorkflow -count=1
- echo "\n====TESTING DEEPSOURCE PACKAGE====\n"
- CGO_ENABLED=0 go test -v ./deepsource/tests/...
- echo "\n====TESTING CONFIG VALIDATOR PACKAGE====\n"
- go test -v ./configvalidator/... -count=1
- echo "\n====CALCULATING TEST COVERAGE FOR ENTIRE PACKAGE====\n"
go test -v -coverprofile=coverage.out -count=1 ./...
test_setup:
- mkdir -p ${CODE_PATH}
+ mkdir -p ${CODE_PATH} ${APP_PATH}
cd ${CODE_PATH} && ls -A1 | xargs rm -rf
git clone https://github.com/deepsourcelabs/cli ${CODE_PATH}
chmod +x /tmp/deepsource
cp ./command/report/tests/dummy/python_coverage.xml /tmp
+ # Setup git user and email on CI.
+	if [ "$(CI)" = "true" ]; then \
+		git config --global user.name github-actions; \
+		git config --global user.email github-actions@github.com; \
+	fi
diff --git a/analysis/config/config.go b/analysis/config/config.go
new file mode 100644
index 00000000..5a2f3dc2
--- /dev/null
+++ b/analysis/config/config.go
@@ -0,0 +1,98 @@
+package config
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/pelletier/go-toml/v2"
+)
+
+type AnalysisRun struct {
+ AnalyzerName string
+ LocalCodePath string
+ ContainerCodePath string
+ DSConfig DSConfig
+ AnalysisFiles []string
+ ExcludedFiles []string
+ TestFiles []string
+}
+
+/* Checks for excluded and test files based on the `exclude_patterns` and `test_patterns`
+ * configured by the user */
+func (r *AnalysisRun) checkExcludedAndTestFiles() (err error) {
+ // Get a list of the files that match the `exclude_patterns` configured in `.deepsource.toml`
+ r.ExcludedFiles, err = r.getMatchingFiles(r.DSConfig.ExcludePatterns)
+ if err != nil {
+ return err
+ }
+
+ // Get a list of the files that match the `test_patterns` configured in `.deepsource.toml`
+ r.TestFiles, err = r.getMatchingFiles(r.DSConfig.TestPatterns)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/* Parses the DeepSource config (.deepsource.toml) located at the project root (CODE_PATH,
+ * alongside the .git directory) and resolves the config data into AnalysisRun */
+func (r *AnalysisRun) parseDeepSourceConfig() (err error) {
+ r.DSConfig = DSConfig{}
+
+ // Having resolved the project root path, check for the presence of .deepsource.toml in that path
+ // Read it if it exists and throw error if it doesn't
+ if _, err := os.Stat(path.Join(r.LocalCodePath, ".deepsource.toml")); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ fmt.Println("Could not find .deepsource.toml config at project root path:", r.LocalCodePath)
+ // If DeepSource config is not present, keep empty r.DSConfig
+ return nil
+ }
+ }
+
+ // If present, read the DeepSource config and decode it into r.DSConfig
+ config, err := os.ReadFile(path.Join(r.LocalCodePath, ".deepsource.toml"))
+ if err != nil {
+ fmt.Printf("Failed to read the .deepsource.toml config. Error: %s", err)
+ return nil
+ }
+
+ // Unmarshal the []byte config data into struct
+ err = toml.Unmarshal(config, &r.DSConfig)
+ if err != nil {
+ fmt.Printf("Failed to retrieve the .deepsource.toml config data. Error: %s", err)
+ return nil
+ }
+ return err
+}
+
+/* Configures the analysis_config.json file which is used by analyzer to run analysis since it contains
+ * metadata about the files to be analyzed, the test files in the project, the excluded files and analyzer meta */
+func (r *AnalysisRun) ConfigureAnalysis() (*AnalysisConfig, error) {
+ var err error
+ analyzerConfig := &AnalysisConfig{}
+
+ // Parse DeepSource config (.deepsource.toml) in order to get the
+ // configured `exclude_patterns` and `test_patterns`
+ if err = r.parseDeepSourceConfig(); err != nil {
+ return analyzerConfig, err
+ }
+
+ // Get the list of all the files present in the CODE_PATH and are to be analyzed
+ if r.AnalysisFiles, err = readAllFiles(r.LocalCodePath); err != nil {
+ return analyzerConfig, err
+ }
+
+ // Gets the list of files to be excluded from analysis and the test files present
+	// Doesn't return an error even if one occurs, since a failure here doesn't affect the analysis run
+ if err = r.checkExcludedAndTestFiles(); err != nil {
+ return analyzerConfig, nil
+ }
+
+ // Filter out the files to be analyzed by removing the r.ExcludedFiles from them and assign them to r.AnalysisFiles
+ r.filterAnalysisFiles()
+
+ // Format the analysis config data into LSP format of analysis_config.json
+ return r.formatAnalysisConfigToLSP(), nil
+}
diff --git a/analysis/config/format.go b/analysis/config/format.go
new file mode 100644
index 00000000..11c814f5
--- /dev/null
+++ b/analysis/config/format.go
@@ -0,0 +1,36 @@
+package config
+
+import (
+ "github.com/deepsourcelabs/cli/analysis/lsp"
+)
+
+/* Formats the analysis config data in the form of the LSP format as defined in
+ * the `analysis/types.go` */
+func (r *AnalysisRun) formatAnalysisConfigToLSP() *AnalysisConfig {
+ anaConfig := AnalysisConfig{
+ ExcludePatterns: r.DSConfig.ExcludePatterns,
+ TestPatterns: r.DSConfig.TestPatterns,
+ }
+
+ // Store the files, test files and excluded files in the LSP based analysis config
+ for _, file := range r.AnalysisFiles {
+ anaConfig.Files = append(anaConfig.Files, lsp.TextDocumentItem{URI: lsp.DocumentURI(file)})
+ }
+
+ for _, testFile := range r.TestFiles {
+ anaConfig.TestFiles = append(anaConfig.TestFiles, lsp.TextDocumentItem{URI: lsp.DocumentURI(testFile)})
+ }
+
+ for _, excludedFile := range r.ExcludedFiles {
+ anaConfig.ExcludedFiles = append(anaConfig.ExcludedFiles, lsp.TextDocumentItem{URI: lsp.DocumentURI(excludedFile)})
+ }
+
+ // Read analyzer_meta from DeepSource config (.deepsource.toml) and
+ // store the one corresponding to the Analyzer whose check is scheduled in analysis_config.json
+ for _, analyzer := range r.DSConfig.Analyzers {
+ if analyzer.Name == r.AnalyzerName {
+ anaConfig.AnalyzerMeta = analyzer.Meta
+ }
+ }
+ return &anaConfig
+}
diff --git a/analysis/config/types.go b/analysis/config/types.go
new file mode 100644
index 00000000..1e8414fb
--- /dev/null
+++ b/analysis/config/types.go
@@ -0,0 +1,103 @@
+package config
+
+import "github.com/deepsourcelabs/cli/analysis/lsp"
+
+///////////////////////////////////////////////////////
+// DSConfig is the struct for .deepsource.toml file //
+/////////////////////////////////////////////////////
+
+type DSConfig struct {
+ Version int `mapstructure:"version,omitempty" json:"version" toml:"version"`
+ ExcludePatterns []string `mapstructure:"exclude_patterns,omitempty" json:"exclude_patterns,omitempty" toml:"exclude_patterns"`
+ TestPatterns []string `mapstructure:"test_patterns,omitempty" json:"test_patterns,omitempty" toml:"test_patterns"`
+ Analyzers []Analyzer `mapstructure:"analyzers,omitempty" json:"analyzers,omitempty" toml:"analyzers"`
+ Transformers []Transformer `mapstructure:"transformers,omitempty" json:"transformers,omitempty" toml:"transformers"`
+}
+
+type Analyzer struct {
+ Name string `mapstructure:"name,omitempty" json:"name,omitempty" toml:"name"`
+ RuntimeVersion string `mapstructure:"runtime_version,omitempty" json:"runtime_version,omitempty" toml:"runtime_version,omitempty"`
+ Enabled bool `mapstructure:"enabled,omitempty" json:"enabled" toml:"enabled"`
+ DependencyFilePaths []string `mapstructure:"dependency_file_paths,omitempty" json:"dependency_file_paths,omitempty"`
+ Meta interface{} `mapstructure:"meta,omitempty" json:"meta,omitempty" toml:"meta"`
+ Thresholds interface{} `mapstructure:"thresholds,omitempty" json:"thresholds,omitempty"`
+}
+
+type Transformer struct {
+ Name string `mapstructure:"name,omitempty" json:"name,omitempty" toml:"name"`
+ Enabled bool `mapstructure:"enabled,omitempty" json:"enabled,omitempty" toml:"enabled"`
+}
+
+///////////////////////////////////////
+// LSP based Analysis Config Types //
+/////////////////////////////////////
+
+type AnalysisConfig struct {
+ Files []lsp.TextDocumentItem `json:"files"`
+ TestFiles []lsp.TextDocumentItem `json:"test_files"`
+ ExcludedFiles []lsp.TextDocumentItem `json:"excluded_files"`
+ ExcludePatterns []string `json:"exclude_patterns"`
+ TestPatterns []string `json:"test_patterns"`
+ AnalyzerMeta interface{} `json:"analyzer_meta"`
+}
+
+//////////////////////////////////////
+// LSP based Analysis Result Types //
+/////////////////////////////////////
+
+type Namespace struct {
+ Key string `json:"key"`
+ Value float64 `json:"value"`
+}
+
+type Metric struct {
+ MetricCode string `json:"metric_code"`
+ Namespaces []Namespace `json:"namespaces"`
+}
+
+type AnalysisResult struct {
+ Issues []lsp.Diagnostic `json:"issues"`
+ Metrics []Metric `json:"metrics,omitempty"`
+ IsPassed bool `json:"is_passed"`
+ Errors []Error `json:"errors"`
+ // Errors []lsp.Diagnostic `json:"errors"`
+ ExtraData interface{} `json:"extra_data"`
+}
+
+////////////////////////////////////
+// Default Analysis Result Types //
+///////////////////////////////////
+
+type Error struct {
+ HMessage string `json:"hmessage"`
+ Level int `json:"level"`
+}
+
+type Coordinate struct {
+ Line int `json:"line"`
+ Column int `json:"column"`
+}
+
+type Position struct {
+ Begin Coordinate `json:"begin"`
+ End Coordinate `json:"end"`
+}
+
+type Location struct {
+ Path string `json:"path"`
+ Position Position `json:"position"`
+}
+
+type Issue struct {
+ Code string `json:"issue_code"`
+ Title string `json:"issue_text"`
+ Location Location `json:"location"`
+}
+
+type DefaultAnalysisResult struct {
+ Issues []Issue `json:"issues"`
+ Metrics []Metric `json:"metrics,omitempty"`
+ IsPassed bool `json:"is_passed"`
+ Errors []Error `json:"errors"`
+ ExtraData interface{} `json:"extra_data"`
+}
diff --git a/analysis/config/utils.go b/analysis/config/utils.go
new file mode 100644
index 00000000..4fb360c7
--- /dev/null
+++ b/analysis/config/utils.go
@@ -0,0 +1,78 @@
+package config
+
+import (
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/gobwas/glob"
+)
+
+/* Walks the `CODE_PATH` directory and returns all the files other than the ones present
+ * in the .git folder for analysis in the form of a string array */
+func readAllFiles(codePath string) ([]string, error) {
+ fileCount := 0
+
+ allFiles := make([]string, 0)
+
+ err := filepath.Walk(codePath,
+ func(path string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ fileCount++
+
+ /* Check the following before appending to the list of files:
+ * Should not be a directory
+ * The walked file should not be present in .git folder */
+ if !fileInfo.IsDir() && !strings.HasPrefix(path, filepath.Join(codePath, ".git")) {
+ allFiles = append(allFiles, path)
+ }
+ return nil
+ })
+ if err != nil {
+ return allFiles, err
+ }
+ return allFiles, nil
+}
+
+// Returns the slice of files matching certain glob patterns
+func (r *AnalysisRun) getMatchingFiles(patterns []string) ([]string, error) {
+ matchedFiles := make([]string, 0)
+
+ // Return all the files if no exclude_patterns are configured
+ if len(patterns) == 0 {
+ return matchedFiles, nil
+ }
+
+ for _, file := range r.AnalysisFiles {
+ for i := range patterns {
+ g := glob.MustCompile(path.Join(r.LocalCodePath, patterns[i]))
+ if g.Match(file) {
+ matchedFiles = append(matchedFiles, file)
+ }
+ }
+ }
+ return matchedFiles, nil
+}
+
+// Filters the analysis files and removes the files matching the exclude_patterns from them
+// TODO: Improve the logic here
+func (r *AnalysisRun) filterAnalysisFiles() {
+ excluded := false
+ filteredFiles := []string{}
+ for _, file := range r.AnalysisFiles {
+ excluded = false
+ for _, excludedFile := range r.ExcludedFiles {
+ if file == excludedFile {
+ excluded = true
+ break
+ }
+ }
+ if !excluded {
+ filteredFiles = append(filteredFiles, file)
+ }
+ }
+ r.AnalysisFiles = filteredFiles
+}
diff --git a/analysis/lsp/types.go b/analysis/lsp/types.go
new file mode 100644
index 00000000..b42ead68
--- /dev/null
+++ b/analysis/lsp/types.go
@@ -0,0 +1,63 @@
+package lsp
+
+//////////////////////////////
+// Document identity types //
+/////////////////////////////
+
+type DocumentURI string
+
+type TextDocumentItem struct {
+ URI DocumentURI `json:"uri"`
+ LanguageID string `json:"languageID,omitempty"`
+ Version int `json:"version,omitempty"`
+ Text string `json:"text,omitempty"`
+}
+
+///////////////////////////////
+// Diagnostic related types //
+/////////////////////////////
+
+type DiagnosticSeverity int
+
+const (
+ Error DiagnosticSeverity = 1
+ Warning DiagnosticSeverity = 2
+ Information DiagnosticSeverity = 3
+ Hint DiagnosticSeverity = 4
+)
+
+type Position struct {
+ Line int `json:"line"`
+ Character int `json:"character"`
+}
+type Range struct {
+ Start Position `json:"start"`
+ End Position `json:"end"`
+}
+
+type Location struct {
+ URI string `json:"uri"`
+ Range Range `json:"range"`
+}
+
+type DiagnosticRelatedInformation struct {
+ Location Location `json:"location"`
+ Message string `json:"message"`
+}
+
+type Diagnostic struct {
+ Range Range `json:"range"`
+ Severity DiagnosticSeverity `json:"severity,omitempty"`
+ Code string `json:"code,omitempty"`
+ Source string `json:"source,omitempty"`
+ Message string `json:"message"`
+
+ /**
+ * An array of related diagnostic information, e.g. when symbol-names within
+ * a scope collide all definitions can be marked via this property.
+ * var a,b
+ * a := 2
+ * Issues in line 1 and 2 are related.
+ */
+ RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation"`
+}
diff --git a/analysis/processor/batch_process.go b/analysis/processor/batch_process.go
new file mode 100644
index 00000000..00de911d
--- /dev/null
+++ b/analysis/processor/batch_process.go
@@ -0,0 +1,152 @@
+package processor
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "sort"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+)
+
+var (
+ issueIndex int = 0
+ batchSize int = 30
+ maxIssueDensity int = 100
+)
+
+type fileContentNode struct {
+ Filename string
+ FileContent []string
+}
+
+// While this loop looks like it would have a complexity of len(filesWIssueRange) * len(cachedFiles) * issues * len(processorList)
+// it only has a complexity of O(len(report.Issues)).
+// When there are a lot of files to be processed, opening all of them one by one takes time, while the CPU waits idly.
+// Opening all files and loading them into memory is expensive in terms of space, since there could be a lot of files.
+// Hence, opening files concurrently in batches (of, say, 30 files) and then processing all issues in those 30 files one by one
+// appears to be the best option. We cannot process each file's issues concurrently, because only the file loading operation is
+// IO intensive, and the rest is CPU intensive.
+func (p *ReportProcessor) processIssuesBatch(filesWIssueRange []IssueRange, result *types.AnalysisResult, processedIssues *[]types.Issue) {
+ // Process files in batches of `batchSize` to avoid `too many files open` error
+ for processedFiles := 0; processedFiles < len(filesWIssueRange); {
+ filesToProcess := 0
+
+ // The default batch size is 30. If the number of files is less than this batchsize assign their count
+ // as the number of files to process, else assign the batchsize as the number of files to be processed in
+ // this iteration.
+ if len(filesWIssueRange)-processedFiles < batchSize {
+ filesToProcess = len(filesWIssueRange) - processedFiles
+ } else {
+ filesToProcess = batchSize
+ }
+
+ // The slice containing the data about cached files to be processed.
+ cachedFiles := p.cacheFilesToBeProcessed(filesToProcess, processedFiles, filesWIssueRange)
+
+ // Iterate over the cached files data and process the issues present in them.
+ for j, cachedFile := range cachedFiles {
+ for issueIndex < len(result.Issues) {
+ issue := result.Issues[issueIndex] // initialize the loop
+ // Check if the file is a generated one, this happens if enormous amount of issues are
+ // reported in a single file on a single line.
+ if p.isGeneratedFile(processedFiles+j, &cachedFile, filesWIssueRange, result) {
+					issueIndex = filesWIssueRange[processedFiles+j].EndIndex; break // skip the whole file; a bare 'continue' never advances issueIndex and loops forever
+ }
+
+ // Check if the issue is for another file.
+ // If yes, skip this iteration and go to next file.
+ if cachedFile.Filename != issue.Location.Path {
+ break
+ }
+
+ if err := p.runProcessors(cachedFile, &issue, processedIssues); err != nil {
+ fmt.Println(err.Error())
+ }
+ issueIndex++
+ }
+ }
+
+ // Increase total number of files processed
+ processedFiles += filesToProcess
+ }
+}
+
+// runProcessors runs the supported processors on the issue passed as a parameter
+func (p *ReportProcessor) runProcessors(cachedFile fileContentNode, issueToProcess *types.Issue, processedIssues *[]types.Issue) (err error) {
+ // Loop through processors and execute them on the issue passed as a parameter
+ for _, processor := range p.Processors {
+ err = processor.Process(cachedFile.FileContent, issueToProcess, processedIssues)
+ if err != nil {
+ return fmt.Errorf("failed to execute the processor %s with the following error: %s", processor, err)
+ }
+ }
+ return
+}
+
+// If the number of issues in this file is more than a certain number of issues
+// averaged per line, this may be a generated file. Skip processing of further issues
+// in this file
+func (p *ReportProcessor) isGeneratedFile(fileIndex int, cachedFile *fileContentNode, filesWIssueRange []IssueRange, result *types.AnalysisResult) bool {
+	linesInThisFile := len(cachedFile.FileContent) | 1 // OR with 1 guarantees a non-zero divisor (guards div-by-zero; slightly skews even counts)
+ issuesInThisFile := filesWIssueRange[fileIndex].EndIndex - filesWIssueRange[fileIndex].BeginIndex
+ if (issuesInThisFile / linesInThisFile) > maxIssueDensity {
+ log.Printf(
+ "Skipping file %s. Too many issues per line. Lines: %d, issues: %d\n",
+ cachedFile.Filename,
+ linesInThisFile,
+ issuesInThisFile,
+ )
+ result.Errors = append(result.Errors, types.Error{
+ HMessage: fmt.Sprintf(
+ "Skipped file %s because too many issues were raised. "+
+ "Is this a generated file that can be added in [exclude_patterns](https://deepsource.io/docs/config/deepsource-toml.html#exclude-patterns)?",
+ cachedFile.Filename,
+ ),
+ Level: 1,
+ })
+ return true
+ }
+ return false
+}
+
+// cacheBatchOfFiles receives the count of files to be cached and caches them in a batch by spawning goroutines.
+func (p *ReportProcessor) cacheFilesToBeProcessed(totalFiles, processedFiles int, filesWIssueRange []IssueRange) []fileContentNode {
+ fileContentChannel := make(chan fileContentNode, totalFiles)
+ for j := 0; j < totalFiles; j++ {
+ filename := filesWIssueRange[processedFiles+j].Filename
+ go addFileToCache(fileContentChannel, filename)
+ }
+
+ cachedFiles := []fileContentNode{}
+ for j := 0; j < totalFiles; j++ {
+ cachedFiles = append(cachedFiles, <-fileContentChannel)
+ }
+
+ // sort the cached files by filename, because our issues are sorted by filename
+ sort.Slice(cachedFiles, func(i, j int) bool {
+ return cachedFiles[i].Filename < cachedFiles[j].Filename
+ })
+ return cachedFiles
+}
+
+// addFileToCache reads the file and formats its content into a fileContentNode struct instance
+// and passes that to the cachedFilesChannel channel since this function is run on a goroutine.
+func addFileToCache(cachedFilesChannel chan fileContentNode, filename string) {
+ fileContentSlice := []string{}
+
+ fileContentBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ fmt.Println("Could not process for file: ", filename, ". Err: ", err)
+ } else if string(fileContentBytes) != "" {
+ fileContentSlice = strings.Split(string(fileContentBytes), "\n")
+ } else {
+ fileContentSlice = []string{}
+ }
+
+ cachedFilesChannel <- fileContentNode{
+ Filename: filename,
+ FileContent: fileContentSlice,
+ }
+}
diff --git a/analysis/processor/process.go b/analysis/processor/process.go
new file mode 100644
index 00000000..4467276b
--- /dev/null
+++ b/analysis/processor/process.go
@@ -0,0 +1,56 @@
+package processor
+
+import (
+ "github.com/deepsourcelabs/cli/types"
+)
+
+// Processor interface to receive analysis post-processors.
+type IProcessor interface {
+ String() string
+ Process([]string, *types.Issue, *[]types.Issue) error
+}
+
+// ReportProcessor struct contains the processor data needed to process the analysis results.
+type ReportProcessor struct {
+ LocalSourcePath string // The local source code path which was analyzed by the Analyzer.
+ ContainerCodePath string // The codepath set for the Analysis container.
+ Processors []IProcessor // The list of supported post-analysis processors.
+ Report types.AnalyzerReport // The report generated by the Analyzer post analysis.
+}
+
+// ProcessAnalyzerReport accepts the result as a byte array and processes the results in the form of a
+// AnalyzerReport struct instance.
+// It sorts the issues in an alphabetical order of filenames just to ensure that all issues getting
+// reported for the same files come together & processes the issues for the various required processors.
+// As of now, there are two processors supported:
+// - skipcq : Processes the issues and checks if some of them should be ignored since they have
+// been ignored by the user through suitable `skipcq` comments.
+// - source_code_load : Processes the issues for the source code snippets, highlights the snippets
+// and adds them to the Analysis result.
+func (p *ReportProcessor) Process() types.AnalysisResult {
+	// Convert the Analyzer report from LSP based format to the default results format.
+ analysisResult := p.formatLSPResultsToDefault()
+
+ // Check if there are issues reported actually.
+ if len(analysisResult.Issues) <= 0 {
+ return analysisResult
+ }
+
+ // All the files that appear in the issues are now processed by the processors listed in analyzer conf
+ // We must cache the files in order to not do file IO for every processor.
+ p.sortIssuesByFile(&analysisResult)
+
+ // Get the issues to file range data.
+ filesIndex := createIssueFileRange(analysisResult)
+
+ // Iterate over the filesIndex and read the files in batch and process the issues using the suitable processors.
+ processedIssues := []types.Issue{}
+ p.processIssuesBatch(filesIndex, &analysisResult, &processedIssues)
+ analysisResult.Issues = processedIssues
+
+ // Sort again for consistency (mostly for test to pass).
+ p.sortIssuesByFile(&analysisResult)
+
+ // Return the processed analysis result.
+ return analysisResult
+}
diff --git a/analysis/processor/processors/proc_skip_cq.go b/analysis/processor/processors/proc_skip_cq.go
new file mode 100644
index 00000000..529b2ba9
--- /dev/null
+++ b/analysis/processor/processors/proc_skip_cq.go
@@ -0,0 +1,170 @@
+package processors
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+)
+
+type ProcSkipCQ struct{}
+
+func isSimilarIssue(fileExt, skipCQTag, probableIssueCode, issueCode string) bool {
+ // if it is // skipcq: SCC-S1002 or similar plain skipcq tag, return
+ if skipCQTag == "skipcq" {
+ if probableIssueCode != "" {
+ return strings.EqualFold(strings.TrimSpace(probableIssueCode), issueCode)
+ } else {
+ return true
+ }
+ }
+
+ // if the tag to skip CQ is something else, check here
+ for _, silencer := range languagesMeta[fileExt].Silencers {
+ if strings.EqualFold(strings.TrimSpace(skipCQTag), silencer.SilencerCode) {
+ if len(silencer.Issues) == 0 {
+ // if the silencer doesn't have any issues in it, ignore all the issues
+ return true
+ }
+
+ // similarIssues is a comma separated list of issues corresponding to the raised issue
+ similarIssues, ok := silencer.Issues[strings.Split(issueCode, "-")[1]]
+
+ // if the issue is not found in the silencer's issue map, do not ignore the issue
+ if !ok {
+ return false
+ }
+
+ if probableIssueCode == "" {
+ // if there is no specific issue silenced, ignore all issues
+ return true
+ } else {
+				for _, similarIssue := range strings.Split(similarIssues, ",") {
+					// if the silencer's issue list contains this particular issue, ignore it
+					if similarIssue == strings.ToUpper(probableIssueCode) {
+						return true
+					}
+				}
+				// none of the mapped similar issues matched; do not silence this issue
+				return false
+ }
+ }
+ }
+ return false
+}
+
+func checkSkipCQ(fileExt string, skipCQre regexp.Regexp, line, issueCode string) bool {
+ matches := skipCQre.FindAllStringSubmatch(line, -1)
+ skipCQTag := ""
+
+ if matches == nil {
+ return false
+ }
+
+ ignoreIssue := true
+
+ for i, name := range skipCQre.SubexpNames() {
+ for _, match := range matches {
+ if i != 0 && name != "" {
+ // note the name of the issue silencer and move on
+ if name == "skipcq_tag" {
+ skipCQTag = match[i]
+ } else if name == "issue_codes" {
+ if match[i] != "" {
+ for _, probableIssueCode := range strings.Split(match[i], ",") {
+ // if an issue is to be ignored in this line of code is same as
+ // the issue that is raised, we have to ignore the issue
+
+ if isSimilarIssue(fileExt, skipCQTag, probableIssueCode, issueCode) {
+ ignoreIssue = true
+ // since we are only dealing with one issue at a time
+ // break at the first occurrence
+ return ignoreIssue
+ } else {
+ ignoreIssue = false
+ }
+ }
+ } else {
+ // in case there is no issue code associated, check for just the silencer tag
+ ignoreIssue = isSimilarIssue(fileExt, skipCQTag, "", issueCode)
+ }
+ }
+ }
+ }
+ }
+ return ignoreIssue
+}
+
+/* Check if a given line of code is eligible to be checked for skip CQ.
+ * Bare minimum eligibility is that the line should either be empty or
+ * contain a comment only line. */
+func analyzeLineForSkipCQ(line, fileExt string) bool {
+ var commentIdentifier string
+ for _, langMeta := range languagesMeta {
+ if fileExt == langMeta.Extension {
+ commentIdentifier = langMeta.CommentIdentifier
+ break
+ }
+ }
+
+ line = strings.TrimSpace(line)
+
+ if line == "" {
+ return true
+ }
+
+ match, err := regexp.Match(fmt.Sprintf("^%s", commentIdentifier), []byte(line))
+ return err == nil && match
+}
+
+// Returns the name of the processor
+func (p ProcSkipCQ) String() string {
+ return "skip_cq"
+}
+
+// Process checks if the issue passed as an argument should be skipped or not
+// If it should be skipped, it is not appended to the processedIssues slice while if it is not skipped, it is appended.
+func (p ProcSkipCQ) Process(fileContentSlice []string, issue *types.Issue, processedIssues *[]types.Issue) error {
+ filePath := issue.Location.Path
+ lineStart := issue.Location.Position.Begin.Line
+ issueCode := issue.IssueCode
+
+ if lineStart < 1 || lineStart > len(fileContentSlice) {
+ return fmt.Errorf("issue position is weird for file of %d lines", len(fileContentSlice))
+ }
+
+ shouldSkipCQ := false
+ line := strings.TrimSpace(fileContentSlice[lineStart-1])
+
+ fileSupported := false
+ var fileExt string
+ var skipCQregex regexp.Regexp
+ for re := range languagesMeta {
+ match, err := regexp.Match(re, []byte(filePath))
+ if err != nil || !match {
+ continue
+ }
+ fileSupported = true
+ fileExt = re
+ break
+ }
+ if !fileSupported {
+ fileExt = "default_extension"
+ }
+ skipCQregex = regexMap[fileExt]
+
+ // Skip code quality checks for the given issue code on the lineStart
+ shouldSkipCQ = checkSkipCQ(fileExt, skipCQregex, line, issueCode)
+
+ // Continue looking only if the previous line is a comment-only line specifying the skipcq condition,
+ // the lineStart is not the first line of the file and shouldSkipCQ is not already true
+ for index := lineStart - 2; !shouldSkipCQ && index >= 0 && analyzeLineForSkipCQ(fileContentSlice[index], fileExt); index-- {
+ shouldSkipCQ = checkSkipCQ(fileExt, skipCQregex, fileContentSlice[index], issueCode)
+ }
+
+ if !shouldSkipCQ {
+ *processedIssues = append(*processedIssues, *issue)
+ }
+ return nil
+}
diff --git a/analysis/processor/processors/proc_source_code_load.go b/analysis/processor/processors/proc_source_code_load.go
new file mode 100644
index 00000000..7521e376
--- /dev/null
+++ b/analysis/processor/processors/proc_source_code_load.go
@@ -0,0 +1,225 @@
+package processors
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/alecthomas/chroma/formatters/html"
+ "github.com/alecthomas/chroma/lexers"
+ "github.com/alecthomas/chroma/styles"
+ "github.com/deepsourcelabs/cli/types"
+)
+
+const sourceCodeOffset int = 3
+
+type ProcSourceCodeLoad struct{}
+
+type formattedFile struct {
+ fileContent []string
+ highlightedContent []string
+}
+
+var (
+ lineStartWithOffset, lineEndWithOffset int
+ formattedFileCache map[string]formattedFile = make(map[string]formattedFile)
+)
+
+// We cache the past file's iterator, taking advantage of the fact that
+// issues are sorted according to filenames
+func getFinalFormattedSlice(fileContentSlice []string, issue *types.Issue) formattedFile {
+ filePath := issue.Location.Path
+
+ // Check if the formatted file data is already present in the cache.
+ // If yes, return the cached data.
+ if formattedFileData, ok := formattedFileCache[filePath]; ok {
+ return formattedFileData
+ }
+
+ // Else, clear the cache.
+ for k := range formattedFileCache {
+ delete(formattedFileCache, k)
+ }
+
+ /* ============================================================
+ * Use alecthomas/chroma to generate syntax highlighted snippet
+ * ============================================================ */
+ fileContentString := strings.Join(fileContentSlice, "\n")
+ lexer := lexers.Match(filePath)
+
+ /* Case: In case of .vue files, use the `html` lexer since the `vue` lexer
+ * breaks in certain cases. The `html` lexer provides comparatively better results.
+ * TODO(SNT): Remove this case if vue lexer is improved in future. */
+ if strings.HasSuffix(filePath, ".vue") {
+ lexer = lexers.Get("html")
+ }
+ if lexer == nil {
+ lexer = lexers.Fallback
+ }
+
+ // Tokenize the file content.
+ iterator, err := lexer.Tokenise(nil, fileContentString)
+ if err != nil {
+ log.Println("Could not tokenize file ", filePath)
+ return formattedFile{
+ fileContent: []string{},
+ highlightedContent: []string{},
+ }
+ }
+
+ // Selecting the chroma format in which we expect the output(html) and use the `monokai` colorscheme to highlight the snippet.
+ formatter := html.New(html.WithLineNumbers(true), html.PreventSurroundingPre(true), html.WithClasses(true))
+ style := styles.Get("monokai")
+ if style == nil {
+ style = styles.Fallback
+ }
+
+ var chromaFormattedBytes bytes.Buffer
+ var chromaFormattedString string
+ err = formatter.Format(&chromaFormattedBytes, style, iterator)
+ if err != nil {
+ fmt.Println(err)
+ return formattedFile{
+ fileContent: []string{},
+ highlightedContent: []string{},
+ }
+ }
+
+ // Convert the generated data in bytes to string and also extract the slice containing
+ // all the lines as the contents.
+ chromaFormattedString = chromaFormattedBytes.String()
+ chromaFormattedSlice := strings.Split(chromaFormattedString, "\n")
+
+ // We need to move the trailing span to the previous line in order.
+ for i := range chromaFormattedSlice {
+ if i != 0 && !strings.HasPrefix(chromaFormattedSlice[i], "") {
+ lineStartIndex := strings.Index(chromaFormattedSlice[i], "")
+
+ if lineStartIndex != -1 {
+ chromaFormattedSlice[i-1] += chromaFormattedSlice[i][:lineStartIndex]
+ chromaFormattedSlice[i] = chromaFormattedSlice[i][lineStartIndex:]
+ }
+ }
+ }
+
+ // Highlight all lines in the file.
+ lexer = lexers.Match(filePath)
+ if lexer == nil {
+ lexer = lexers.Fallback
+ }
+ iterator, err = lexer.Tokenise(nil, fileContentString)
+ if err != nil {
+ fmt.Println("Could not tokenize file ", filePath)
+ return formattedFile{
+ fileContent: []string{},
+ highlightedContent: []string{},
+ }
+ }
+
+ // Specifying the highlight range.
+ lineHighlightRange := [][2]int{{1, len(fileContentSlice)}}
+
+ // Format, style and color the snippet.
+ formatter = html.New(html.WithLineNumbers(true), html.PreventSurroundingPre(true), html.WithClasses(true), html.HighlightLines(lineHighlightRange))
+ style = styles.Get("monokai")
+ if style == nil {
+ style = styles.Fallback
+ }
+ var chromaHighlightedBytes bytes.Buffer
+ err = formatter.Format(&chromaHighlightedBytes, style, iterator)
+ if err != nil {
+ fmt.Println(err)
+ return formattedFile{
+ fileContent: []string{},
+ highlightedContent: []string{},
+ }
+ }
+ chromaHighlightedString := chromaHighlightedBytes.String()
+ chromaHighlightedSlice := strings.Split(chromaHighlightedString, "\n")
+
+ /* Correct the span elements in the slice.
+ * Highlighted lines look like this:
+ *
+ * 1importos
+ * 2importrandom# noqa: F401
+ * 3importthis# noqa
+ * 4importsys */
+
+ // We need to move the trailing span to the previous line in order for our replacement logic to work.
+ for i := range chromaHighlightedSlice {
+ if i != 0 && !strings.HasPrefix(chromaHighlightedSlice[i], "") {
+ lineStartIndex := strings.Index(chromaHighlightedSlice[i], "")
+
+ if lineStartIndex != -1 {
+ chromaHighlightedSlice[i-1] += chromaHighlightedSlice[i][:lineStartIndex]
+ chromaHighlightedSlice[i] = chromaHighlightedSlice[i][lineStartIndex:]
+ }
+ }
+ }
+
+ // Create formattedContent variable of `formattedFile` type and return.
+ formattedContent := formattedFile{
+ fileContent: chromaFormattedSlice,
+ highlightedContent: chromaHighlightedSlice,
+ }
+ formattedFileCache[filePath] = formattedContent
+
+ return formattedContent
+}
+
+// Returns the name of the processor
+func (p ProcSourceCodeLoad) String() string {
+ return "source_code_load"
+}
+
+// Process processes the source code to be highlighted using chroma and writes that into the
+// analysis result post highlighting.
+func (p ProcSourceCodeLoad) Process(fileContentSlice []string, issue *types.Issue, _ *[]types.Issue) error {
+ lineStart := issue.Location.Position.Begin.Line
+ lineEnd := issue.Location.Position.End.Line
+
+ // Count lines in the file
+ numLines := len(fileContentSlice)
+
+ // Calculate the line number from where the highlighting should start
+ if lineStart-sourceCodeOffset < 1 {
+ lineStartWithOffset = 1
+ } else {
+ lineStartWithOffset = lineStart - sourceCodeOffset
+ }
+
+ // Calculate the line number from where the highlighting should end
+ if lineEnd+sourceCodeOffset > numLines {
+ lineEndWithOffset = numLines
+ } else {
+ lineEndWithOffset = lineEnd + sourceCodeOffset
+ }
+
+ formattedFileContent := getFinalFormattedSlice(fileContentSlice, issue)
+ chromaFormattedSlice := formattedFileContent.fileContent
+ chromaHighlightedSlice := formattedFileContent.highlightedContent
+
+ finalFormattedSlice := make([]string, 0)
+ finalFormattedSlice = append(finalFormattedSlice, `
`)
+
+ // Get the file slice to write
+ for i := lineStartWithOffset; i <= lineEndWithOffset; i++ {
+ currentLine := chromaFormattedSlice[i-1]
+
+ // for all lines except the last, append a newline
+ if i < lineEndWithOffset {
+ currentLine = currentLine + "\n"
+ }
+
+ // highlight the lines containing the issue
+ // We need not add a \n at the end of highlighted lines, as chroma does it itself
+ if i >= lineStart && i <= lineEnd {
+ currentLine = chromaHighlightedSlice[i-1]
+ }
+ finalFormattedSlice = append(finalFormattedSlice, currentLine)
+ }
+ finalFormattedSlice = append(finalFormattedSlice, "
")
+ issue.ProcessedData.SourceCode.Rendered = strings.Join(finalFormattedSlice, "")
+ return nil
+}
diff --git a/analysis/processor/processors/silencers.go b/analysis/processor/processors/silencers.go
new file mode 100644
index 00000000..7cfc0e5c
--- /dev/null
+++ b/analysis/processor/processors/silencers.go
@@ -0,0 +1,106 @@
+package processors
+
+import (
+ "regexp"
+ "strings"
+)
+
+var languagesMeta map[string]LanguageMeta
+
+// for the extension e.g .py, .go, .js etc build a map of compiled
+// regex with the corresponding comment identifier in regex.
+var regexMap map[string]regexp.Regexp
+
+// Silencers data structure
+type IssueSilencer struct {
+ PortName string `json:"port_name"`
+ SilencerCode string `json:"silencer_code"`
+ Issues map[string]string `json:"issues"`
+ TagSeparator string `json:"tag_separator"`
+}
+
+type LanguageMeta struct {
+ Extension string `json:"extension"`
+ CommentIdentifier string `json:"comment_identifier"`
+ Silencers []IssueSilencer `json:"issue_silencers"`
+}
+
+// Reads the `silencers.json` file present in `/toolbox` directory
+// and makes a LanguageMeta map which helps in processing skipcq
+func prepareSilencersMeta() []LanguageMeta {
+ // Sane default meta to use when failed to read the silencers file.
+ // Also, appended to the every config
+ issuesMeta := []LanguageMeta{
+ {
+ Extension: "default_extension",
+ CommentIdentifier: `(\#|\/\/)`,
+ Silencers: []IssueSilencer{},
+ },
+ }
+ return issuesMeta
+}
+
+// generateRegExp generates the regex expression used for matching the skipcq comment
+func generateRegExp(fileExt string) regexp.Regexp {
+ // Analyzer specific issues silencer tags, with `skipcq` tag
+ skipCQTags := []string{"skipcq"}
+
+ // Different analyzers may have different ending token for issue silencing. eg. Pylint has `=` while
+ // most others have `:`
+ separators := map[string]int{
+ ":": 1, // default, for skipcq
+ }
+ for _, silencer := range languagesMeta[fileExt].Silencers {
+ skipCQTags = append(skipCQTags, silencer.SilencerCode)
+ separators[silencer.TagSeparator] = 1
+ }
+
+ separatorsList := []string{}
+ for k := range separators {
+ separatorsList = append(separatorsList, k)
+ }
+
+ commentIdentifier := languagesMeta[fileExt].CommentIdentifier
+ regex := *regexp.MustCompile(
+ // case-insensitive
+ `(?i)` +
+
+ // group the silencer tags with a name, matching one of the many issue silencers (eg, noqa, nosec)
+ commentIdentifier + `.*?(?P(` + strings.Join(skipCQTags, "|") + `))` +
+
+ // zero or more occurrences of the issue codes, ends at `)?` later
+ `(` +
+
+ // separators, like `:` and `=`
+ `?:(` + strings.Join(separatorsList, "|") + `)[\s]?` +
+
+ // create a group named issue_codes, with pattern similar to PYL-W0614 or SA1022
+ `(?P([A-Z]*-?[A-Z]*[0-9]+(?:,(\s+)?)?)+)` +
+
+ // zero or one occurrences of `: issuecode1, issuecode2` and so on
+ `)?`,
+ )
+ return regex
+}
+
+// generateSilencersRegexMap generates the silencers regex expression used for skipcq processing
+func GenerateSilencersRegexMap() {
+ langMeta := make(map[string]LanguageMeta)
+ silencerRegexMap := make(map[string]regexp.Regexp)
+
+ // Fetch the silencers meta for the Analyzer
+ // Keeping it as the default silencer for now
+ silencersData := prepareSilencersMeta()
+
+ // Mapping the silencers data to the file extension
+ // in the above declared map `langMeta`
+ for _, silencerData := range silencersData {
+ langMeta[silencerData.Extension] = silencerData
+ }
+ languagesMeta = langMeta
+
+ for ext := range langMeta {
+ silencerRegexMap[ext] = generateRegExp(ext)
+ }
+ regexMap = silencerRegexMap
+}
diff --git a/analysis/processor/utils.go b/analysis/processor/utils.go
new file mode 100644
index 00000000..ad013e6f
--- /dev/null
+++ b/analysis/processor/utils.go
@@ -0,0 +1,112 @@
+package processor
+
+import (
+ "path"
+ "sort"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+)
+
+// sortIssuesByFile sorts the issues in an alphabetical order according to the filenames
+// where they got reported.
+func (p *ReportProcessor) sortIssuesByFile(result *types.AnalysisResult) {
+ sort.Slice(result.Issues, func(i, j int) bool {
+ return result.Issues[i].Location.Path < result.Issues[j].Location.Path
+ })
+}
+
+// Prepare a map with unique filenames as key and the issue range for each file as value
+// This is done to ensure fewer loops when processing issues.
+type IssueRange struct {
+ Filename string // The file which has the issues
+ BeginIndex int // Array index in report.Issues where the particular issue starts
+ EndIndex int // Array index in report.Issues where the particular issue ends
+}
+
+// GenerateIssueRangeSlice generates an array containing the issue ranges with respect to files
+// that helps us to go through them and map them to the files where they got reported instead
+// of opening the file for each of them. The generated index looks like this:
+
+// [{analyzer.go 0 0} {autofix_patch.go 1 1} {difftool.go 2 2} {patch.patch 3 5} {proc_skip_cq_test.go 6 6}]
+
+// Here, the first field if filename and the second and third fields are the index range in which the issues reported
+// in these files lie in the sorted AnalyzerReport slice.
+func createIssueFileRange(report types.AnalysisResult) []IssueRange {
+ fileCount := 0 // for 1 file, 0 based indexing
+ issuesRange := []IssueRange{}
+ prevFilename := report.Issues[0].Location.Path
+
+ issueRange := IssueRange{
+ BeginIndex: 0,
+ EndIndex: len(report.Issues) - 1,
+ Filename: prevFilename,
+ }
+ issuesRange = append(issuesRange, issueRange)
+
+ // Iterating over the issues and creating an array containing issues with index data about
+ // the files in which those issues are present
+ for i := 1; i < len(report.Issues); i++ {
+ issue := report.Issues[i]
+ currentFilename := issue.Location.Path
+
+ // TODO: Check when this condition is implied
+ if issue.Location.Position.End.Line == -1 {
+ issue.Location.Position.End.Line = issue.Location.Position.Begin.Line
+ }
+
+ if currentFilename != prevFilename {
+ fileCount++
+
+ issueRange = issuesRange[fileCount-1]
+ issueRange.EndIndex = i - 1
+ issuesRange[fileCount-1] = issueRange
+
+ // Create for the new file
+ issueRange = IssueRange{
+ Filename: currentFilename,
+ BeginIndex: i,
+ EndIndex: len(report.Issues) - 1,
+ }
+
+ issuesRange = append(issuesRange, issueRange)
+ prevFilename = currentFilename
+ }
+ }
+ return issuesRange
+}
+
+// formatLSPResultsToDefault converts the LSP based analysis results into the default format supported by DeepSource.
+func (p *ReportProcessor) formatLSPResultsToDefault() types.AnalysisResult {
+ analysisResult := types.AnalysisResult{}
+ analysisResult.IsPassed = p.Report.IsPassed
+ analysisResult.Metrics = append(p.Report.Metrics, analysisResult.Metrics...)
+ analysisResult.Errors = append(p.Report.Errors, analysisResult.Errors...)
+
+ // Appending the issues to the default format of Analysis report
+ for _, issue := range p.Report.Issues {
+ analysisIssue := types.Issue{
+ IssueCode: issue.Code,
+ IssueText: issue.Message,
+ Location: types.Location{
+ Path: p.sanitizeFilePath(issue.RelatedInformation[0].Location.URI),
+ Position: types.Position{
+ Begin: types.Coordinate{
+ Line: issue.Range.Start.Line,
+ Column: issue.Range.Start.Character,
+ },
+ End: types.Coordinate{
+ Line: issue.Range.End.Line,
+ Column: issue.Range.End.Character,
+ },
+ },
+ },
+ }
+ analysisResult.Issues = append(analysisResult.Issues, analysisIssue)
+ }
+ return analysisResult
+}
+
+func (p *ReportProcessor) sanitizeFilePath(filePath string) string {
+ return path.Join(p.LocalSourcePath, strings.TrimPrefix(filePath, p.ContainerCodePath))
+}
diff --git a/analyzers/backend/docker/build.go b/analyzers/backend/docker/build.go
new file mode 100644
index 00000000..fb975c78
--- /dev/null
+++ b/analyzers/backend/docker/build.go
@@ -0,0 +1,115 @@
+package docker
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ cliTypes "github.com/deepsourcelabs/cli/types"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/pkg/archive"
+)
+
+// Timeout for build and container operations (10 minutes)
+const buildTimeout = 10 * time.Minute
+
+type DockerClient struct {
+ Client *client.Client
+ ContainerName string
+ ContainerID string
+ ImageName string
+ ImageTag string
+ ImagePlatform string
+ DockerfilePath string
+ AnalysisOpts AnalysisParams
+ ShowLogs bool
+}
+
+type DockerBuildError struct {
+ Message string
+}
+
+func (d *DockerBuildError) Error() string {
+ return d.Message
+}
+
+// SetupClient initializes the Docker client with opts.
+func (d *DockerClient) SetupClient() error {
+ var err error
+ d.Client, err = client.NewClientWithOpts(client.FromEnv)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// BuildAnalyzerDockerImage is the docker image build API used by various CLI commands
+func (d *DockerClient) BuildAnalyzerDockerImage() (context.CancelFunc, io.ReadCloser, *DockerBuildError) {
+ var err error
+
+ cancelFunc, responseReader, err := d.executeImageBuild()
+ if err != nil {
+ return cancelFunc, nil, &DockerBuildError{
+ Message: err.Error(),
+ }
+ }
+ return cancelFunc, responseReader, nil
+}
+
+// Executes the docker image build
+func (d *DockerClient) executeImageBuild() (context.CancelFunc, io.ReadCloser, error) {
+ ctx, ctxCancelFunc := context.WithTimeout(context.Background(), buildTimeout)
+ cwd, _ := os.Getwd()
+
+ tarOptions := &archive.TarOptions{
+ ExcludePatterns: []string{".git/**"},
+ }
+ tar, err := archive.TarWithOptions(cwd, tarOptions)
+ if err != nil {
+ return ctxCancelFunc, nil, err
+ }
+
+ opts := types.ImageBuildOptions{
+ Dockerfile: d.DockerfilePath,
+ Tags: []string{fmt.Sprintf("%s:%s", d.ImageName, d.ImageTag)},
+ Remove: true,
+ Platform: d.ImagePlatform,
+ }
+ res, err := d.Client.ImageBuild(ctx, tar, opts)
+ if err != nil {
+ ctxCancelFunc()
+ return ctxCancelFunc, nil, err
+ }
+ return ctxCancelFunc, res.Body, nil
+}
+
+// Returns the docker image details to build
+func GetDockerImageDetails(analyzerTOMLData *cliTypes.AnalyzerTOML) (string, string) {
+ var dockerFilePath, dockerFileName string
+ dockerFilePath = "Dockerfile"
+
+ // Read config for the value if specified
+ if analyzerTOMLData.Build.Dockerfile != "" {
+ dockerFilePath = analyzerTOMLData.Build.Dockerfile
+ }
+
+ // Removing the @ from the shortcode since docker build doesn't accept it as a valid image name
+ if analyzerTOMLData.Shortcode != "" {
+ dockerFileName = strings.TrimPrefix(analyzerTOMLData.Shortcode, "@")
+ }
+ return dockerFilePath, dockerFileName
+}
+
+func GenerateImageVersion(length int) string {
+ b := make([]byte, length)
+ if _, err := rand.Read(b); err != nil {
+ panic(err)
+ }
+ return fmt.Sprintf("%x", b)
+}
diff --git a/analyzers/backend/docker/container.go b/analyzers/backend/docker/container.go
new file mode 100644
index 00000000..7576e444
--- /dev/null
+++ b/analyzers/backend/docker/container.go
@@ -0,0 +1,156 @@
+package docker
+
+import (
+ "archive/tar"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/pkg/archive"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type AnalysisParams struct {
+ AnalyzerName string
+ AnalyzerShortcode string
+ HostCodePath string
+ HostToolBoxPath string
+ AnalysisCommand string
+ ContainerCodePath string
+ ContainerToolBoxPath string
+ AnalysisResultsPath string
+ AnalysisResultsFilename string
+ AnalysisConfigPath string
+}
+
+const containerRunTimeout = 10 * time.Minute
+
+/* Creates a Docker container with the volume mount in which the source code to be analyzed and the CMD instruction being the
+ * analysis command configured by the user.
+ * Having started the container, streams the logs to STDOUT. On completion of the streaming,
+ * copies the `analysis_results.json` result file generated in the container to the host directory
+ */
+func (d *DockerClient) StartDockerContainer() error {
+ /* ==========================================================
+ * Prepare the container config with the following data:
+ * - ImageName
+ * - CMD instruction
+ * - Environment variables
+ * ========================================================== */
+ config := container.Config{
+ Image: fmt.Sprintf("%s:%s", d.ImageName, d.ImageTag),
+ Cmd: strings.Split(d.AnalysisOpts.AnalysisCommand, " "),
+ Env: []string{
+ "TOOLBOX_PATH=" + d.AnalysisOpts.ContainerToolBoxPath,
+ "CODE_PATH=" + d.AnalysisOpts.ContainerCodePath,
+ },
+ }
+
+ /* Host config containing the mounted volumes
+ * The host machine's temporary code path and toolbox path is mounted in the container */
+ hostConfig := container.HostConfig{
+ Binds: []string{
+ fmt.Sprintf("%s:%s", d.AnalysisOpts.HostCodePath, d.AnalysisOpts.ContainerCodePath),
+ },
+ }
+
+ // Prepare the network config
+ networkConfig := network.NetworkingConfig{}
+
+ // Configure the platform(mostly architecture) for the container. If specified by the user, use that else
+ // determine it using runtime.GOARCH.
+ containerArch := runtime.GOARCH
+ if d.ImagePlatform != "" {
+ containerArch = strings.SplitN(d.ImagePlatform, "/", 2)[1]
+ }
+
+ platform := v1.Platform{
+ Architecture: containerArch,
+ OS: "linux",
+ }
+
+ /* ===============================================================================
+ * Create container with the above configs and copy the analysis_config.json to it
+ * =============================================================================== */
+ ctx, cancel := context.WithTimeout(context.Background(), containerRunTimeout)
+ defer cancel()
+ containerCreateResp, err := d.Client.ContainerCreate(ctx, &config, &hostConfig, &networkConfig, &platform, d.ContainerName)
+ if err != nil {
+ return err
+ }
+ d.ContainerID = containerCreateResp.ID
+
+ tr, err := archive.Tar(d.AnalysisOpts.AnalysisConfigPath, archive.Uncompressed)
+ if err != nil {
+ return err
+ }
+
+ opts := types.CopyToContainerOptions{}
+ if err = d.Client.CopyToContainer(ctx, d.ContainerID, path.Join(d.AnalysisOpts.ContainerToolBoxPath), tr, opts); err != nil {
+ return err
+ }
+
+ /* =========================================
+ * Start the container
+ * ========================================= */
+ containerOpts := types.ContainerStartOptions{}
+ err = d.Client.ContainerStart(ctx, containerCreateResp.ID, containerOpts)
+ if err != nil {
+ return err
+ }
+
+ /* ===================================================================
+ * Stream container logs to STDOUT
+ * TODO: Check if the logs are needed only in --verbose/--debug mode?
+ * =================================================================== */
+ reader, err := d.Client.ContainerLogs(ctx, containerCreateResp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true, Timestamps: false})
+ if err != nil {
+ return err
+ }
+ defer reader.Close()
+
+ _, err = io.Copy(os.Stdout, reader)
+ if err != nil && err != io.EOF {
+ return err
+ }
+ return nil
+}
+
+/* Fetch analysis results generated after analysis from the container */
+func (d *DockerClient) FetchAnalysisResults() ([]byte, string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
+ defer cancel()
+
+ // If no error is found from the above step, copy the analysis results file to the host
+ contentReader, _, err := d.Client.CopyFromContainer(ctx, d.ContainerID, path.Join(d.AnalysisOpts.ContainerToolBoxPath, d.AnalysisOpts.AnalysisResultsFilename))
+ if err != nil {
+ return nil, "", err
+ }
+ defer contentReader.Close()
+
+ tr := tar.NewReader(contentReader)
+
+ // Read the TAR archive returned by docker
+ result, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ return nil, "", err
+ }
+ }
+
+ // Read the contents of the TAR archive into a byte buffer
+ buf, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, "", err
+ }
+ return buf, result.Name, nil
+}
diff --git a/analyzers/backend/docker/pull.go b/analyzers/backend/docker/pull.go
new file mode 100644
index 00000000..75fc3610
--- /dev/null
+++ b/analyzers/backend/docker/pull.go
@@ -0,0 +1,21 @@
+package docker
+
+import (
+ "context"
+ "io"
+
+ "github.com/docker/docker/api/types"
+)
+
+// PullImage pulls an image from a registry.
+func (d *DockerClient) PullImage(imageName string) (context.CancelFunc, io.ReadCloser, error) {
+ ctx, ctxCancelFunc := context.WithTimeout(context.Background(), buildTimeout)
+
+ reader, err := d.Client.ImagePull(ctx, imageName, types.ImagePullOptions{})
+ if err != nil {
+ ctxCancelFunc()
+ return ctxCancelFunc, nil, err
+ }
+
+ return ctxCancelFunc, reader, nil
+}
diff --git a/analyzers/backend/docker/push.go b/analyzers/backend/docker/push.go
new file mode 100644
index 00000000..0d720689
--- /dev/null
+++ b/analyzers/backend/docker/push.go
@@ -0,0 +1,40 @@
+package docker
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/docker/docker/api/types"
+)
+
+const imagePushTimeout = 10 * time.Minute
+
+// PushImageToRegistry pushes the Analyzer image to the docker registry post authenticating using the
+// credentials passed as arguments.
+func (d *DockerClient) PushImageToRegistry(user, token string) (context.CancelFunc, io.ReadCloser, error) {
+ ctx, ctxCancelFunc := context.WithTimeout(context.Background(), imagePushTimeout)
+ authConfig := types.AuthConfig{
+ Username: user,
+ Password: token,
+ }
+
+ // Encode the authentication config as a JSON.
+ encodedJSON, err := json.Marshal(authConfig)
+ if err != nil {
+ return ctxCancelFunc, nil, err
+ }
+ // Encode the config to base64.
+ authStr := base64.URLEncoding.EncodeToString(encodedJSON)
+
+ // Push the image.
+ imagePushRespReader, err := d.Client.ImagePush(ctx, fmt.Sprintf("%s:%s", d.ImageName, d.ImageTag), types.ImagePushOptions{RegistryAuth: authStr})
+ if err != nil {
+ return ctxCancelFunc, nil, err
+ }
+
+ return ctxCancelFunc, imagePushRespReader, nil
+}
diff --git a/analyzers/backend/docker/utils.go b/analyzers/backend/docker/utils.go
new file mode 100644
index 00000000..df0a6608
--- /dev/null
+++ b/analyzers/backend/docker/utils.go
@@ -0,0 +1,90 @@
+package docker
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+type ErrorLine struct {
+ Error string `json:"error"`
+ ErrorDetail ErrorDetail `json:"errorDetail"`
+}
+
+type ErrorDetail struct {
+ Message string `json:"message"`
+}
+
+type DockerBuildResponse struct {
+ Stream string `json:"stream"`
+}
+
+type DockerPullResponse struct {
+ Status string `json:"status"`
+ Progress string `json:"progress"`
+ ID string `json:"id"`
+}
+
+/* Checks the docker build response and prints all the logs if `showAllLogs` is true
+ * Used in `deepsource analyzer run` and `deepsource analyzer verify` commands */
+func CheckBuildResponse(rd io.Reader, showAllLogs bool) error { // skipcq: RVV-A0005
+ var lastLine []byte
+ count := 0
+ var currentStream string
+
+ scanner := bufio.NewScanner(rd)
+ for scanner.Scan() {
+ lastLine = scanner.Bytes()
+ d := &DockerBuildResponse{}
+ err := json.Unmarshal(lastLine, d)
+ if err != nil {
+ return err
+ }
+ if d.Stream == "" || d.Stream == "\n" || strings.Contains(d.Stream, "--->") || strings.TrimSuffix(d.Stream, "\n") == currentStream {
+ continue
+ }
+ currentStream = strings.TrimSuffix(d.Stream, "\n")
+ if showAllLogs {
+ fmt.Println(currentStream)
+ }
+ count++
+ }
+
+ errLine := &ErrorLine{}
+ json.Unmarshal([]byte(lastLine), errLine)
+ if errLine.Error != "" {
+ return errors.New(errLine.Error)
+ }
+ return scanner.Err()
+}
+
+/* Checks the docker pull response and prints all the logs if `showAllLogs` is true
+ * Used in `deepsource analyzer dry-run` command. */
+func CheckPullResponse(rd io.Reader, showAllLogs bool) error { // skipcq: RVV-A0005
+ var lastLine []byte
+ count := 0
+
+ scanner := bufio.NewScanner(rd)
+ for scanner.Scan() {
+ lastLine = scanner.Bytes()
+ d := &DockerPullResponse{}
+ err := json.Unmarshal(lastLine, d)
+ if err != nil {
+ return err
+ }
+ if showAllLogs {
+ fmt.Printf("%s %s\n", d.Status, d.Progress)
+ }
+ count++
+ }
+
+ errLine := &ErrorLine{}
+ json.Unmarshal([]byte(lastLine), errLine)
+ if errLine.Error != "" {
+ return errors.New(errLine.Error)
+ }
+ return scanner.Err()
+}
diff --git a/analyzers/config/config.go b/analyzers/config/config.go
new file mode 100644
index 00000000..8d486e16
--- /dev/null
+++ b/analyzers/config/config.go
@@ -0,0 +1,132 @@
+package config
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+ "github.com/deepsourcelabs/cli/utils"
+ "github.com/pelletier/go-toml/v2"
+)
+
+var (
+ projectRoot string
+ analyzerTOMLPath string
+ issuesDirectoryPath string
+ configFolder string = ".deepsource/analyzer"
+)
+
+func InitAnalyzerConfigurationPaths() (string, string, string) {
+ resolveAnalyzerConfigurationPaths()
+ return projectRoot, analyzerTOMLPath, issuesDirectoryPath
+}
+
+// Read the types and read the config
+func resolveAnalyzerConfigurationPaths() {
+ cwd, _ := os.Getwd()
+
+ // Extracting the path of the project root
+ projectRoot, err := utils.ExtractProjectRootPath()
+ if err != nil {
+ projectRoot = cwd
+ }
+
+ // Configuring the paths of analyzer.toml and issues directory
+ analyzerTOMLPath = filepath.Join(projectRoot, configFolder, "analyzer.toml")
+ issuesDirectoryPath = filepath.Join(projectRoot, configFolder, "issues/")
+}
+
+// Verify the paths of analyzer configurations like analyzer.toml and
+// issue descriptions
+func VerifyAnalyzerConfigs() error {
+ resolveAnalyzerConfigurationPaths()
+
+ // Check if `analyzer.toml` is present in `.deepsource/analyzer` folder
+ if _, err := os.Stat(analyzerTOMLPath); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("the analyzer.toml file doesn't exist\n")
+ }
+ }
+
+ // Check if `issues/` directory is present in `.deepsource/analyzer` folder and is not empty.
+ if _, err := os.Stat(issuesDirectoryPath); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("the issue descriptions directory doesn't exist\n")
+ }
+ }
+
+ // Check if there are any toml files in the `issues/` directory
+ files, err := ioutil.ReadDir(issuesDirectoryPath)
+ if err != nil {
+ return fmt.Errorf("failed to read the files present in the issues directory at %s\n", issuesDirectoryPath)
+ }
+
+ // Check if its an empty directory
+ if len(files) < 1 {
+ return fmt.Errorf("found 0 issues configured in the issues directory at %s\n", issuesDirectoryPath)
+ }
+
+ tomlPresent := false
+ // Check if there are TOML files configured in the issues/ directory
+ for _, file := range files {
+ if strings.HasSuffix(file.Name(), ".toml") {
+ tomlPresent = true
+ break
+ }
+ }
+ if !tomlPresent {
+ return fmt.Errorf("found no toml files in the issues directory at %s\n", issuesDirectoryPath)
+ }
+ return nil
+}
+
+// Get the analyzer.toml data
+func GetAnalyzerTOML() (*types.AnalyzerTOML, error) {
+ resolveAnalyzerConfigurationPaths()
+ config := types.AnalyzerTOML{}
+
+ // Read the contents of analyzer.toml file
+ analyzerTOMLContent, err := ioutil.ReadFile(analyzerTOMLPath)
+ if err != nil {
+ return &config, errors.New("failed to read analyzer.toml file")
+ }
+
+ // Unmarshal TOML into config
+ if err = toml.Unmarshal(analyzerTOMLContent, &config); err != nil {
+ return &config, err
+ }
+ return &config, nil
+}
+
+// Get the list of issue descriptions
+func GetIssueDescriptions() (*[]types.AnalyzerIssue, error) {
+ resolveAnalyzerConfigurationPaths()
+ issueDescriptions := []types.AnalyzerIssue{}
+
+ issuesList, err := ioutil.ReadDir(issuesDirectoryPath)
+ if err != nil {
+ return nil, err
+ }
+ for _, issuePath := range issuesList {
+ issue := types.AnalyzerIssue{}
+ // Set the issue shortcode as the filename
+ issue.Shortcode = strings.TrimSuffix(issuePath.Name(), ".toml")
+
+ // Read the contents of issue toml file
+ issueTOMLContent, err := ioutil.ReadFile(filepath.Join(issuesDirectoryPath, issuePath.Name()))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read file: %s", filepath.Join(issuesDirectoryPath, issuePath.Name()))
+ }
+
+ // Unmarshal TOML into config
+ if err = toml.Unmarshal(issueTOMLContent, &issue); err != nil {
+ return nil, err
+ }
+ issueDescriptions = append(issueDescriptions, issue)
+ }
+ return &issueDescriptions, nil
+}
diff --git a/analyzers/diagnostics/diagnostics.go b/analyzers/diagnostics/diagnostics.go
new file mode 100644
index 00000000..88439e8a
--- /dev/null
+++ b/analyzers/diagnostics/diagnostics.go
@@ -0,0 +1,116 @@
+package diagnostics
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/analyzers/validator"
+ "github.com/morikuni/aec"
+)
+
// Diagnostic represents a single diagnostic reported by the DeepSource CLI validators.
type Diagnostic struct {
	Line         int    // 0-based line number the diagnostic refers to
	Codeframe    string // rendered code frame surrounding the offending line
	ErrorMessage string // human-readable validation error message
}
+
+// GetDiagnostics returns diagnostics as strings.
+func GetDiagnostics(failure validator.ValidationFailure) ([]string, error) {
+ diagnostics := []string{}
+
+ fileContent, err := readFileContent(failure.File)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get diagnostics using the file's content.
+ fileDiagnostics := getDiagnosticsFromFile(fileContent, failure.Errors)
+
+ // Pretty-print diagnostics.
+ for _, diag := range fileDiagnostics {
+ message := constructDiagnostic(diag)
+ diagnostics = append(diagnostics, message)
+ }
+
+ return diagnostics, nil
+}
+
+// constructDiagnostic returns the diagnostic as a pretty-printed string.
+func constructDiagnostic(diag Diagnostic) string {
+ errMsg := ""
+ errMsg += aec.LightRedF.Apply(fmt.Sprintf("%s\n", diag.ErrorMessage))
+ errMsg += diag.Codeframe
+ errMsg += "\n"
+
+ return errMsg
+}
+
+// readFileContent reads the file and returns its content.
+func readFileContent(filename string) (string, error) {
+ content, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return string(content), nil
+}
+
+// getDiagnosticsFromFile uses the file content to return diagnostics with metadata like line number, content, etc.
+func getDiagnosticsFromFile(fileContent string, errors []validator.ErrorMeta) []Diagnostic {
+ diagnostics := []Diagnostic{}
+
+ lines := strings.Split(string(fileContent), "\n")
+
+ // Iterate over each error and check line-by-line.
+ for _, err := range errors {
+ for lineNum, line := range lines {
+ // If the line contains the field name, and if it doesn't have a comment prefix, then we can proceed to diagnostic generation.
+ // TODO(burntcarrot): Replace these conditions with regex.
+ if strings.HasPrefix(line, err.Field) ||
+ strings.HasPrefix(line, fmt.Sprintf("# %s", err.Field)) ||
+ strings.HasPrefix(line, fmt.Sprintf("#%s", err.Field)) {
+ // Prepare code frame for the current line.
+ codeFrame := prepareCodeFrame(lineNum, lines)
+
+ // Generate a diagnostic.
+ diag := Diagnostic{
+ Line: lineNum,
+ Codeframe: codeFrame,
+ ErrorMessage: err.Message,
+ }
+
+ diagnostics = append(diagnostics, diag)
+ }
+ }
+ }
+
+ return diagnostics
+}
+
// prepareCodeFrame prepares a code frame using the file content. The code frame is meant to be displayed on the console while reporting diagnostics.
// NOTE: lineNum always starts from 0; the displayed numbers are 1-based (lineNum+1 for the current line).
// NOTE(review): case 1 reads lines[lineNum+1] and therefore assumes len(lines) >= 2.
// Content split on "\n" from a file ending in a newline always yields a final
// empty element, which satisfies this — TODO confirm for inputs without a
// trailing newline.
func prepareCodeFrame(lineNum int, lines []string) string {
	frame := ""

	if lineNum <= 1 {
		// Case 1: When the line is near the top of the file.
		// Generate a frame with the current and next line only.
		frame += aec.LightRedF.Apply(fmt.Sprintf("> %d | %s\n", lineNum+1, lines[lineNum]))
		frame += fmt.Sprintf(" %d | %s\n", lineNum+2, lines[lineNum+1])

	} else if lineNum >= (len(lines) - 1) {
		// Case 2: When the line is near the bottom of the file.
		// Generate a frame with the current line only.
		// NOTE(review): this prints lines[lineNum-1] labelled lineNum — correct
		// when the final split element is the empty trailing-newline remnant,
		// since the last real line is then lines[lineNum-1]; verify for files
		// without a trailing newline.
		frame += aec.LightRedF.Apply(fmt.Sprintf("> %d | %s\n", lineNum, lines[lineNum-1]))
	} else {
		// Case 3: When the line is in between the top and the bottom.
		// Generate a frame with the previous, current and the next line.
		frame += fmt.Sprintf(" %d | %s\n", lineNum, lines[lineNum-1])
		frame += aec.LightRedF.Apply(fmt.Sprintf("> %d | %s\n", lineNum+1, lines[lineNum]))
		frame += fmt.Sprintf(" %d | %s\n", lineNum+2, lines[lineNum+1])
	}

	return frame
}
diff --git a/analyzers/diagnostics/diagnostics_test.go b/analyzers/diagnostics/diagnostics_test.go
new file mode 100644
index 00000000..f936f8b7
--- /dev/null
+++ b/analyzers/diagnostics/diagnostics_test.go
@@ -0,0 +1,176 @@
+package diagnostics
+
+import (
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/deepsourcelabs/cli/analyzers/validator"
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestPrepareCodeFrame(t *testing.T) {
+ type test struct {
+ description string
+ lineNum int // lineNum always starts from 0.
+ linesFilename string
+ wantFilename string
+ }
+
+ tests := []test{
+ {
+ description: "single line",
+ lineNum: 1,
+ linesFilename: "./testdata/test_codeframe/single_line/test.toml",
+ wantFilename: "./testdata/test_codeframe/single_line/test_want.toml",
+ },
+ {
+ description: "multiple lines",
+ lineNum: 2,
+ linesFilename: "./testdata/test_codeframe/multiple_lines/test.toml",
+ wantFilename: "./testdata/test_codeframe/multiple_lines/test_want.toml",
+ },
+ {
+ description: "multiple lines at bottom",
+ lineNum: 5,
+ linesFilename: "./testdata/test_codeframe/multiple_lines_bottom/test.toml",
+ wantFilename: "./testdata/test_codeframe/multiple_lines_bottom/test_want.toml",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.description, func(t *testing.T) {
+ // Read file for getting line content.
+ fileContentLines, err := os.ReadFile(tc.linesFilename)
+ if err != nil {
+ t.Errorf("error raised while running test (%s): %s\n", tc.description, err)
+ }
+ linesStr := string(fileContentLines)
+ lines := strings.Split(linesStr, "\n")
+
+ // Prepare code frame.
+ got := prepareCodeFrame(tc.lineNum, lines)
+
+ // Strip ANSI escape codes.
+ got = stripANSI(got)
+ got = strings.TrimSpace(got)
+
+ fileContent, err := os.ReadFile(tc.wantFilename)
+ if err != nil {
+ t.Errorf("error raised while running test (%s): %s\n", tc.description, err)
+ }
+
+ // Convert file content to string.
+ want := string(fileContent)
+ want = strings.TrimSpace(want)
+
+ diff := cmp.Diff(got, want)
+ if len(diff) != 0 {
+ t.Errorf("test failed (%s)\ngot != want:\n%s\n", tc.description, diff)
+ }
+ })
+ }
+}
+
+func TestGetDiagnostics(t *testing.T) {
+ type test struct {
+ description string
+ failure validator.ValidationFailure
+ wantFilename string
+ }
+
+ tests := []test{
+ {
+ description: "single error",
+ failure: validator.ValidationFailure{
+ File: "./testdata/test_getdiagnostics/single_error/test.toml",
+ Errors: []validator.ErrorMeta{
+ {
+ Field: "engine",
+ Message: "Invalid build engine \"docke\". The following build engines are supported: [docker]",
+ },
+ },
+ },
+ wantFilename: "./testdata/test_getdiagnostics/single_error/test_want.toml",
+ },
+ {
+ description: "multiple errors",
+ failure: validator.ValidationFailure{
+ File: "./testdata/test_getdiagnostics/multiple_errors/test.toml",
+ Errors: []validator.ErrorMeta{
+ {
+ Field: "shortcode",
+ Message: "Analyzer shortcode should begin with '@'",
+ },
+ {
+ Field: "engine",
+ Message: "Invalid build engine \"docke\". The following build engines are supported: [docker]",
+ },
+ },
+ },
+ wantFilename: "./testdata/test_getdiagnostics/multiple_errors/test_want.toml",
+ },
+ {
+ description: "no errors",
+ failure: validator.ValidationFailure{
+ File: "./testdata/test_getdiagnostics/no_errors/test.toml",
+ Errors: []validator.ErrorMeta{},
+ },
+ wantFilename: "./testdata/test_getdiagnostics/no_errors/test_want.toml",
+ },
+ {
+ description: "file with less lines",
+ failure: validator.ValidationFailure{
+ File: "./testdata/test_getdiagnostics/less_lines/test.toml",
+ Errors: []validator.ErrorMeta{
+ {
+ Field: "shortcode",
+ Message: "Analyzer shortcode should begin with '@'",
+ },
+ },
+ },
+ wantFilename: "./testdata/test_getdiagnostics/less_lines/test_want.toml",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.description, func(t *testing.T) {
+ got, err := GetDiagnostics(tc.failure)
+ if err != nil {
+ t.Errorf("error raised while running test (%s): %s\n", tc.description, err)
+ }
+
+ // Prepare a string for comparing.
+ gotStr := ""
+ for _, str := range got {
+ gotStr += str + "\n"
+ }
+ gotStr = strings.TrimSpace(gotStr)
+
+ fileContent, err := os.ReadFile(tc.wantFilename)
+ if err != nil {
+ t.Errorf("error raised while running test (%s): %s\n", tc.description, err)
+ }
+
+ // Convert file content to string.
+ want := string(fileContent)
+ want = strings.TrimSpace(want)
+
+ // Strip ANSI escape codes.
+ gotStr = stripANSI(gotStr)
+
+ diff := cmp.Diff(gotStr, want)
+ if len(diff) != 0 {
+ t.Errorf("test failed (%s)\ngot != want:\n%s\n", tc.description, diff)
+ }
+ })
+ }
+}
+
// stripANSI removes ANSI escape sequences (colors, cursor movement, etc.)
// from str so rendered output can be compared against plain-text goldens.
func stripANSI(str string) string {
	const ansiPattern = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"
	return regexp.MustCompile(ansiPattern).ReplaceAllString(str, "")
}
diff --git a/analyzers/diagnostics/testdata/test_codeframe/multiple_lines/test.toml b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines/test.toml
new file mode 100644
index 00000000..89acf0c7
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines/test.toml
@@ -0,0 +1,7 @@
+
+[build]
+ engine = "docke"
+ dockerfile = ""
+
+[test]
+ command = ""
diff --git a/analyzers/diagnostics/testdata/test_codeframe/multiple_lines/test_want.toml b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines/test_want.toml
new file mode 100644
index 00000000..a549e5ab
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines/test_want.toml
@@ -0,0 +1,3 @@
+ 2 | [build]
+> 3 | engine = "docke"
+ 4 | dockerfile = ""
diff --git a/analyzers/diagnostics/testdata/test_codeframe/multiple_lines_bottom/test.toml b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines_bottom/test.toml
new file mode 100644
index 00000000..c26b1a2f
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines_bottom/test.toml
@@ -0,0 +1,5 @@
+
+[build]
+ engine = "docke"
+ dockerfile = ""
+ error = ""
diff --git a/analyzers/diagnostics/testdata/test_codeframe/multiple_lines_bottom/test_want.toml b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines_bottom/test_want.toml
new file mode 100644
index 00000000..1e5eac35
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_codeframe/multiple_lines_bottom/test_want.toml
@@ -0,0 +1 @@
+> 5 | error = ""
diff --git a/analyzers/diagnostics/testdata/test_codeframe/single_line/test.toml b/analyzers/diagnostics/testdata/test_codeframe/single_line/test.toml
new file mode 100644
index 00000000..65af834b
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_codeframe/single_line/test.toml
@@ -0,0 +1,3 @@
+name = ""
+shortcode = "@aadhav-deepsource/2do-checker"
+example = "hello"
diff --git a/analyzers/diagnostics/testdata/test_codeframe/single_line/test_want.toml b/analyzers/diagnostics/testdata/test_codeframe/single_line/test_want.toml
new file mode 100644
index 00000000..5d698653
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_codeframe/single_line/test_want.toml
@@ -0,0 +1,2 @@
+> 2 | shortcode = "@aadhav-deepsource/2do-checker"
+ 3 | example = "hello"
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/less_lines/test.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/less_lines/test.toml
new file mode 100644
index 00000000..f05c4b7e
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/less_lines/test.toml
@@ -0,0 +1,2 @@
+name = "2do-Checker"
+shortcode = "aadhav-deepsource/2do-checker"
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/less_lines/test_want.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/less_lines/test_want.toml
new file mode 100644
index 00000000..893a2ea4
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/less_lines/test_want.toml
@@ -0,0 +1,3 @@
+Analyzer shortcode should begin with '@'
+> 2 | shortcode = "aadhav-deepsource/2do-checker"
+ 3 |
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/multiple_errors/test.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/multiple_errors/test.toml
new file mode 100644
index 00000000..c1cdc1d9
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/multiple_errors/test.toml
@@ -0,0 +1,20 @@
+name = "2do-Checker"
+shortcode = "aadhav-deepsource/2do-checker"
+description = "Checks for TODO."
+tags = ["todo", "TODO", "checker"]
+repository = "https://github.com/siddhant-deepsource/2do-checker"
+documentation = ""
+bug_tracker = ""
+
+[environment_variables]
+
+[analysis]
+ command = "/app/todo-checker"
+
+[build]
+ engine = "docke"
+ dockerfile = ""
+ script = ""
+
+[test]
+ command = ""
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/multiple_errors/test_want.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/multiple_errors/test_want.toml
new file mode 100644
index 00000000..21b0918d
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/multiple_errors/test_want.toml
@@ -0,0 +1,9 @@
+Analyzer shortcode should begin with '@'
+> 2 | shortcode = "aadhav-deepsource/2do-checker"
+ 3 | description = "Checks for TODO."
+
+
+Invalid build engine "docke". The following build engines are supported: [docker]
+ 14 | [build]
+> 15 | engine = "docke"
+ 16 | dockerfile = ""
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/no_errors/test.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/no_errors/test.toml
new file mode 100644
index 00000000..12aa7fe3
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/no_errors/test.toml
@@ -0,0 +1,20 @@
+name = "2do-Checker"
+shortcode = "@aadhav-deepsource/2do-checker"
+description = "Checks for TODO."
+tags = ["todo", "TODO", "checker"]
+repository = "https://github.com/siddhant-deepsource/2do-checker"
+documentation = ""
+bug_tracker = ""
+
+[environment_variables]
+
+[analysis]
+ command = "/app/todo-checker"
+
+[build]
+ engine = "docker"
+ dockerfile = ""
+ script = ""
+
+[test]
+ command = ""
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/no_errors/test_want.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/no_errors/test_want.toml
new file mode 100644
index 00000000..e69de29b
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/single_error/test.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/single_error/test.toml
new file mode 100644
index 00000000..c1cdc1d9
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/single_error/test.toml
@@ -0,0 +1,20 @@
+name = "2do-Checker"
+shortcode = "aadhav-deepsource/2do-checker"
+description = "Checks for TODO."
+tags = ["todo", "TODO", "checker"]
+repository = "https://github.com/siddhant-deepsource/2do-checker"
+documentation = ""
+bug_tracker = ""
+
+[environment_variables]
+
+[analysis]
+ command = "/app/todo-checker"
+
+[build]
+ engine = "docke"
+ dockerfile = ""
+ script = ""
+
+[test]
+ command = ""
diff --git a/analyzers/diagnostics/testdata/test_getdiagnostics/single_error/test_want.toml b/analyzers/diagnostics/testdata/test_getdiagnostics/single_error/test_want.toml
new file mode 100644
index 00000000..1aa78552
--- /dev/null
+++ b/analyzers/diagnostics/testdata/test_getdiagnostics/single_error/test_want.toml
@@ -0,0 +1,4 @@
+Invalid build engine "docke". The following build engines are supported: [docker]
+ 14 | [build]
+> 15 | engine = "docke"
+ 16 | dockerfile = ""
diff --git a/analyzers/validator/analyzer_toml.go b/analyzers/validator/analyzer_toml.go
new file mode 100644
index 00000000..9e2fb921
--- /dev/null
+++ b/analyzers/validator/analyzer_toml.go
@@ -0,0 +1,86 @@
+package validator
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+ validate "github.com/go-playground/validator/v10"
+)
+
// supportedEngines lists the build engines an Analyzer may declare in
// analyzer.toml. Only "docker" is supported at present.
var supportedEngines []string = []string{"docker"}
+
+func validateAnalyzerTOMLFields(config *types.AnalyzerTOML, filePath string) (*ValidationFailure, error) {
+ var supportedEnginesString string
+ analyzerTOMLValidationErrors := ValidationFailure{}
+
+ // Validate analyzer.toml fields based on type and sanity checks
+ v := validate.New()
+ // Custom validators for shortcode and engine
+ v.RegisterValidation("shortcode", ValidateShortcode)
+ v.RegisterValidation("engine", ValidateEngine)
+
+ // Start the validation
+ if err := v.Struct(config); err != nil {
+ // List the missing required fields
+ missingRequiredFields := getMissingRequiredFields(err, *config)
+ analyzerTOMLValidationErrors = ValidationFailure{
+ File: filePath,
+ }
+
+ // Find any missing "required" fields from analyzer.toml
+ for _, missingField := range missingRequiredFields {
+ analyzerTOMLValidationErrors.Errors = append(analyzerTOMLValidationErrors.Errors, ErrorMeta{
+ Level: Error,
+ Field: missingField,
+ Message: fmt.Sprintf("Missing required field: %s", missingField),
+ },
+ )
+ }
+
+ // Check if the shortcode begins with @ and the right build engine is configured
+ errs := err.(validate.ValidationErrors)
+ for _, err := range errs {
+ if err.Tag() == "shortcode" {
+ analyzerTOMLValidationErrors.Errors = append(analyzerTOMLValidationErrors.Errors, ErrorMeta{
+ Level: Error,
+ Field: "shortcode",
+ Message: "Analyzer shortcode should begin with '@'",
+ })
+ }
+
+ if err.Tag() == "engine" {
+ if len(supportedEngines) > 1 {
+ supportedEnginesString = strings.Join(supportedEngines, ", ")
+ } else {
+ supportedEnginesString = supportedEngines[0]
+ }
+
+ analyzerTOMLValidationErrors.Errors = append(analyzerTOMLValidationErrors.Errors, ErrorMeta{
+ Level: Error,
+ Field: "engine",
+ Message: fmt.Sprintf("Invalid build engine \"%s\". The following build engines are supported: [%s]", config.Build.Engine, supportedEnginesString),
+ })
+ }
+ }
+ }
+ if len(analyzerTOMLValidationErrors.Errors) > 0 {
+ return &analyzerTOMLValidationErrors, nil
+ }
+ return nil, nil
+}
+
+// Validates if the shortcode begins with `@`
+func ValidateShortcode(fl validate.FieldLevel) bool {
+ return strings.HasPrefix(fl.Field().String(), "@")
+}
+
+// Validates the supported engines. As of now, only docker is supported.
+func ValidateEngine(fl validate.FieldLevel) bool {
+ for _, supportedEngine := range supportedEngines {
+ if fl.Field().String() == supportedEngine {
+ return true
+ }
+ }
+ return false
+}
diff --git a/analyzers/validator/issues.go b/analyzers/validator/issues.go
new file mode 100644
index 00000000..38372d78
--- /dev/null
+++ b/analyzers/validator/issues.go
@@ -0,0 +1,72 @@
+package validator
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+ validate "github.com/go-playground/validator/v10"
+)
+
// supportedIssueCategories lists the values an issue TOML file may declare
// in its `category` field; validated by ValidateCategory.
var supportedIssueCategories = []string{
	"bug-risk",
	"antipattern",
	"security",
	"style",
	"performance",
	"doc",
	"typecheck",
	"coverage",
}
+
+func validateIssueTOML(config *types.AnalyzerIssue, issuePath string) *ValidationFailure {
+ issueValidationError := ValidationFailure{}
+ // Validate the issue data
+ v := validate.New()
+ v.RegisterValidation("category", ValidateCategory)
+ if err := v.Struct(config); err != nil {
+ // validationFailed = true
+ missingRequiredFields := getMissingRequiredFields(err, *config)
+ issueValidationError = ValidationFailure{
+ File: issuePath,
+ }
+
+ // TODO: Tweak this to accomodate other error types.
+ for _, missingField := range missingRequiredFields {
+ issueValidationError.Errors = append(issueValidationError.Errors, ErrorMeta{
+ Level: Error,
+ Field: missingField,
+ Message: fmt.Sprintf("Missing required field: %s", missingField),
+ },
+ )
+ }
+
+ // Check if the category is supported
+ errs := err.(validate.ValidationErrors)
+ for _, err := range errs {
+ if err.Tag() == "category" {
+ supportedCategories := strings.Join(supportedIssueCategories, ", ")
+ issueValidationError.Errors = append(issueValidationError.Errors, ErrorMeta{
+ Level: Error,
+ Field: "category",
+ Message: fmt.Sprintf("Invalid issue category \"%s\". The following issue categories are supported: [%s]", config.Category, supportedCategories),
+ })
+ }
+ }
+ }
+
+ // Return nil if no validation errors found
+ if len(issueValidationError.Errors) > 0 {
+ return &issueValidationError
+ }
+ return nil
+}
+
+func ValidateCategory(fl validate.FieldLevel) bool {
+ for _, supportedCategory := range supportedIssueCategories {
+ if fl.Field().String() == supportedCategory {
+ return true
+ }
+ }
+ return false
+}
diff --git a/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/analyzer.toml b/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/analyzer.toml
new file mode 100644
index 00000000..858d23e6
--- /dev/null
+++ b/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/analyzer.toml
@@ -0,0 +1,14 @@
+name = "todo comments checker"
+shortcode = "@deepsource/demo-python"
+description = "finds the todo comments in codebase."
+tags = ["documentation","todo"]
+
+[environment_variables]
+ CODE_PATH = "/code"
+
+repository = "https://github.com/deepsourcelabs/2do-checker"
+documentation = "https://deepsource.io/docs"
+bug_tracker = "https://bugtracker.deepsource.io"
+
+[analysis]
+ command = "/app/todo-checker"
diff --git a/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/issues/PTC-W001.toml b/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/issues/PTC-W001.toml
new file mode 100644
index 00000000..01cdbe26
--- /dev/null
+++ b/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/issues/PTC-W001.toml
@@ -0,0 +1,6 @@
+# shortcode = "PTC-W001"
+# title = "Potential hardcoded credential detected"
+# category = "security"
+description = """
+Potential hardcoded credential. Sensitive information like password shouldn't be hardcoded. Use an environment variable instead.
+"""
diff --git a/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/issues/PTC-W002.toml b/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/issues/PTC-W002.toml
new file mode 100644
index 00000000..f38fb4f2
--- /dev/null
+++ b/analyzers/validator/testdata/analyzer1/.deepsource/analyzer/issues/PTC-W002.toml
@@ -0,0 +1,5 @@
+# shortcode = "PTC-W001"
+category = "security"
+description = """
+Potential hardcoded credential. Sensitive information like password shouldn't be hardcoded. Use an environment variable instead.
+"""
diff --git a/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/analyzer.toml b/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/analyzer.toml
new file mode 100644
index 00000000..0c6d13c6
--- /dev/null
+++ b/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/analyzer.toml
@@ -0,0 +1,16 @@
+name = "Todo comments checker"
+# shortcode = "@deepsource/demo-python" # test invalid config
+description = "Finds the TODO comments in codebase."
+# category = "doc"
+tags = ["documentation","todo"]
+
+[environment_variables]
+ CODE_PATH = "/code"
+
+[urls]
+ source = "https://github.com/deepsourcelabs/2do-checker"
+ documentation = "https://deepsource.io/docs"
+ bug_tracker = "https://bugtracker.deepsource.io"
+
+[analysis]
+ command = "/app/todo-checker"
diff --git a/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/issues/JS-001.toml b/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/issues/JS-001.toml
new file mode 100644
index 00000000..d7272ebd
--- /dev/null
+++ b/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/issues/JS-001.toml
@@ -0,0 +1,4 @@
+category = "security"
+description = """
+Potential hardcoded credential. Sensitive information like password shouldn't be hardcoded. Use an environment variable instead.
+"""
diff --git a/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/issues/JS-002.toml b/analyzers/validator/testdata/analyzer2/.deepsource/analyzer/issues/JS-002.toml
new file mode 100644
index 00000000..e69de29b
diff --git a/analyzers/validator/testdata/analyzer3/.deepsource/analyzer/analyzer.toml b/analyzers/validator/testdata/analyzer3/.deepsource/analyzer/analyzer.toml
new file mode 100644
index 00000000..3e95d2d9
--- /dev/null
+++ b/analyzers/validator/testdata/analyzer3/.deepsource/analyzer/analyzer.toml
@@ -0,0 +1,14 @@
+name = "Todo comments checker"
+shortcode = "deepsource/demo-python"
+description = "Finds the TODO comments in codebase."
+tags = ["documentation","todo"]
+
+[environment_variables]
+ CODE_PATH = "/code"
+
+repository = "https://github.com/deepsourcelabs/2do-checker"
+documentation = "https://deepsource.io/docs"
+bug_tracker = "https://bugtracker.deepsource.io"
+
+[analysis]
+ command = "/app/todo-checker"
diff --git a/analyzers/validator/utils.go b/analyzers/validator/utils.go
new file mode 100644
index 00000000..7fa29172
--- /dev/null
+++ b/analyzers/validator/utils.go
@@ -0,0 +1,103 @@
+package validator
+
+import (
+ "errors"
+ "fmt"
+ "path"
+ "reflect"
+ "strings"
+
+ validate "github.com/go-playground/validator/v10"
+ "github.com/pelletier/go-toml/v2"
+)
+
+// Returns the list of required fields from the error message returned by the `go-playground/validator` library
+func getMissingRequiredFields(err error, config interface{}) []string {
+ missingRequiredFields := []string{}
+ errs := err.(validate.ValidationErrors)
+ for _, err := range errs {
+ if err.Tag() == "required" {
+ c := reflect.ValueOf(config)
+ for i := 0; i < c.Type().NumField(); i++ {
+ if err.Field() == c.Type().Field(i).Name {
+ missingRequiredFields = append(missingRequiredFields, c.Type().Field(i).Tag.Get("toml"))
+ }
+ }
+ }
+ }
+ return missingRequiredFields
+}
+
+// Handle decoding errors reported by go-toml
+func handleTOMLDecodeErrors(err error, filePath string) *ValidationFailure {
+ var usefulResponse, expectedType, receivedType, fieldName, decodeErrorMessage string
+
+ // Get the DecodeError exported by go-toml
+ // Ref: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
+ var decodeErr *toml.DecodeError
+ if !errors.As(err, &decodeErr) {
+ decodeErrorMessage = err.Error()
+
+ // Handle strict mode error when some alien fields are added in the user configured TOML
+ if strings.HasPrefix(err.Error(), "strict mode") {
+ decodeErrorMessage = fmt.Sprintf("Failed to parse %s. Invalid fields detected.", path.Base(filePath))
+ }
+ validationError := ValidationFailure{
+ File: filePath,
+ Errors: []ErrorMeta{
+ {
+ Level: DecodeErr,
+ Field: "",
+ Message: decodeErrorMessage,
+ },
+ },
+ }
+ return &validationError
+ }
+
+ /* =================================================
+ * Extract the data about the decoding failure and return
+ * a validation failure response
+ * ================================================= */
+
+ errorMessage := decodeErr.Error()
+ // Error case 1: `toml: cannot decode TOML integer into struct field types.AnalyzerTOML.Name of type string"`
+ if strings.HasPrefix(errorMessage, "toml: cannot decode TOML") {
+
+ usefulResponse = strings.TrimPrefix(errorMessage, "toml: cannot decode TOML ")
+ responseArray := strings.Split(usefulResponse, " ")
+
+ expectedType = responseArray[len(responseArray)-1]
+ receivedType = responseArray[0]
+ fieldData := responseArray[len(responseArray)-4]
+ index := strings.LastIndex(fieldData, ".")
+ fieldName = strings.ToLower(fieldData[index:])
+ // Framing the decoding failure error message
+ decodeErrorMessage = fmt.Sprintf("expected the field \"%s\" of type %s. Got %s.", fieldName, expectedType, receivedType)
+
+ } else if strings.HasPrefix(errorMessage, "toml: cannot store TOML") {
+
+ // Error case 2: `toml: cannot store TOML string into a Go slice`
+ usefulResponse = strings.TrimPrefix(errorMessage, "toml: cannot store TOML ")
+ responseArray := strings.Split(usefulResponse, " ")
+
+ expectedType = responseArray[len(responseArray)-1]
+ receivedType = responseArray[0]
+ decodeErrorMessage = fmt.Sprintf("expected type for one of the fields : %s. Received: %s.", expectedType, receivedType)
+ } else {
+ decodeErrorMessage = errorMessage
+ fieldName = ""
+ }
+
+ validationError := ValidationFailure{
+ File: filePath,
+ Errors: []ErrorMeta{
+ {
+ Level: DecodeErr,
+ Field: fieldName,
+ Message: decodeErrorMessage,
+ },
+ },
+ }
+ return &validationError
+}
diff --git a/analyzers/validator/validator.go b/analyzers/validator/validator.go
new file mode 100644
index 00000000..d88d6611
--- /dev/null
+++ b/analyzers/validator/validator.go
@@ -0,0 +1,157 @@
+package validator
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/deepsourcelabs/cli/types"
+ "github.com/pelletier/go-toml/v2"
+)
+
/* ==================================================
 * Types used to report validation failure error data
 * ================================================== */

// ErrLevel classifies the severity of a single validation error.
type ErrLevel int

const (
	DecodeErr ErrLevel = iota // the TOML file could not be decoded at all
	Error // a validation check failed
	Warning // NOTE(review): not produced by the validators in this chunk — confirm intended use
	Information // NOTE(review): not produced by the validators in this chunk — confirm intended use
)

// ErrorMeta describes one validation error.
type ErrorMeta struct {
	Level ErrLevel // severity of the error
	Field string // TOML field the error relates to ("" when unknown)
	Message string // human-readable description shown to the user
}

// ValidationFailure groups all validation errors found in one file.
type ValidationFailure struct {
	File string // path of the file that failed validation
	Errors []ErrorMeta // individual errors found in the file
}
+
+// CheckForAnalyzerConfig receives the path of the `analyzer.toml` and issue descriptions and
+// checks if they are actually present
+func CheckForAnalyzerConfig(analyzerTOMLPath, issuesDirectoryPath string) (err error) {
+ // Check if `analyzer.toml` is present in `.deepsource/analyzer` folder
+ if _, err := os.Stat(analyzerTOMLPath); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("the analyzer.toml file doesn't exist")
+ }
+ }
+
+ // Check if `issues/` directory is present in `.deepsource/analyzer` folder and is not empty.
+ if _, err := os.Stat(issuesDirectoryPath); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("the issue descriptions directory doesn't exist")
+ }
+ }
+
+ // Check if there are any toml files in the `issues/` directory
+ files, err := ioutil.ReadDir(issuesDirectoryPath)
+ if err != nil {
+ return fmt.Errorf("failed to read the files present in the issues directory at %s", issuesDirectoryPath)
+ }
+
+ // Check if its an empty directory
+ if len(files) < 1 {
+ return fmt.Errorf("found 0 issues configured in the issues directory at %s", issuesDirectoryPath)
+ }
+
+ tomlPresent := false
+ // Check if there are TOML files configured in the issues/ directory
+ for _, file := range files {
+ if strings.HasSuffix(file.Name(), ".toml") {
+ tomlPresent = true
+ break
+ }
+ }
+ if !tomlPresent {
+ return fmt.Errorf("found no toml files in the issues directory at %s", issuesDirectoryPath)
+ }
+
+ return
+}
+
+// ValidateAnalyzerTOML receives the path of analyzer.toml and reads as well as validates it for
+// the type checks, required fields etc. Returns the analyzer.toml content and the validation failures
+// if any in the form of ValidationFailure struct.
+func ValidateAnalyzerTOML(analyzerTOMLPath string) (*types.AnalyzerTOML, *ValidationFailure, error) {
+ config := types.AnalyzerTOML{}
+
+ // Read the contents of analyzer.toml file
+ analyzerTOMLContent, err := ioutil.ReadFile(analyzerTOMLPath)
+ if err != nil {
+ return nil, nil, errors.New("failed to read analyzer.toml file")
+ }
+
+ // Decode the TOML into the struct
+ d := toml.NewDecoder(bytes.NewBuffer(analyzerTOMLContent))
+ d.DisallowUnknownFields()
+ if err := d.Decode(&config); err != nil {
+ decodeErrorResp := handleTOMLDecodeErrors(err, analyzerTOMLPath)
+ if decodeErrorResp != nil {
+ return nil, decodeErrorResp, nil
+ }
+ return nil, nil, err
+ }
+
+ // Validate the analyzer.toml fields for default/custom type checks, required fields
+ analyzerTOMLValidationErrors, err := validateAnalyzerTOMLFields(&config, analyzerTOMLPath)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &config, analyzerTOMLValidationErrors, nil
+}
+
+// ValidateIssueDescriptions receives the path of issues directory for reading and validating them
+// for type checks and required fields. Returns an array of ValidationFailure struct containing
+// validation errors for each issue TOML file.
+func ValidateIssueDescriptions(issuesDirectoryPath string) (*[]ValidationFailure, error) {
+ issueValidationErrors := []ValidationFailure{}
+
+ // TODO: List only TOML files here
+ issuesList, err := ioutil.ReadDir(issuesDirectoryPath)
+ if err != nil {
+ return nil, err
+ }
+
+ // Iterate over the issues one by one, read and decode them, validate the fields and return the
+ // validation result.
+ for _, issuePath := range issuesList {
+ // Set the issue shortcode as the filename
+ config := types.AnalyzerIssue{}
+ config.Shortcode = strings.TrimSuffix(issuePath.Name(), ".toml")
+
+ // Read the contents of issue toml file
+ issueTOMLContent, err := ioutil.ReadFile(filepath.Join(issuesDirectoryPath, issuePath.Name()))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read file: %s", filepath.Join(issuesDirectoryPath, issuePath.Name()))
+ }
+
+ // Decode the TOML content into the AnalyzerIssue struct object
+ d := toml.NewDecoder(bytes.NewBuffer(issueTOMLContent))
+ d.DisallowUnknownFields()
+ if err = d.Decode(&config); err != nil {
+ decodeErrorResp := handleTOMLDecodeErrors(err, issuePath.Name())
+ if decodeErrorResp != nil {
+ // Append the error to the array created for reporting issue validation errors and return it
+ issueValidationErrors = append(issueValidationErrors, *decodeErrorResp)
+ continue
+ }
+ }
+
+ // Validate the analyzer.toml fields for default/custom type checks, required fields
+ issueValidationError := validateIssueTOML(&config, issuePath.Name())
+ if issueValidationError != nil {
+ issueValidationErrors = append(issueValidationErrors, *issueValidationError)
+ }
+ }
+ return &issueValidationErrors, nil
+}
diff --git a/analyzers/validator/validator_test.go b/analyzers/validator/validator_test.go
new file mode 100644
index 00000000..26104e5b
--- /dev/null
+++ b/analyzers/validator/validator_test.go
@@ -0,0 +1,65 @@
+package validator
+
+import (
+ "testing"
+)
+
+func TestValidateAnalyzerToml(t *testing.T) {
+ type test struct {
+ tomlPath string
+ validTOML bool
+ }
+
+ tests := []test{
+ {
+ tomlPath: "./testdata/analyzer1/.deepsource/analyzer/analyzer.toml",
+ validTOML: true,
+ },
+ {
+ tomlPath: "./testdata/analyzer2/.deepsource/analyzer/analyzer.toml",
+ validTOML: false,
+ },
+ {
+ tomlPath: "./testdata/analyzer3/.deepsource/analyzer/analyzer.toml",
+ validTOML: false,
+ },
+ }
+
+ for _, tc := range tests {
+ _, validationErr, _ := ValidateAnalyzerTOML(tc.tomlPath)
+ if validationErr != nil && tc.validTOML {
+ t.Errorf("Expected valid TOML for %s. Got: %v", tc.tomlPath, validationErr)
+ }
+ if validationErr == nil && !tc.validTOML {
+ t.Errorf("Expected invalid TOML for %s. Got: %v", tc.tomlPath, validationErr)
+ }
+ }
+}
+
+func TestValidateIssueDescriptions(t *testing.T) {
+	type test struct {
+		issuesDirPath string
+		validIssues   bool
+	}
+
+	tests := []test{
+		{
+			issuesDirPath: "./testdata/analyzer1/.deepsource/analyzer/issues/",
+			validIssues:   false,
+		},
+		{
+			issuesDirPath: "./testdata/analyzer2/.deepsource/analyzer/issues/",
+			validIssues:   false,
+		},
+	}
+
+	for _, tc := range tests {
+		// ValidateIssueDescriptions returns a non-nil (possibly empty) slice pointer on
+		// success, so check the slice contents rather than pointer nil-ness.
+		validationErrors, err := ValidateIssueDescriptions(tc.issuesDirPath)
+		if validationErrors != nil && len(*validationErrors) > 0 && tc.validIssues {
+			t.Errorf("Expected valid TOML for %s. Got: %v", tc.issuesDirPath, err)
+		}
+		if (validationErrors == nil || len(*validationErrors) == 0) && !tc.validIssues {
+			t.Errorf("Expected invalid TOML for %s. Got: %v", tc.issuesDirPath, err)
+		}
+	}
+}
diff --git a/command/analyzer/analyzer.go b/command/analyzer/analyzer.go
new file mode 100644
index 00000000..b5d9dd8f
--- /dev/null
+++ b/command/analyzer/analyzer.go
@@ -0,0 +1,26 @@
+package analyzer
+
+import (
+ "github.com/spf13/cobra"
+
+ dryrun "github.com/deepsourcelabs/cli/command/analyzer/dryrun"
+ initialize "github.com/deepsourcelabs/cli/command/analyzer/initialize"
+ push "github.com/deepsourcelabs/cli/command/analyzer/push"
+ verify "github.com/deepsourcelabs/cli/command/analyzer/verify"
+)
+
+// Options holds the metadata.
+type Options struct{}
+
+// NewCmdAnalyzer returns the parent command for all DeepSource Analyzer operations
+func NewCmdAnalyzer() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "analyzer",
+ Short: "Operations related to DeepSource Analyzers",
+ }
+ cmd.AddCommand(dryrun.NewCmdAnalyzerRun())
+ cmd.AddCommand(verify.NewCmdAnalyzerVerify())
+ cmd.AddCommand(initialize.NewCmdAnalyzerInit())
+ cmd.AddCommand(push.NewCmdAnalyzerPush())
+ return cmd
+}
diff --git a/command/analyzer/dryrun/client.go b/command/analyzer/dryrun/client.go
new file mode 100644
index 00000000..de015f5f
--- /dev/null
+++ b/command/analyzer/dryrun/client.go
@@ -0,0 +1,46 @@
+package dryrun
+
+import (
+ "strings"
+
+ "github.com/deepsourcelabs/cli/analyzers/backend/docker"
+ "github.com/deepsourcelabs/cli/analyzers/config"
+)
+
+// Reads the analyzer.toml data and environment variables and creates a docker client
+// to be used in the docker related ops by the run command
+func (a *AnalyzerDryRun) createDockerClient() error {
+ // Get the Analyzer.toml contents
+ analyzerTOMLData, err := config.GetAnalyzerTOML()
+ if err != nil {
+ return err
+ }
+
+ // Fetch environment variables set by the user
+ fetchEnvironmentVariables()
+
+ // Extracting the docker file and path details
+ dockerFilePath, dockerFileName := docker.GetDockerImageDetails(analyzerTOMLData)
+ analyzerName := strings.Split(dockerFileName, "/")[1]
+
+ /* ====================================== */
+ // Create a Docker Client
+ /* ====================================== */
+
+ a.Client = &docker.DockerClient{
+ ImageName: dockerFileName,
+ ImagePlatform: a.DockerImagePlatform,
+ ImageTag: docker.GenerateImageVersion(7),
+ ContainerName: analyzerName + "-" + docker.GenerateImageVersion(7),
+ DockerfilePath: dockerFilePath,
+ AnalysisOpts: docker.AnalysisParams{
+ AnalyzerName: analyzerTOMLData.Name,
+ AnalyzerShortcode: analyzerTOMLData.Shortcode,
+ AnalysisCommand: analyzerTOMLData.Analysis.Command,
+ ContainerCodePath: containerCodePath,
+ ContainerToolBoxPath: containerToolBoxPath,
+ AnalysisResultsFilename: analysisResultsName + analysisResultsExt,
+ },
+ }
+ return nil
+}
diff --git a/command/analyzer/dryrun/config.go b/command/analyzer/dryrun/config.go
new file mode 100644
index 00000000..71587b7d
--- /dev/null
+++ b/command/analyzer/dryrun/config.go
@@ -0,0 +1,42 @@
+package dryrun
+
+import (
+ "encoding/json"
+ "os"
+ "path"
+
+ analysis_config "github.com/deepsourcelabs/cli/analysis/config"
+)
+
+// Prepares the analysis config and writes it to TOOLBOX_PATH
+func (a *AnalyzerDryRun) prepareAnalysisConfig() (err error) {
+ /* Prepare the analysis_config.json here and mount into the container at `TOOLBOX_PATH/analysis_config.json`
+ * The analysis_config.json will have path prepended with the CODE_PATH of the container and not local CODE_PATH */
+ analysisRun := analysis_config.AnalysisRun{
+ AnalyzerName: a.Client.AnalysisOpts.AnalyzerName,
+ LocalCodePath: a.Client.AnalysisOpts.HostCodePath,
+ ContainerCodePath: containerCodePath,
+ }
+
+ if a.AnalysisConfig, err = analysisRun.ConfigureAnalysis(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Writes the analysis_config.json into a temporary directory which shall be mounted as TOOLBOX directory in the container
+func (a *AnalyzerDryRun) writeAnalysisConfig() (err error) {
+ // Modify the paths of analysis_config.json file to use the container based CODE_PATH instead
+ // of the local CODE_PATH
+ modifyAnalysisConfigFilepaths(a.AnalysisConfig, a.Client.AnalysisOpts.HostCodePath, a.Client.AnalysisOpts.ContainerCodePath)
+
+ // Marshal the analysis_config data into JSON
+ analysisConfigJSON, err := json.Marshal(a.AnalysisConfig)
+ if err != nil {
+ return err
+ }
+ a.Client.AnalysisOpts.AnalysisConfigPath = path.Join(a.TempToolBoxDirectory, analysisConfigName+analysisConfigExt)
+
+ // Create a temporary directory
+ return os.WriteFile(path.Join(a.TempToolBoxDirectory, analysisConfigName+analysisConfigExt), analysisConfigJSON, 0o644)
+}
diff --git a/command/analyzer/dryrun/process.go b/command/analyzer/dryrun/process.go
new file mode 100644
index 00000000..81a2b922
--- /dev/null
+++ b/command/analyzer/dryrun/process.go
@@ -0,0 +1,42 @@
+package dryrun
+
+import (
+ "encoding/json"
+
+ "github.com/deepsourcelabs/cli/analysis/processor"
+ "github.com/deepsourcelabs/cli/analysis/processor/processors"
+ "github.com/deepsourcelabs/cli/types"
+)
+
+// processAnalyzerReport processes the analysis report generated by the Analyzer
+func (a *AnalyzerDryRun) processAnalyzerReport(reportBytes []byte) (types.AnalysisResult, error) {
+ report := types.AnalyzerReport{}
+
+ // Creating instances of skipcq and source code highlighting processors
+ skipCQProcessor := processors.ProcSkipCQ{}
+ sourceCodeHighlightingProcessor := processors.ProcSourceCodeLoad{}
+
+ // Initializing the processors using the IProcessor interface provided by the `processor` package
+ var skip_cq, source_code_load processor.IProcessor
+
+ // Assigning the instances to the IProcessor interface
+ skip_cq = skipCQProcessor
+ source_code_load = sourceCodeHighlightingProcessor
+
+ // Start the processors workflow
+ processor := processor.ReportProcessor{
+ LocalSourcePath: a.SourcePath,
+ ContainerCodePath: a.Client.AnalysisOpts.ContainerCodePath,
+ Processors: []processor.IProcessor{source_code_load, skip_cq},
+ }
+
+ // Generate the silencers regexMap.
+ processors.GenerateSilencersRegexMap()
+ if err := json.Unmarshal(reportBytes, &report); err != nil {
+ return types.AnalysisResult{}, err
+ }
+
+ processor.Report = report
+ analysisResult := processor.Process()
+ return analysisResult, nil
+}
diff --git a/command/analyzer/dryrun/render/category.go b/command/analyzer/dryrun/render/category.go
new file mode 100644
index 00000000..82c6f573
--- /dev/null
+++ b/command/analyzer/dryrun/render/category.go
@@ -0,0 +1,26 @@
+package render
+
+// fetchIssueCategoryData creates a map of issue category to issue occurences count of that category.
+func (r *ResultRenderOpts) fetchIssueCategoryData() {
+ // Iterate over the map and then keep adding the issue counts.
+ issueCategoryMap := make(map[string]int)
+
+ // Creating a map of issue categories present to their count.
+ for _, occurenceData := range r.AnalysisResultData.IssuesOccurenceMap {
+ if _, ok := issueCategoryMap[occurenceData.IssueMeta.Category]; !ok {
+ issueCategoryMap[occurenceData.IssueMeta.Category] = len(occurenceData.Occurences)
+ continue
+ }
+ issueCategoryMap[occurenceData.IssueMeta.Category] = issueCategoryMap[occurenceData.IssueMeta.Category] + len(occurenceData.Occurences)
+ }
+
+ // Add remaining categories to the map other than what are reported in the issues by the Analyzer since
+ // need to render all the categories.
+ for categoryShortcode := range r.IssueCategoryNameMap {
+ if _, ok := issueCategoryMap[categoryShortcode]; !ok {
+ issueCategoryMap[categoryShortcode] = 0
+ continue
+ }
+ }
+ r.AnalysisResultData.IssueCategoryCountMap = issueCategoryMap
+}
diff --git a/command/analyzer/dryrun/render/metrics.go b/command/analyzer/dryrun/render/metrics.go
new file mode 100644
index 00000000..25fcf920
--- /dev/null
+++ b/command/analyzer/dryrun/render/metrics.go
@@ -0,0 +1,14 @@
+package render
+
+// fetchIssueMetricsData fetches the metrics data to be rendered.
+func (r *ResultRenderOpts) fetchIssueMetricsData() {
+ metricsMap := make(map[string]float64)
+ for _, metric := range r.AnalysisResultData.AnalysisResult.Metrics {
+ if _, ok := r.MetricNameMap[metric.MetricCode]; !ok {
+ continue
+ }
+ metricName := r.MetricNameMap[metric.MetricCode]
+ metricsMap[metricName] = metric.Namespaces[0].Value
+ }
+ r.AnalysisResultData.MetricsMap = metricsMap
+}
diff --git a/command/analyzer/dryrun/render/occurences.go b/command/analyzer/dryrun/render/occurences.go
new file mode 100644
index 00000000..b91ca293
--- /dev/null
+++ b/command/analyzer/dryrun/render/occurences.go
@@ -0,0 +1,87 @@
+package render
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+// fetchIssueOccurencesData collects all the occurence related data.
+func (r *ResultRenderOpts) fetchIssueOccurencesData(cwd string) {
+	// Create a map of occurences of the issues.
+	issueOccurenceMap := make(map[string]OccurenceData)
+
+	// Iterate over the analysis result issues.
+	for _, issue := range r.AnalysisResultData.AnalysisResult.Issues {
+		currentOccurence := OccurenceData{}
+
+		// Fix path of the issues(remove cwd prefix from them).
+		issue.Location.Path = strings.TrimPrefix(issue.Location.Path, r.AnalysisResultData.SourcePath+string(os.PathSeparator))
+
+		if _, ok := issueOccurenceMap[issue.IssueCode]; !ok {
+			// Fetch issue meta for the issue code raised.
+			issueMeta, err := getIssueMeta(cwd, issue.IssueCode)
+			if err != nil {
+				fmt.Println("Couldn't resolve issue meta for the issue:", issue.IssueCode)
+				continue
+			}
+			currentOccurence = OccurenceData{
+				IssueMeta: issueMeta,
+			}
+			currentOccurence.Occurences = append(currentOccurence.Occurences, issue)
+			currentOccurence.Files = append(currentOccurence.Files, issue.Location.Path)
+			issueOccurenceMap[issue.IssueCode] = currentOccurence
+			continue
+		}
+
+		// Get past occurences and append to it since maps don't allow direct append to a slice value.
+		pastOccurences := issueOccurenceMap[issue.IssueCode]
+		currentOccurence.IssueMeta = pastOccurences.IssueMeta
+		currentOccurence.Occurences = append(pastOccurences.Occurences, issue)
+		currentOccurence.Files = append(pastOccurences.Files, issue.Location.Path)
+		issueOccurenceMap[issue.IssueCode] = currentOccurence
+	}
+
+	// Remove duplicates from the files array.
+	for issueCode, occurenceData := range issueOccurenceMap {
+		filesMap := make(map[string]int, 0)
+		uniqueFiles := make([]string, 0)
+
+		// Setting the map value to 1 for the files in order to identify unique files.
+		for _, file := range occurenceData.Files {
+			filesMap[file] = 1
+		}
+
+		// Creating a slice of unique files.
+		for file := range filesMap {
+			uniqueFiles = append(uniqueFiles, file)
+		}
+
+		// Write the unique files slice back into the map (the range copy is local).
+		occurenceData.Files = uniqueFiles
+		issueOccurenceMap[issueCode] = occurenceData
+	}
+
+	// Create the files information string.
+	for issueCode, occurenceData := range issueOccurenceMap {
+		switch len(occurenceData.Files) - 1 {
+		case 0:
+			occurenceData.FilesInfo = fmt.Sprintf("Found in %s", occurenceData.Files[0])
+		case 1:
+			occurenceData.FilesInfo = fmt.Sprintf("Found in %s and %d other file", occurenceData.Files[0], len(occurenceData.Files)-1)
+		default:
+			occurenceData.FilesInfo = fmt.Sprintf("Found in %s and %d other files", occurenceData.Files[0], len(occurenceData.Files)-1)
+		}
+		issueOccurenceMap[issueCode] = occurenceData
+	}
+	// Assign the value of local IssueOccurenceMap to global struct field.
+	r.AnalysisResultData.IssuesOccurenceMap = issueOccurenceMap
+
+	// Find out total number of occurences of all the issues.
+	for _, v := range issueOccurenceMap {
+		r.AnalysisResultData.TotalOccurences = r.AnalysisResultData.TotalOccurences + len(v.Occurences)
+	}
+
+	// Finds the unique issues count(the length of the occurences map since its mapped by issue codes which are unique).
+	r.AnalysisResultData.UniqueIssuesCount = len(r.AnalysisResultData.IssuesOccurenceMap)
+}
diff --git a/command/analyzer/dryrun/render/render.go b/command/analyzer/dryrun/render/render.go
new file mode 100644
index 00000000..3eef16d8
--- /dev/null
+++ b/command/analyzer/dryrun/render/render.go
@@ -0,0 +1,88 @@
+package render
+
+import (
+ "context"
+ "embed"
+ "fmt"
+ "html/template"
+ "io/fs"
+ "net/http"
+ "os"
+ "os/signal"
+ "time"
+
+ "github.com/cli/browser"
+ "github.com/pterm/pterm"
+)
+
+/* Embedding the required files in views folder
+ into the binary using go embed */
+
+//go:embed views/*.html views/assets
+var tmplFS embed.FS
+
+// RenderResultsOnBrowser renders the results on the browser through a local server,
+// go template and an awesome frontend.
+func (r *ResultRenderOpts) RenderResultsOnBrowser(server IRenderServer) (err error) {
+ // Collect all other data to be rendered.
+ r.collectResultToBeRendered()
+
+ // In order to serve the static css files, this creates a handler to serve any static assets stored under
+ // `views/` at `/static/assets/*`.
+ fsys, err := fs.Sub(tmplFS, "views")
+ if err != nil {
+ return err
+ }
+
+ // Parse the HTML templates.
+ r.Template = template.Must(template.ParseFS(tmplFS, "views/*.html"))
+
+ // Define the routes using echo and start the server.
+ r.EchoServer = server.GetEchoContext()
+ server.DeclareRoutes(http.FS((fsys)))
+ serverPort := getServerPort()
+
+ // Spawn the server in a goroutine.
+ go func() {
+ if err := r.EchoServer.Start(fmt.Sprintf(":%s", serverPort)); err != nil && err != http.ErrServerClosed {
+ r.EchoServer.Logger.Fatal("Shutting down the server")
+ }
+ }()
+ pterm.Success.Printf("Analysis results live at http://localhost:%s..\n", serverPort)
+
+ // Having received the user code, open the browser at the localhost.
+ browser.OpenURL(fmt.Sprintf("http://localhost:%s", serverPort))
+
+ // Wait for interrupt signal to gracefully shutdown the server with a timeout of 10 seconds.
+ // Use a buffered channel to avoid missing signals as recommended for signal.Notify
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, os.Interrupt)
+ <-quit
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ return r.EchoServer.Shutdown(ctx)
+}
+
+// collectResultToBeRendered formats all the result received after post-processing and then adds the
+// extra data required for rendering on the browser
+func (r *ResultRenderOpts) collectResultToBeRendered() {
+ cwd, _ := os.Getwd()
+
+ // Fetch the run summary data.
+ r.Summary.RunDuration, r.Summary.TimeSinceRun = fetchRunSummary(r.Summary.AnalysisStartTime, r.Summary.AnalysisEndTime)
+
+ // Inject the Analyzer VCS information.
+ r.VCSInfo.Branch, r.VCSInfo.CommitSHA = fetchVCSDetails(cwd)
+
+ // Fetch the data as to status of Analyzer w.r.t the latest version/tag.
+ r.VCSInfo.VersionDiff = fetchAnalyzerVCSData(cwd)
+
+ // Get occurence data.
+ r.fetchIssueOccurencesData(cwd)
+
+ // Get the category data.
+ r.fetchIssueCategoryData()
+
+ // Fetch metrics data.
+ r.fetchIssueMetricsData()
+}
diff --git a/command/analyzer/dryrun/render/route.go b/command/analyzer/dryrun/render/route.go
new file mode 100644
index 00000000..dde9008b
--- /dev/null
+++ b/command/analyzer/dryrun/render/route.go
@@ -0,0 +1,78 @@
+package render
+
+import (
+ "fmt"
+ "html/template"
+ "net/http"
+
+ "github.com/labstack/echo/v4"
+)
+
+type IRenderServer interface {
+ GetEchoContext() *echo.Echo
+ DeclareRoutes(http.FileSystem)
+}
+
+/* Declared Routes:
+ * /
+ * /issues
+ * /issue/{issue_code}/occurences
+ * /issues?category=all
+ * /issues?category={issue_category} */
+
+// GetEchoContext returns a new Echo server instance.
+func (*ResultRenderOpts) GetEchoContext() *echo.Echo {
+ e := echo.New()
+ e.HideBanner = true
+ return e
+}
+
+// DeclareRoutes declares routes for various incoming requests to the Analyzer dry run local server.
+func (r *ResultRenderOpts) DeclareRoutes(staticFS http.FileSystem) {
+ // Issues page containing all the reported issues.
+ r.EchoServer.GET("/", r.IssuesHandler)
+ r.EchoServer.GET("/issues", r.IssuesHandler)
+
+ // Handle serving static assets.
+ assetHandler := http.FileServer(staticFS)
+ r.EchoServer.GET("/static/*", echo.WrapHandler(http.StripPrefix("/static/", assetHandler)))
+
+	// Handle showing the occurences of a particular issue.
+ r.EchoServer.GET("/issue/:issue_code/occurences", r.IssueOccurencesHandler)
+}
+
+// IssuesHandler handles serving the list of issues reported
+func (r *ResultRenderOpts) IssuesHandler(c echo.Context) error {
+ // Check URL query parameters
+ qParams := c.QueryParams()
+
+ if qParams.Has("category") {
+ r.SelectedCategory = qParams.Get("category")
+ } else {
+ r.SelectedCategory = "all"
+ }
+
+ err := r.Template.ExecuteTemplate(c.Response().Writer, "index.html", *r)
+ if err != nil {
+ fmt.Println(err)
+ return c.String(http.StatusInternalServerError, err.Error())
+ }
+ return c.NoContent(http.StatusOK)
+}
+
+// IssueOccurencesHandler handles serving the issue occurences.
+func (r *ResultRenderOpts) IssueOccurencesHandler(c echo.Context) error {
+ // Fetch the issue code from URI.
+ r.SelectedIssueCode = c.Param("issue_code")
+
+ issueOccurences := r.AnalysisResultData.IssuesOccurenceMap[r.SelectedIssueCode]
+ for _, occurence := range issueOccurences.Occurences {
+ r.AnalysisResultData.RenderedSourceCode = append(r.AnalysisResultData.RenderedSourceCode, template.HTML(occurence.ProcessedData.SourceCode.Rendered)) // skipcq: GSC-G203
+ }
+ err := r.Template.ExecuteTemplate(c.Response().Writer, "occurence.html", *r)
+ if err != nil {
+ fmt.Println(err)
+ return c.String(http.StatusInternalServerError, err.Error())
+ }
+ return c.NoContent(http.StatusOK)
+}
diff --git a/command/analyzer/dryrun/render/types.go b/command/analyzer/dryrun/render/types.go
new file mode 100644
index 00000000..e6340fb9
--- /dev/null
+++ b/command/analyzer/dryrun/render/types.go
@@ -0,0 +1,54 @@
+package render
+
+import (
+ "html/template"
+ "time"
+
+ "github.com/deepsourcelabs/cli/types"
+ "github.com/labstack/echo/v4"
+)
+
+type RunSummary struct {
+ RunDuration string // Time taken to complete analysis.
+ TimeSinceRun string // Time elapsed since the completion of the analysis run.
+ AnalysisStartTime time.Time
+ AnalysisEndTime time.Time
+}
+
+type VCSInfo struct {
+ Branch string // VCS branch of the Analyzer.
+ CommitSHA string // The latest commit SHA of the Analyzer.
+ VersionDiff string // The string specifying the status of Analyzer w.r.t previous version.
+}
+
+type OccurenceData struct {
+ IssueMeta types.AnalyzerIssue // Contains the data stored in issue TOMLs for the respective issue.
+ Files []string // Files where this issue has been reported.
+ FilesInfo string // The string containing the data of which files the issue has been reported in.
+ Occurences []types.Issue // The slice of occurences for a certain issue code.
+}
+
+type ResultData struct {
+ UniqueIssuesCount int // The unique issues count.
+ TotalOccurences int // Total issues reported by the Analyzer.
+ SourcePath string // The path where the source code to be analyzer is stored.
+ IssuesOccurenceMap map[string]OccurenceData // The map of issue code to its occurences data.
+ IssueCategoryCountMap map[string]int // The map of issue category to the count of the issues of that category.
+ AnalysisResult types.AnalysisResult // The analysis result post running processors.
+ MetricsMap map[string]float64 // The map of metric names to their values.
+ RenderedSourceCode []template.HTML // The slice containing the source code snippets for each occurence.
+}
+
+type ResultRenderOpts struct {
+ EchoServer *echo.Echo // The Echo server instance to run the renderer server.
+ Template *template.Template // The go template field so that it can be accessible in `route.go` as well.
+ PageTitle string // The title of the HTML page.
+ AnalyzerShortcode string // The shortcode of the Analyzer.
+ VCSInfo VCSInfo // The VCS information of the Analyzer.
+ Summary RunSummary // The run summary.
+ AnalysisResultData ResultData // The analysis result data.
+ SelectedIssueCode string // The field used to recognize which issue code the user has clicked on to check its occurences.
+ SelectedCategory string // The field used to recognize which category the user has clicked to filter the issues based on it.
+ IssueCategoryNameMap map[string]string // The map used to route category names to their codes. Eg: `Documentation`->`doc`.
+ MetricNameMap map[string]string // The map of metrics shortcodes with their names.
+}
diff --git a/command/analyzer/dryrun/render/utils.go b/command/analyzer/dryrun/render/utils.go
new file mode 100644
index 00000000..9b30fa93
--- /dev/null
+++ b/command/analyzer/dryrun/render/utils.go
@@ -0,0 +1,217 @@
+package render
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/deepsourcelabs/cli/types"
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/hako/durafmt"
+ "github.com/pelletier/go-toml/v2"
+ "github.com/yuin/goldmark"
+)
+
+// fetchRunSummary fetches the data for the run summary section involving the time since latest run
+// and the run duration.
+func fetchRunSummary(startTime, endTime time.Time) (string, string) {
+ // Find the time elapsed since the analysis run.
+ timeSinceRun := fmt.Sprintf("%s ago", durafmt.Parse(time.Since(startTime)).LimitFirstN(1).String())
+
+ // Find the run duration i.e. the time between the analysis start and end time.
+ runDuration := durafmt.Parse(endTime.Sub(startTime)).LimitFirstN(1).String()
+ return runDuration, timeSinceRun
+}
+
+// fetchHeadManually fetches the latest commit hash using the command `git rev-parse HEAD`
+// through go-git.
+func fetchHeadManually(directoryPath string) (string, error) {
+ gitOpts := &git.PlainOpenOptions{
+ DetectDotGit: true,
+ }
+
+ // Open a new repository targeting the given path (the .git folder)
+ repo, err := git.PlainOpenWithOptions(directoryPath, gitOpts)
+ if err != nil {
+ return "", err
+ }
+
+ // Resolve revision into a sha1 commit
+ commitHash, err := repo.ResolveRevision(plumbing.Revision("HEAD"))
+ if err != nil {
+ return "", err
+ }
+ return commitHash.String(), nil
+}
+
+// fetchVCSDetails fetches the VCS details to be shown on the dashboard.
+func fetchVCSDetails(dir string) (string, string) {
+ branch := ""
+ latestCommitHash := ""
+
+ repo, err := git.PlainOpen(dir)
+ if err != nil {
+ return "", ""
+ }
+
+ // Fetch the repository HEAD reference.
+ headRef, _ := repo.Head()
+
+ // Fetch the commit SHA of the latest commit
+ latestCommitHash, _ = fetchHeadManually(dir)
+
+ // Fetch the branch name.
+ branchData := headRef.String()
+ branch = branchData[strings.LastIndex(branchData, "/")+1:]
+
+ return branch, latestCommitHash[:7]
+}
+
+// fetchAnalyzerVCSDetails fetches Analyzer VCS details like how many commits is the Analyzer
+// ahead of the latest git tag.
+func fetchAnalyzerVCSData(dir string) string {
+ // Open the Analyzer's git directory.
+ repo, err := git.PlainOpenWithOptions(dir, &git.PlainOpenOptions{
+ DetectDotGit: true,
+ })
+ if err != nil {
+ fmt.Println(err)
+ return ""
+ }
+
+ // Fetch the repo tags list.
+ tagReferences, _ := repo.Tags()
+ currentTagRef := []string{}
+ if err = tagReferences.ForEach(func(t *plumbing.Reference) error {
+ currentTagRef = append(currentTagRef, t.Name().String())
+ return nil
+ }); err != nil {
+ fmt.Println(err)
+ return ""
+ }
+
+ // currentTagRef slice is empty if there are not tags in the Analyzer git directory.
+ if len(currentTagRef) == 0 {
+ return ""
+ }
+
+ // Convert refs/tags/v0.2.1 -> v0.2.1
+ latestTag := strings.TrimPrefix(currentTagRef[len(currentTagRef)-1], "refs/tags/")
+
+ // Fetch the iterator to the tag objects latest git tag.
+ tagsIter, _ := repo.TagObjects()
+
+ // Fetch the current tag and the commit pointed by the current tag.
+ currentTag := ""
+ var currentCommitSHA plumbing.Hash
+ var currentTagPushTime time.Time
+ if err = tagsIter.ForEach(func(t *object.Tag) (err error) {
+ if t.Name != latestTag {
+ return nil
+ }
+ currentTag = t.Name
+ commit, err := t.Commit()
+ if err != nil {
+ fmt.Println(err)
+ return err
+ }
+
+ // Finds the hash of the commit and the timestamp of when the commit was pushed.
+ currentCommitSHA = commit.Hash
+ currentTagPushTime = commit.Author.When
+ return nil
+ }); err != nil {
+ fmt.Println(err)
+ return ""
+ }
+
+ // Retrieve the commit history from the current tag.
+ commitIter, err := repo.Log(&git.LogOptions{
+ Order: git.LogOrderCommitterTime,
+		Since: &currentTagPushTime,
+ })
+ if err != nil {
+ fmt.Println(err)
+ return ""
+ }
+
+ // Just iterates over the commits and finds the count of how many commits have been
+ // made since the current git tag.
+ commitsSinceCurrentTag := 0
+ if err = commitIter.ForEach(func(c *object.Commit) error {
+ if c.Hash == currentCommitSHA {
+ return nil
+ }
+ commitsSinceCurrentTag++
+ return nil
+ }); err != nil {
+ fmt.Println(err)
+ return ""
+ }
+
+ // Return the Analyzer diff info.
+ switch commitsSinceCurrentTag {
+ case 0:
+ return fmt.Sprintf("This Analyzer is up to date with %s", currentTag)
+ case 1:
+ return fmt.Sprintf("This Analyzer is %d commit ahead of %s", commitsSinceCurrentTag, currentTag)
+ }
+
+ return fmt.Sprintf("This Analyzer is %d commits ahead of %s", commitsSinceCurrentTag, currentTag)
+}
+
+// getServerPort returns the port used to render the server.
+func getServerPort() string {
+ serverPort := ":8080"
+
+ // Check if the default port(8080) is available.
+ listener, err := net.Listen("tcp", serverPort)
+ if err == nil {
+ // Close the listener if it starts to listen on the default port.
+ listener.Close()
+ return strings.TrimPrefix(serverPort, ":")
+ }
+
+ // If the port is busy, get a new port.
+ listener, _ = net.Listen("tcp", ":0")
+	// Extract the randomly assigned free port and release the listener.
+ serverPort = strings.TrimPrefix(listener.Addr().String(), "[::]:")
+ listener.Close()
+ return serverPort
+}
+
+// getIssueMeta receives the issuecode that is raised and it reads the TOML of that issue and returns
+// its details configured in the TOML like title, description and category.
+func getIssueMeta(cwd, issueCode string) (types.AnalyzerIssue, error) {
+ analyzerIssue := types.AnalyzerIssue{}
+ // Read the toml file of the issue in .deepsource/analyzer/issues directory
+ issueFilePath := filepath.Join(cwd, ".deepsource/analyzer/issues", fmt.Sprintf("%s.toml", issueCode))
+
+ // Read the issue and populate the data of issue category and description
+ issueData, err := os.ReadFile(issueFilePath)
+ if err != nil {
+ return analyzerIssue, err
+ }
+
+ // Unmarshal the data from the issue TOMLs into the struct
+ if err = toml.Unmarshal(issueData, &analyzerIssue); err != nil {
+ return analyzerIssue, err
+ }
+
+ // Parsing the markdown issue description and passing it as an HTML string.
+ var buf bytes.Buffer
+ if err := goldmark.Convert([]byte(analyzerIssue.Description), &buf); err != nil {
+ return types.AnalyzerIssue{}, err
+ }
+
+ // Goldmark already provides a secure HTML. Ref: https://github.com/yuin/goldmark#security
+ analyzerIssue.HTMLDescription = template.HTML(buf.String()) // skipcq: GSC-G203
+ return analyzerIssue, nil
+}
diff --git a/command/analyzer/dryrun/render/views/assets/cli.css b/command/analyzer/dryrun/render/views/assets/cli.css
new file mode 100644
index 00000000..268cfeb0
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/assets/cli.css
@@ -0,0 +1,285 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+.button {
+ @apply text-xs h-full py-2 px-4 gap-2 flex items-center rounded-sm;
+}
+
+.sidebar {
+ @apply text-sm p-2 h-8 gap-2 flex items-center rounded-sm hover:bg-ink-300 w-full text-vanilla-400 hover:text-vanilla-100;
+}
+
+.sidebar.selected {
+ @apply bg-ink-200 hover:bg-ink-300 text-vanilla-100 hover:text-vanilla-100;
+}
+
+.sidebar .tag {
+ @apply px-2 text-xs rounded-full py-1 leading-none bg-ink-200 justify-self-end;
+}
+
+.sidebar .label {
+ @apply leading-none flex-grow text-left;
+}
+
+.info {
+ @apply flex items-center gap-1 text-sm text-vanilla-400;
+}
+
+.highlight .hll,
+.highlight .hl {
+ background-color: #21242c;
+ display: block;
+}
+.highlight .hlr {
+ background-color: #582c27;
+ display: inherit;
+}
+.highlight .hlg {
+ background-color: #004f41;
+ display: inherit;
+}
+.highlight .c {
+ color: #75715e;
+} /* Comment */
+.highlight .err {
+ color: #960050;
+ background-color: #1e0010;
+} /* Error */
+.highlight .k {
+ color: #66d9ef;
+} /* Keyword */
+.highlight .l {
+ color: #ae81ff;
+} /* Literal */
+.highlight .n {
+ color: #f8f8f2;
+} /* Name */
+.highlight .o {
+ color: #f92672;
+} /* Operator */
+.highlight .p {
+ color: #f8f8f2;
+} /* Punctuation */
+.highlight .cm {
+ color: #75715e;
+} /* Comment.Multiline */
+.highlight .cp {
+ color: #75715e;
+} /* Comment.Preproc */
+.highlight .c1,
+.highlight .ch {
+ color: #75715e;
+} /* Comment.Single */
+.highlight .cs {
+ color: #75715e;
+} /* Comment.Special */
+.highlight .ge {
+ font-style: italic;
+} /* Generic.Emph */
+.highlight .gs {
+ font-weight: bold;
+} /* Generic.Strong */
+.highlight .kc {
+ color: #66d9ef;
+} /* Keyword.Constant */
+.highlight .kd {
+ color: #66d9ef;
+} /* Keyword.Declaration */
+.highlight .kn {
+ color: #f92672;
+} /* Keyword.Namespace */
+.highlight .kp {
+ color: #66d9ef;
+} /* Keyword.Pseudo */
+.highlight .kr {
+ color: #66d9ef;
+} /* Keyword.Reserved */
+.highlight .kt {
+ color: #66d9ef;
+} /* Keyword.Type */
+.highlight .ld {
+ color: #e6db74;
+} /* Literal.Date */
+.highlight .m {
+ color: #ae81ff;
+} /* Literal.Number */
+.highlight .s {
+ color: #e6db74;
+} /* Literal.String */
+.highlight .na {
+ color: #a6e22e;
+} /* Name.Attribute */
+.highlight .nb {
+ color: #f8f8f2;
+} /* Name.Builtin */
+.highlight .nc {
+ color: #a6e22e;
+} /* Name.Class */
+.highlight .no {
+ color: #66d9ef;
+} /* Name.Constant */
+.highlight .nd {
+ color: #a6e22e;
+} /* Name.Decorator */
+.highlight .ni {
+ color: #f8f8f2;
+} /* Name.Entity */
+.highlight .ne {
+ color: #a6e22e;
+} /* Name.Exception */
+.highlight .nf {
+ color: #a6e22e;
+} /* Name.Function */
+.highlight .fm {
+ color: #a6e22e;
+} /* Name.SpecialFunction */
+.highlight .nl {
+ color: #f8f8f2;
+} /* Name.Label */
+.highlight .nn {
+ color: #f8f8f2;
+} /* Name.Namespace */
+.highlight .nx {
+ color: #a6e22e;
+} /* Name.Other */
+.highlight .py {
+ color: #f8f8f2;
+} /* Name.Property */
+.highlight .nt {
+ color: #f92672;
+} /* Name.Tag */
+.highlight .nv {
+ color: #f8f8f2;
+} /* Name.Variable */
+.highlight .ow {
+ color: #f92672;
+} /* Operator.Word */
+.highlight .w {
+ color: #f8f8f2;
+} /* Text.Whitespace */
+.highlight .mf {
+ color: #ae81ff;
+} /* Literal.Number.Float */
+.highlight .mh {
+ color: #ae81ff;
+} /* Literal.Number.Hex */
+.highlight .mi {
+ color: #ae81ff;
+} /* Literal.Number.Integer */
+.highlight .mo {
+ color: #ae81ff;
+} /* Literal.Number.Oct */
+.highlight .sb {
+ color: #e6db74;
+} /* Literal.String.Backtick */
+.highlight .sc {
+ color: #e6db74;
+} /* Literal.String.Char */
+.highlight .sd {
+ color: #e6db74;
+} /* Literal.String.Doc */
+.highlight .s2 {
+ color: #e6db74;
+} /* Literal.String.Double */
+.highlight .se {
+ color: #ae81ff;
+} /* Literal.String.Escape */
+.highlight .sh {
+ color: #e6db74;
+} /* Literal.String.Heredoc */
+.highlight .si {
+ color: #e6db74;
+} /* Literal.String.Interpol */
+.highlight .sx {
+ color: #e6db74;
+} /* Literal.String.Other */
+.highlight .sr {
+ color: #e6db74;
+} /* Literal.String.Regex */
+.highlight .s1 {
+ color: #e6db74;
+} /* Literal.String.Single */
+.highlight .ss {
+ color: #e6db74;
+} /* Literal.String.Symbol */
+.highlight .bp {
+ color: #f8f8f2;
+} /* Name.Builtin.Pseudo */
+.highlight .vc {
+ color: #f8f8f2;
+} /* Name.Variable.Class */
+.highlight .vg {
+ color: #f8f8f2;
+} /* Name.Variable.Global */
+.highlight .vi {
+ color: #f8f8f2;
+} /* Name.Variable.Instance */
+.highlight .il {
+ color: #ae81ff;
+} /* Literal.Number.Integer.Long */
+.highlight .vm {
+ color: #a6e22e;
+}
+
+.highlight .gh {
+} /* Generic Heading & Diff Header */
+.highlight .gu {
+ color: #75715e;
+} /* Generic.Subheading & Diff Unified/Comment? */
+.highlight .gd {
+ color: #f92672;
+} /* Generic.Deleted & Diff Deleted */
+.highlight .gi {
+ color: #a6e22e;
+} /* Generic.Inserted & Diff Inserted */
+
+.highlight .ln {
+ padding-right: 0.75rem;
+ padding-left: 0.75rem;
+ color: #6a737d;
+}
+
+.highlight .hl .ln {
+ color: #c0c1c3;
+}
+
+.highlight .hl .ln,
+.highlight .hll .ln {
+ background-color: #21242c;
+}
+.highlight .hlg .ln {
+ background-color: #004f41;
+}
+.highlight .hlr .ln {
+ background-color: #582c27;
+}
+
+.highlight pre {
+ font-size: 13px;
+ padding-top: 0.75rem;
+ padding-bottom: 0.5rem;
+ color: #ccc;
+ overflow-x: auto;
+ position: relative;
+}
+
+.highlight :not(.prose) pre {
+ background-color: #16181d !important;
+}
+
+.highlighttable {
+ background: #16181d;
+ width: 100%;
+}
+.highlighttable td.code {
+ width: 100%;
+ padding-left: 0.75rem;
+}
+.highlighttable .linenos {
+ padding-left: 0.75rem;
+ color: #6a737d;
+}
+.highlighttable .linenos pre {
+ font-size: 13px;
+}
diff --git a/command/analyzer/dryrun/render/views/assets/deepsource-cli.css b/command/analyzer/dryrun/render/views/assets/deepsource-cli.css
new file mode 100644
index 00000000..1d1623d3
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/assets/deepsource-cli.css
@@ -0,0 +1,2326 @@
+/*
+! tailwindcss v3.1.4 | MIT License | https://tailwindcss.com
+*/
+
+/*
+1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4)
+2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116)
+*/
+
+*,
+::before,
+::after {
+ box-sizing: border-box;
+ /* 1 */
+ border-width: 0;
+ /* 2 */
+ border-style: solid;
+ /* 2 */
+ border-color: currentColor;
+ /* 2 */
+}
+
+::before,
+::after {
+ --tw-content: '';
+}
+
+/*
+1. Use a consistent sensible line-height in all browsers.
+2. Prevent adjustments of font size after orientation changes in iOS.
+3. Use a more readable tab size.
+4. Use the user's configured `sans` font-family by default.
+*/
+
+html {
+ line-height: 1.5;
+ /* 1 */
+ -webkit-text-size-adjust: 100%;
+ /* 2 */
+ -moz-tab-size: 4;
+ /* 3 */
+ -o-tab-size: 4;
+ tab-size: 4;
+ /* 3 */
+ font-family: Inter, ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
+ /* 4 */
+}
+
+/*
+1. Remove the margin in all browsers.
+2. Inherit line-height from `html` so users can set them as a class directly on the `html` element.
+*/
+
+body {
+ margin: 0;
+ /* 1 */
+ line-height: inherit;
+ /* 2 */
+}
+
+/*
+1. Add the correct height in Firefox.
+2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655)
+3. Ensure horizontal rules are visible by default.
+*/
+
+hr {
+ height: 0;
+ /* 1 */
+ color: inherit;
+ /* 2 */
+ border-top-width: 1px;
+ /* 3 */
+}
+
+/*
+Add the correct text decoration in Chrome, Edge, and Safari.
+*/
+
+abbr:where([title]) {
+ -webkit-text-decoration: underline dotted;
+ text-decoration: underline dotted;
+}
+
+/*
+Remove the default font size and weight for headings.
+*/
+
+h1,
+h2,
+h3,
+h4,
+h5,
+h6 {
+ font-size: inherit;
+ font-weight: inherit;
+}
+
+/*
+Reset links to optimize for opt-in styling instead of opt-out.
+*/
+
+a {
+ color: inherit;
+ text-decoration: inherit;
+}
+
+/*
+Add the correct font weight in Edge and Safari.
+*/
+
+b,
+strong {
+ font-weight: bolder;
+}
+
+/*
+1. Use the user's configured `mono` font family by default.
+2. Correct the odd `em` font sizing in all browsers.
+*/
+
+code,
+kbd,
+samp,
+pre {
+ font-family: ui-monospace, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
+ /* 1 */
+ font-size: 1em;
+ /* 2 */
+}
+
+/*
+Add the correct font size in all browsers.
+*/
+
+small {
+ font-size: 80%;
+}
+
+/*
+Prevent `sub` and `sup` elements from affecting the line height in all browsers.
+*/
+
+sub,
+sup {
+ font-size: 75%;
+ line-height: 0;
+ position: relative;
+ vertical-align: baseline;
+}
+
+sub {
+ bottom: -0.25em;
+}
+
+sup {
+ top: -0.5em;
+}
+
+/*
+1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297)
+2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016)
+3. Remove gaps between table borders by default.
+*/
+
+table {
+ text-indent: 0;
+ /* 1 */
+ border-color: inherit;
+ /* 2 */
+ border-collapse: collapse;
+ /* 3 */
+}
+
+/*
+1. Change the font styles in all browsers.
+2. Remove the margin in Firefox and Safari.
+3. Remove default padding in all browsers.
+*/
+
+button,
+input,
+optgroup,
+select,
+textarea {
+ font-family: inherit;
+ /* 1 */
+ font-size: 100%;
+ /* 1 */
+ font-weight: inherit;
+ /* 1 */
+ line-height: inherit;
+ /* 1 */
+ color: inherit;
+ /* 1 */
+ margin: 0;
+ /* 2 */
+ padding: 0;
+ /* 3 */
+}
+
+/*
+Remove the inheritance of text transform in Edge and Firefox.
+*/
+
+button,
+select {
+ text-transform: none;
+}
+
+/*
+1. Correct the inability to style clickable types in iOS and Safari.
+2. Remove default button styles.
+*/
+
+button,
+[type='button'],
+[type='reset'],
+[type='submit'] {
+ -webkit-appearance: button;
+ /* 1 */
+ background-color: transparent;
+ /* 2 */
+ background-image: none;
+ /* 2 */
+}
+
+/*
+Use the modern Firefox focus style for all focusable elements.
+*/
+
+:-moz-focusring {
+ outline: auto;
+}
+
+/*
+Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737)
+*/
+
+:-moz-ui-invalid {
+ box-shadow: none;
+}
+
+/*
+Add the correct vertical alignment in Chrome and Firefox.
+*/
+
+progress {
+ vertical-align: baseline;
+}
+
+/*
+Correct the cursor style of increment and decrement buttons in Safari.
+*/
+
+::-webkit-inner-spin-button,
+::-webkit-outer-spin-button {
+ height: auto;
+}
+
+/*
+1. Correct the odd appearance in Chrome and Safari.
+2. Correct the outline style in Safari.
+*/
+
+[type='search'] {
+ -webkit-appearance: textfield;
+ /* 1 */
+ outline-offset: -2px;
+ /* 2 */
+}
+
+/*
+Remove the inner padding in Chrome and Safari on macOS.
+*/
+
+::-webkit-search-decoration {
+ -webkit-appearance: none;
+}
+
+/*
+1. Correct the inability to style clickable types in iOS and Safari.
+2. Change font properties to `inherit` in Safari.
+*/
+
+::-webkit-file-upload-button {
+ -webkit-appearance: button;
+ /* 1 */
+ font: inherit;
+ /* 2 */
+}
+
+/*
+Add the correct display in Chrome and Safari.
+*/
+
+summary {
+ display: list-item;
+}
+
+/*
+Removes the default spacing and border for appropriate elements.
+*/
+
+blockquote,
+dl,
+dd,
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+hr,
+figure,
+p,
+pre {
+ margin: 0;
+}
+
+fieldset {
+ margin: 0;
+ padding: 0;
+}
+
+legend {
+ padding: 0;
+}
+
+ol,
+ul,
+menu {
+ list-style: none;
+ margin: 0;
+ padding: 0;
+}
+
+/*
+Prevent resizing textareas horizontally by default.
+*/
+
+textarea {
+ resize: vertical;
+}
+
+/*
+1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300)
+2. Set the default placeholder color to the user's configured gray 400 color.
+*/
+
+input::-moz-placeholder, textarea::-moz-placeholder {
+ opacity: 1;
+ /* 1 */
+ color: #9ca3af;
+ /* 2 */
+}
+
+input:-ms-input-placeholder, textarea:-ms-input-placeholder {
+ opacity: 1;
+ /* 1 */
+ color: #9ca3af;
+ /* 2 */
+}
+
+input::placeholder,
+textarea::placeholder {
+ opacity: 1;
+ /* 1 */
+ color: #9ca3af;
+ /* 2 */
+}
+
+/*
+Set the default cursor for buttons.
+*/
+
+button,
+[role="button"] {
+ cursor: pointer;
+}
+
+/*
+Make sure disabled buttons don't get the pointer cursor.
+*/
+
+:disabled {
+ cursor: default;
+}
+
+/*
+1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14)
+2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210)
+ This can trigger a poorly considered lint error in some tools but is included by design.
+*/
+
+img,
+svg,
+video,
+canvas,
+audio,
+iframe,
+embed,
+object {
+ display: block;
+ /* 1 */
+ vertical-align: middle;
+ /* 2 */
+}
+
+/*
+Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14)
+*/
+
+img,
+video {
+ max-width: 100%;
+ height: auto;
+}
+
+*, ::before, ::after {
+ --tw-border-spacing-x: 0;
+ --tw-border-spacing-y: 0;
+ --tw-translate-x: 0;
+ --tw-translate-y: 0;
+ --tw-rotate: 0;
+ --tw-skew-x: 0;
+ --tw-skew-y: 0;
+ --tw-scale-x: 1;
+ --tw-scale-y: 1;
+ --tw-pan-x: ;
+ --tw-pan-y: ;
+ --tw-pinch-zoom: ;
+ --tw-scroll-snap-strictness: proximity;
+ --tw-ordinal: ;
+ --tw-slashed-zero: ;
+ --tw-numeric-figure: ;
+ --tw-numeric-spacing: ;
+ --tw-numeric-fraction: ;
+ --tw-ring-inset: ;
+ --tw-ring-offset-width: 0px;
+ --tw-ring-offset-color: #fff;
+ --tw-ring-color: rgb(59 130 246 / 0.5);
+ --tw-ring-offset-shadow: 0 0 #0000;
+ --tw-ring-shadow: 0 0 #0000;
+ --tw-shadow: 0 0 #0000;
+ --tw-shadow-colored: 0 0 #0000;
+ --tw-blur: ;
+ --tw-brightness: ;
+ --tw-contrast: ;
+ --tw-grayscale: ;
+ --tw-hue-rotate: ;
+ --tw-invert: ;
+ --tw-saturate: ;
+ --tw-sepia: ;
+ --tw-drop-shadow: ;
+ --tw-backdrop-blur: ;
+ --tw-backdrop-brightness: ;
+ --tw-backdrop-contrast: ;
+ --tw-backdrop-grayscale: ;
+ --tw-backdrop-hue-rotate: ;
+ --tw-backdrop-invert: ;
+ --tw-backdrop-opacity: ;
+ --tw-backdrop-saturate: ;
+ --tw-backdrop-sepia: ;
+}
+
+::-webkit-backdrop {
+ --tw-border-spacing-x: 0;
+ --tw-border-spacing-y: 0;
+ --tw-translate-x: 0;
+ --tw-translate-y: 0;
+ --tw-rotate: 0;
+ --tw-skew-x: 0;
+ --tw-skew-y: 0;
+ --tw-scale-x: 1;
+ --tw-scale-y: 1;
+ --tw-pan-x: ;
+ --tw-pan-y: ;
+ --tw-pinch-zoom: ;
+ --tw-scroll-snap-strictness: proximity;
+ --tw-ordinal: ;
+ --tw-slashed-zero: ;
+ --tw-numeric-figure: ;
+ --tw-numeric-spacing: ;
+ --tw-numeric-fraction: ;
+ --tw-ring-inset: ;
+ --tw-ring-offset-width: 0px;
+ --tw-ring-offset-color: #fff;
+ --tw-ring-color: rgb(59 130 246 / 0.5);
+ --tw-ring-offset-shadow: 0 0 #0000;
+ --tw-ring-shadow: 0 0 #0000;
+ --tw-shadow: 0 0 #0000;
+ --tw-shadow-colored: 0 0 #0000;
+ --tw-blur: ;
+ --tw-brightness: ;
+ --tw-contrast: ;
+ --tw-grayscale: ;
+ --tw-hue-rotate: ;
+ --tw-invert: ;
+ --tw-saturate: ;
+ --tw-sepia: ;
+ --tw-drop-shadow: ;
+ --tw-backdrop-blur: ;
+ --tw-backdrop-brightness: ;
+ --tw-backdrop-contrast: ;
+ --tw-backdrop-grayscale: ;
+ --tw-backdrop-hue-rotate: ;
+ --tw-backdrop-invert: ;
+ --tw-backdrop-opacity: ;
+ --tw-backdrop-saturate: ;
+ --tw-backdrop-sepia: ;
+}
+
+::backdrop {
+ --tw-border-spacing-x: 0;
+ --tw-border-spacing-y: 0;
+ --tw-translate-x: 0;
+ --tw-translate-y: 0;
+ --tw-rotate: 0;
+ --tw-skew-x: 0;
+ --tw-skew-y: 0;
+ --tw-scale-x: 1;
+ --tw-scale-y: 1;
+ --tw-pan-x: ;
+ --tw-pan-y: ;
+ --tw-pinch-zoom: ;
+ --tw-scroll-snap-strictness: proximity;
+ --tw-ordinal: ;
+ --tw-slashed-zero: ;
+ --tw-numeric-figure: ;
+ --tw-numeric-spacing: ;
+ --tw-numeric-fraction: ;
+ --tw-ring-inset: ;
+ --tw-ring-offset-width: 0px;
+ --tw-ring-offset-color: #fff;
+ --tw-ring-color: rgb(59 130 246 / 0.5);
+ --tw-ring-offset-shadow: 0 0 #0000;
+ --tw-ring-shadow: 0 0 #0000;
+ --tw-shadow: 0 0 #0000;
+ --tw-shadow-colored: 0 0 #0000;
+ --tw-blur: ;
+ --tw-brightness: ;
+ --tw-contrast: ;
+ --tw-grayscale: ;
+ --tw-hue-rotate: ;
+ --tw-invert: ;
+ --tw-saturate: ;
+ --tw-sepia: ;
+ --tw-drop-shadow: ;
+ --tw-backdrop-blur: ;
+ --tw-backdrop-brightness: ;
+ --tw-backdrop-contrast: ;
+ --tw-backdrop-grayscale: ;
+ --tw-backdrop-hue-rotate: ;
+ --tw-backdrop-invert: ;
+ --tw-backdrop-opacity: ;
+ --tw-backdrop-saturate: ;
+ --tw-backdrop-sepia: ;
+}
+
+.prose {
+ color: #ffffff;
+ max-width: 65ch;
+}
+
+.prose :where([class~="lead"]):not(:where([class~="not-prose"] *)) {
+ color: #c0c1c3;
+ font-size: 1.25em;
+ line-height: 1.6;
+ margin-top: 1.2em;
+ margin-bottom: 1.2em;
+}
+
+.prose :where(a):not(:where([class~="not-prose"] *)) {
+  color: #33cb9a;
+  -webkit-text-decoration: none;  /* was "normal" — not a valid text-decoration value, declaration was dropped */
+  text-decoration: none;  /* underline is restored on hover by the a:hover rule below */
+  font-weight: 500;
+}
+
+.prose :where(strong):not(:where([class~="not-prose"] *)) {
+ color: #ffffff;
+ font-weight: 600;
+}
+
+.prose :where(ol):not(:where([class~="not-prose"] *)) {
+ list-style-type: decimal;
+ padding-left: 1.625em;
+}
+
+.prose :where(ol[type="A"]):not(:where([class~="not-prose"] *)) {
+ list-style-type: upper-alpha;
+}
+
+.prose :where(ol[type="a"]):not(:where([class~="not-prose"] *)) {
+ list-style-type: lower-alpha;
+}
+
+.prose :where(ol[type="A" s]):not(:where([class~="not-prose"] *)) {
+ list-style-type: upper-alpha;
+}
+
+.prose :where(ol[type="a" s]):not(:where([class~="not-prose"] *)) {
+ list-style-type: lower-alpha;
+}
+
+.prose :where(ol[type="I"]):not(:where([class~="not-prose"] *)) {
+ list-style-type: upper-roman;
+}
+
+.prose :where(ol[type="i"]):not(:where([class~="not-prose"] *)) {
+ list-style-type: lower-roman;
+}
+
+.prose :where(ol[type="I" s]):not(:where([class~="not-prose"] *)) {
+ list-style-type: upper-roman;
+}
+
+.prose :where(ol[type="i" s]):not(:where([class~="not-prose"] *)) {
+ list-style-type: lower-roman;
+}
+
+.prose :where(ol[type="1"]):not(:where([class~="not-prose"] *)) {
+ list-style-type: decimal;
+}
+
+.prose :where(ul):not(:where([class~="not-prose"] *)) {
+  /* `list-style: none` subsumes the duplicate `list-style-type: none` the generator emitted */
+  list-style: none;
+  padding-left: 1.625em;
+}
+
+.prose :where(ol > li):not(:where([class~="not-prose"] *))::marker {
+ font-weight: 400;
+ color: var(--tw-prose-counters);
+}
+
+.prose :where(ul > li):not(:where([class~="not-prose"] *))::marker {
+ color: var(--tw-prose-bullets);
+}
+
+.prose :where(hr):not(:where([class~="not-prose"] *)) {
+ border-color: #2a2e37;
+ border-top-width: 1px;
+ margin-top: 3em;
+ margin-bottom: 3em;
+}
+
+.prose :where(blockquote):not(:where([class~="not-prose"] *)) {
+ font-weight: 500;
+ font-style: normal;
+ color: inherit;
+ border-left-width: 0.25rem;
+}
+
+/* NOTE(review): the generator leaked a raw JS theme object here
+   (`… borderLeftColor { 100: #b5ecda; 150: #a5e8d2; 200: #94e4ca;
+   300: #74dcba; 400: #53d4aa; 500: #33cb9a; 600: #2eb78b;
+   -d-e-f-a-u-l-t: #33cb9a; }`). That block is not valid CSS and was
+   silently discarded by browsers, so the blockquote's 0.25rem left
+   border (declared just above) rendered in `currentColor`. Apply the
+   palette's DEFAULT shade explicitly instead. */
+.prose :where(blockquote):not(:where([class~="not-prose"] *)) {
+  border-left-color: #33cb9a;
+}
+
+.prose :where(blockquote):not(:where([class~="not-prose"] *)) {
+  quotes: "\201C""\201D""\2018""\2019";
+  margin-top: 1.6em;
+  margin-bottom: 1.6em;
+  padding-left: 1em;
+  line-height: 1.5;
+  font-size: 1.125rem;
+  /* removed: `font-size: [object Object];` — a JS object was stringified into the stylesheet, shadowing the valid 1.125rem above */
+}
+
+.prose :where(blockquote p:first-of-type):not(:where([class~="not-prose"] *))::before {
+ content: open-quote;
+}
+
+.prose :where(blockquote p:last-of-type):not(:where([class~="not-prose"] *))::after {
+ content: close-quote;
+}
+
+.prose :where(h1):not(:where([class~="not-prose"] *)) {
+ color: var(--tw-prose-headings);
+ font-weight: 700;
+ font-size: 2.25em;
+ margin-top: 0;
+ margin-bottom: 0.8888889em;
+ line-height: 1.1111111;
+}
+
+.prose :where(h1 strong):not(:where([class~="not-prose"] *)) {
+ font-weight: 900;
+}
+
+.prose :where(h2):not(:where([class~="not-prose"] *)) {
+ color: var(--tw-prose-headings);
+ font-weight: 600;
+ font-size: 1.5em;
+ margin-top: 2em;
+ margin-bottom: 1em;
+ line-height: 1.3333333;
+}
+
+.prose :where(h2 strong):not(:where([class~="not-prose"] *)) {
+ font-weight: 800;
+}
+
+.prose :where(h3):not(:where([class~="not-prose"] *)) {
+ color: var(--tw-prose-headings);
+ font-weight: 600;
+ font-size: 1.25em;
+ margin-top: 1.6em;
+ margin-bottom: 0.6em;
+ line-height: 1.6;
+}
+
+.prose :where(h3 strong):not(:where([class~="not-prose"] *)) {
+ font-weight: 700;
+}
+
+.prose :where(h4):not(:where([class~="not-prose"] *)) {
+ color: var(--tw-prose-headings);
+ font-weight: 600;
+ margin-top: 1.5em;
+ margin-bottom: 0.5em;
+ line-height: 1.5;
+}
+
+.prose :where(h4 strong):not(:where([class~="not-prose"] *)) {
+ font-weight: 700;
+}
+
+.prose :where(figure > *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+.prose :where(figcaption):not(:where([class~="not-prose"] *)) {
+ color: var(--tw-prose-captions);
+ font-size: 0.875em;
+ line-height: 1.4285714;
+ margin-top: 0.8571429em;
+}
+
+.prose :where(code):not(:where([class~="not-prose"] *)) {
+ color: #eeeeee;
+ font-weight: 600;
+ font-size: 0.875em;
+}
+
+.prose :where(code):not(:where([class~="not-prose"] *))::before {
+ content: "`";
+}
+
+.prose :where(code):not(:where([class~="not-prose"] *))::after {
+ content: "`";
+}
+
+.prose :where(a code):not(:where([class~="not-prose"] *)) {
+ color: #33cb9a;
+}
+
+.prose :where(pre):not(:where([class~="not-prose"] *)) {
+ color: #c0c1c3;
+ background-color: #23262e;
+ overflow-x: auto;
+ font-weight: 400;
+ font-size: 0.875em;
+ line-height: 1.7142857;
+ margin-top: 1.7142857em;
+ margin-bottom: 1.7142857em;
+ border-radius: 0.25rem;
+ padding-top: 0.8571429em;
+ padding-right: 1.1428571em;
+ padding-bottom: 0.8571429em;
+ padding-left: 1.1428571em;
+}
+
+.prose :where(pre code):not(:where([class~="not-prose"] *)) {
+ background-color: transparent;
+ border-width: 0;
+ border-radius: 0;
+ padding: 0;
+ font-weight: 400;
+ color: inherit;
+ font-size: inherit;
+ font-family: inherit;
+ line-height: inherit;
+}
+
+.prose :where(pre code):not(:where([class~="not-prose"] *))::before {
+ content: none;
+}
+
+.prose :where(pre code):not(:where([class~="not-prose"] *))::after {
+ content: none;
+}
+
+.prose :where(table):not(:where([class~="not-prose"] *)) {
+ width: 100%;
+ table-layout: auto;
+ text-align: left;
+ margin-top: 2em;
+ margin-bottom: 2em;
+ font-size: 0.875em;
+ line-height: 1.7142857;
+}
+
+.prose :where(thead):not(:where([class~="not-prose"] *)) {
+ border-bottom-width: 1px;
+ border-bottom-color: var(--tw-prose-th-borders);
+ color: #c0c1c3;
+}
+
+.prose :where(thead th):not(:where([class~="not-prose"] *)) {
+ color: var(--tw-prose-headings);
+ font-weight: 500;
+ vertical-align: bottom;
+ padding-right: 0.5714286em;
+ padding-bottom: 0.5714286em;
+ padding-left: 0.5714286em;
+}
+
+.prose :where(tbody tr):not(:where([class~="not-prose"] *)) {
+ border-bottom-width: 1px;
+ border-bottom-color: var(--tw-prose-td-borders);
+}
+
+.prose :where(tbody tr:last-child):not(:where([class~="not-prose"] *)) {
+ border-bottom-width: 0;
+}
+
+.prose :where(tbody td):not(:where([class~="not-prose"] *)) {
+ vertical-align: top;
+ padding-top: 0.5714286em;
+ padding-right: 0.5714286em;
+ padding-bottom: 0.5714286em;
+ padding-left: 0.5714286em;
+}
+
+.prose {
+ --tw-prose-body: #374151;
+ --tw-prose-headings: #111827;
+ --tw-prose-lead: #4b5563;
+ --tw-prose-links: #111827;
+ --tw-prose-bold: #111827;
+ --tw-prose-counters: #6b7280;
+ --tw-prose-bullets: #d1d5db;
+ --tw-prose-hr: #e5e7eb;
+ --tw-prose-quotes: #111827;
+ --tw-prose-quote-borders: #e5e7eb;
+ --tw-prose-captions: #6b7280;
+ --tw-prose-code: #111827;
+ --tw-prose-pre-code: #e5e7eb;
+ --tw-prose-pre-bg: #1f2937;
+ --tw-prose-th-borders: #d1d5db;
+ --tw-prose-td-borders: #e5e7eb;
+ --tw-prose-invert-body: #d1d5db;
+ --tw-prose-invert-headings: #fff;
+ --tw-prose-invert-lead: #9ca3af;
+ --tw-prose-invert-links: #fff;
+ --tw-prose-invert-bold: #fff;
+ --tw-prose-invert-counters: #9ca3af;
+ --tw-prose-invert-bullets: #4b5563;
+ --tw-prose-invert-hr: #374151;
+ --tw-prose-invert-quotes: #f3f4f6;
+ --tw-prose-invert-quote-borders: #374151;
+ --tw-prose-invert-captions: #9ca3af;
+ --tw-prose-invert-code: #fff;
+ --tw-prose-invert-pre-code: #d1d5db;
+ --tw-prose-invert-pre-bg: rgb(0 0 0 / 50%);
+ --tw-prose-invert-th-borders: #4b5563;
+ --tw-prose-invert-td-borders: #374151;
+ font-size: 1rem;
+ line-height: 1.75;
+}
+
+.prose :where(p):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.25em;
+ margin-bottom: 1.25em;
+}
+
+.prose :where(img):not(:where([class~="not-prose"] *)) {
+ margin-top: 2em;
+ margin-bottom: 2em;
+}
+
+.prose :where(video):not(:where([class~="not-prose"] *)) {
+ margin-top: 2em;
+ margin-bottom: 2em;
+}
+
+.prose :where(figure):not(:where([class~="not-prose"] *)) {
+ margin-top: 2em;
+ margin-bottom: 2em;
+}
+
+.prose :where(h2 code):not(:where([class~="not-prose"] *)) {
+ font-size: 0.875em;
+}
+
+.prose :where(h3 code):not(:where([class~="not-prose"] *)) {
+ font-size: 0.9em;
+}
+
+.prose :where(li):not(:where([class~="not-prose"] *)) {
+ margin-top: 0.5em;
+ margin-bottom: 0.5em;
+}
+
+.prose :where(ol > li):not(:where([class~="not-prose"] *)) {
+ padding-left: 0.375em;
+ position: relative;
+}
+
+.prose :where(ul > li):not(:where([class~="not-prose"] *)) {
+ padding-left: 0.375em;
+ position: relative;
+}
+
+.prose > :where(ul > li p):not(:where([class~="not-prose"] *)) {
+ margin-top: 0.75em;
+ margin-bottom: 0.75em;
+}
+
+.prose > :where(ul > li > *:first-child):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.25em;
+}
+
+.prose > :where(ul > li > *:last-child):not(:where([class~="not-prose"] *)) {
+ margin-bottom: 1.25em;
+}
+
+.prose > :where(ol > li > *:first-child):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.25em;
+}
+
+.prose > :where(ol > li > *:last-child):not(:where([class~="not-prose"] *)) {
+ margin-bottom: 1.25em;
+}
+
+.prose :where(ul ul, ul ol, ol ul, ol ol):not(:where([class~="not-prose"] *)) {
+ margin-top: 0.75em;
+ margin-bottom: 0.75em;
+}
+
+.prose :where(hr + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose :where(h2 + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose :where(h3 + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose :where(h4 + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose :where(thead th:first-child):not(:where([class~="not-prose"] *)) {
+ padding-left: 0;
+}
+
+.prose :where(thead th:last-child):not(:where([class~="not-prose"] *)) {
+ padding-right: 0;
+}
+
+.prose :where(tbody td:first-child):not(:where([class~="not-prose"] *)) {
+ padding-left: 0;
+}
+
+.prose :where(tbody td:last-child):not(:where([class~="not-prose"] *)) {
+ padding-right: 0;
+}
+
+.prose > :where(:first-child):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose > :where(:last-child):not(:where([class~="not-prose"] *)) {
+ margin-bottom: 0;
+}
+
+.prose :where(a:hover):not(:where([class~="not-prose"] *)) {
+ text-decoration: underline;
+}
+
+.prose :where(ol > li):not(:where([class~="not-prose"] *))::before {
+ content: counter(list-item) ".";
+ position: absolute;
+ font-weight: 400;
+ color: #c0c1c3;
+}
+
+.prose :where(ul > li):not(:where([class~="not-prose"] *))::before {
+ content: "—";
+ margin-right: 0.5rem;
+ width: 0;
+ top: 0;
+ left: 0;
+ height: 0;
+ position: absolute;
+}
+
+/* NOTE(review): the generator leaked a raw JS palette object here
+   (`…::before color { 100: #b5ecda; 150: #a5e8d2; 200: #94e4ca;
+   300: #74dcba; 400: #53d4aa; 500: #33cb9a; 600: #2eb78b;
+   -d-e-f-a-u-l-t: #33cb9a; }`). That block is not valid CSS and was
+   silently ignored, so the "—" bullet marker inherited the body text
+   color. Apply the palette's DEFAULT shade explicitly; the full shade
+   map belongs in the Tailwind config, not the emitted stylesheet. */
+.prose :where(ul > li):not(:where([class~="not-prose"] *))::before {
+  color: #33cb9a;
+}
+
+.prose :where(h1, h2, h3, h4, h5):not(:where([class~="not-prose"] *)) {
+ color: #ffffff;
+}
+
+.prose :where(h3, h4, h5):not(:where([class~="not-prose"] *)) {
+ font-weight: 500;
+}
+
+.prose :where(figure figcaption):not(:where([class~="not-prose"] *)) {
+ color: #c0c1c3;
+ text-align: center;
+}
+
+@media (min-width: 640px) {
+ .prose :where(figure figcaption):not(:where([class~="not-prose"] *)) {
+ text-align: left;
+ }
+}
+
+.prose :where(figure img):not(:where([class~="not-prose"] *)) {
+ border-radius: 0.25rem;
+}
+
+/* NOTE(review): the originals nested pseudo-elements inside :where()
+   (`:where(code::before, code)…::after`) — that is invalid per Selectors
+   Level 4, so both rules were silently dropped. Rewritten validly. */
+.prose :where(code, pre code):not(:where([class~="not-prose"] *))::before,
+.prose :where(code, pre code):not(:where([class~="not-prose"] *))::after {
+  content: "";
+}
+
+.prose :where(thead, tbody tr):not(:where([class~="not-prose"] *)) {
+ border-bottom-width: 1px;
+ border-bottom-color: #2a2e37;
+}
+
+.prose-sm {
+ font-size: 0.875rem;
+ line-height: 1.7142857;
+}
+
+.prose-sm :where(p):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.1428571em;
+ margin-bottom: 1.1428571em;
+}
+
+.prose-sm :where([class~="lead"]):not(:where([class~="not-prose"] *)) {
+ font-size: 1.2857143em;
+ line-height: 1.5555556;
+ margin-top: 0.8888889em;
+ margin-bottom: 0.8888889em;
+}
+
+.prose-sm :where(blockquote):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.3333333em;
+ margin-bottom: 1.3333333em;
+ padding-left: 1.1111111em;
+}
+
+.prose-sm :where(h1):not(:where([class~="not-prose"] *)) {
+ font-size: 2.1428571em;
+ margin-top: 0;
+ margin-bottom: 0.8em;
+ line-height: 1.2;
+}
+
+.prose-sm :where(h2):not(:where([class~="not-prose"] *)) {
+ font-size: 1.4285714em;
+ margin-top: 1.6em;
+ margin-bottom: 0.8em;
+ line-height: 1.4;
+}
+
+.prose-sm :where(h3):not(:where([class~="not-prose"] *)) {
+ font-size: 1.2857143em;
+ margin-top: 1.5555556em;
+ margin-bottom: 0.4444444em;
+ line-height: 1.5555556;
+}
+
+.prose-sm :where(h4):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.4285714em;
+ margin-bottom: 0.5714286em;
+ line-height: 1.4285714;
+}
+
+.prose-sm :where(img):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.7142857em;
+ margin-bottom: 1.7142857em;
+}
+
+.prose-sm :where(video):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.7142857em;
+ margin-bottom: 1.7142857em;
+}
+
+.prose-sm :where(figure):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.7142857em;
+ margin-bottom: 1.7142857em;
+}
+
+.prose-sm :where(figure > *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+.prose-sm :where(figcaption):not(:where([class~="not-prose"] *)) {
+ font-size: 0.8571429em;
+ line-height: 1.3333333;
+ margin-top: 0.6666667em;
+}
+
+.prose-sm :where(code):not(:where([class~="not-prose"] *)) {
+ font-size: 0.8571429em;
+}
+
+.prose-sm :where(h2 code):not(:where([class~="not-prose"] *)) {
+ font-size: 0.9em;
+}
+
+.prose-sm :where(h3 code):not(:where([class~="not-prose"] *)) {
+ font-size: 0.8888889em;
+}
+
+.prose-sm :where(pre):not(:where([class~="not-prose"] *)) {
+ font-size: 0.8571429em;
+ line-height: 1.6666667;
+ margin-top: 1.6666667em;
+ margin-bottom: 1.6666667em;
+ border-radius: 0;
+ padding-top: 0.6666667em;
+ padding-right: 1em;
+ padding-bottom: 0.6666667em;
+ padding-left: 1em;
+ color: #c0c1c3;
+ background-color: #23262e;
+ overflow-x: auto;
+}
+
+.prose-sm :where(ol):not(:where([class~="not-prose"] *)) {
+ padding-left: 1.5714286em;
+}
+
+.prose-sm :where(ul):not(:where([class~="not-prose"] *)) {
+ padding-left: 1.5714286em;
+}
+
+.prose-sm :where(li):not(:where([class~="not-prose"] *)) {
+ margin-top: 0.2857143em;
+ margin-bottom: 0.2857143em;
+}
+
+.prose-sm :where(ol > li):not(:where([class~="not-prose"] *)) {
+ padding-left: 0.4285714em;
+}
+
+.prose-sm :where(ul > li):not(:where([class~="not-prose"] *)) {
+ padding-left: 0.4285714em;
+}
+
+.prose-sm > :where(ul > li p):not(:where([class~="not-prose"] *)) {
+ margin-top: 0.5714286em;
+ margin-bottom: 0.5714286em;
+}
+
+.prose-sm > :where(ul > li > *:first-child):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.1428571em;
+}
+
+.prose-sm > :where(ul > li > *:last-child):not(:where([class~="not-prose"] *)) {
+ margin-bottom: 1.1428571em;
+}
+
+.prose-sm > :where(ol > li > *:first-child):not(:where([class~="not-prose"] *)) {
+ margin-top: 1.1428571em;
+}
+
+.prose-sm > :where(ol > li > *:last-child):not(:where([class~="not-prose"] *)) {
+ margin-bottom: 1.1428571em;
+}
+
+.prose-sm :where(ul ul, ul ol, ol ul, ol ol):not(:where([class~="not-prose"] *)) {
+ margin-top: 0.5714286em;
+ margin-bottom: 0.5714286em;
+}
+
+.prose-sm :where(hr):not(:where([class~="not-prose"] *)) {
+ margin-top: 2.8571429em;
+ margin-bottom: 2.8571429em;
+}
+
+.prose-sm :where(hr + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose-sm :where(h2 + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose-sm :where(h3 + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose-sm :where(h4 + *):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose-sm :where(table):not(:where([class~="not-prose"] *)) {
+ font-size: 0.8571429em;
+ line-height: 1.5;
+}
+
+.prose-sm :where(thead th):not(:where([class~="not-prose"] *)) {
+ padding-right: 1em;
+ padding-bottom: 0.6666667em;
+ padding-left: 1em;
+}
+
+.prose-sm :where(thead th:first-child):not(:where([class~="not-prose"] *)) {
+ padding-left: 0;
+}
+
+.prose-sm :where(thead th:last-child):not(:where([class~="not-prose"] *)) {
+ padding-right: 0;
+}
+
+.prose-sm :where(tbody td):not(:where([class~="not-prose"] *)) {
+ padding-top: 0.6666667em;
+ padding-right: 1em;
+ padding-bottom: 0.6666667em;
+ padding-left: 1em;
+}
+
+.prose-sm :where(tbody td:first-child):not(:where([class~="not-prose"] *)) {
+ padding-left: 0;
+}
+
+.prose-sm :where(tbody td:last-child):not(:where([class~="not-prose"] *)) {
+ padding-right: 0;
+}
+
+.prose-sm > :where(:first-child):not(:where([class~="not-prose"] *)) {
+ margin-top: 0;
+}
+
+.prose-sm > :where(:last-child):not(:where([class~="not-prose"] *)) {
+ margin-bottom: 0;
+}
+
+.prose-sm :where(ul > li):not(:where([class~="not-prose"] *))::before {
+ content: "—";
+ margin-right: 0.5rem;
+ width: 0;
+ top: 0;
+ left: 0;
+ height: 0;
+ position: absolute;
+}
+
+/* NOTE(review): the generator leaked a raw JS palette object here
+   (`…::before color { 100: #b5ecda; 150: #a5e8d2; 200: #94e4ca;
+   300: #74dcba; 400: #53d4aa; 500: #33cb9a; 600: #2eb78b;
+   -d-e-f-a-u-l-t: #33cb9a; }`). That block is not valid CSS and was
+   silently ignored, so the "—" bullet marker inherited the body text
+   color. Apply the palette's DEFAULT shade explicitly; the full shade
+   map belongs in the Tailwind config, not the emitted stylesheet. */
+.prose-sm :where(ul > li):not(:where([class~="not-prose"] *))::before {
+  color: #33cb9a;
+}
+
+.absolute {
+ position: absolute;
+}
+
+.relative {
+ position: relative;
+}
+
+.sticky {
+ position: -webkit-sticky;
+ position: sticky;
+}
+
+.top-12 {
+ top: 3rem;
+}
+
+.top-0 {
+ top: 0px;
+}
+
+.top-36 {
+ top: 9rem;
+}
+
+.z-20 {
+ z-index: 20;
+}
+
+.z-50 {
+ z-index: 50;
+}
+
+.col-span-2 {
+ grid-column: span 2 / span 2;
+}
+
+.row-span-2 {
+ grid-row: span 2 / span 2;
+}
+
+.mb-3 {
+ margin-bottom: 0.75rem;
+}
+
+.mr-4 {
+ margin-right: 1rem;
+}
+
+.block {
+ display: block;
+}
+
+.flex {
+ display: flex;
+}
+
+.table {
+ display: table;
+}
+
+.grid {
+ display: grid;
+}
+
+.list-item {
+ display: list-item;
+}
+
+.h-24 {
+ height: 6rem;
+}
+
+.h-full {
+ height: 100%;
+}
+
+.h-12 {
+ height: 3rem;
+}
+
+.h-auto {
+ height: auto;
+}
+
+.h-\[calc\(100vh-9rem\)\] {
+ height: calc(100vh - 9rem);
+}
+
+.min-h-screen {
+ min-height: 100vh;
+}
+
+.w-full {
+ width: 100%;
+}
+
+.w-28 {
+ width: 7rem;
+}
+
+.max-w-xs {
+ max-width: 20rem;
+}
+
+.flex-1 {
+ flex: 1 1 0%;
+}
+
+.flex-shrink-0 {
+ flex-shrink: 0;
+}
+
+.flex-grow {
+ flex-grow: 1;
+}
+
+.list-none {
+ list-style-type: none;
+}
+
+.auto-rows-min {
+ grid-auto-rows: -webkit-min-content;
+ grid-auto-rows: min-content;
+}
+
+.grid-cols-1 {
+ grid-template-columns: repeat(1, minmax(0, 1fr));
+}
+
+.grid-cols-\[48px_1fr\] {
+ grid-template-columns: 48px 1fr;
+}
+
+.grid-cols-\[16rem_1fr_20rem\] {
+ grid-template-columns: 16rem 1fr 20rem;
+}
+
+.grid-cols-\[1fr_10rem\] {
+ grid-template-columns: 1fr 10rem;
+}
+
+.grid-cols-\[16rem_1fr\] {
+ grid-template-columns: 16rem 1fr;
+}
+
+.grid-cols-\[1fr_20rem\] {
+ grid-template-columns: 1fr 20rem;
+}
+
+.flex-row {
+ flex-direction: row;
+}
+
+.flex-col {
+ flex-direction: column;
+}
+
+.flex-wrap {
+ flex-wrap: wrap;
+}
+
+.place-content-center {
+ place-content: center;
+}
+
+.items-center {
+ align-items: center;
+}
+
+.items-baseline {
+ align-items: baseline;
+}
+
+.justify-between {
+ justify-content: space-between;
+}
+
+.gap-5 {
+ gap: 1.25rem;
+}
+
+.gap-4 {
+ gap: 1rem;
+}
+
+.gap-2\.5 {
+ gap: 0.625rem;
+}
+
+.gap-2 {
+ gap: 0.5rem;
+}
+
+.gap-1 {
+ gap: 0.25rem;
+}
+
+.gap-x-4 {
+ -moz-column-gap: 1rem;
+ column-gap: 1rem;
+}
+
+.gap-y-2\.5 {
+ row-gap: 0.625rem;
+}
+
+.gap-y-2 {
+ row-gap: 0.5rem;
+}
+
+.space-y-1 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-y-reverse: 0;
+ margin-top: calc(0.25rem * calc(1 - var(--tw-space-y-reverse)));
+ margin-bottom: calc(0.25rem * var(--tw-space-y-reverse));
+}
+
+.space-y-4 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-y-reverse: 0;
+ margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse)));
+ margin-bottom: calc(1rem * var(--tw-space-y-reverse));
+}
+
+.space-x-2 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-x-reverse: 0;
+ margin-right: calc(0.5rem * var(--tw-space-x-reverse));
+ margin-left: calc(0.5rem * calc(1 - var(--tw-space-x-reverse)));
+}
+
+.space-y-3 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-y-reverse: 0;
+ margin-top: calc(0.75rem * calc(1 - var(--tw-space-y-reverse)));
+ margin-bottom: calc(0.75rem * var(--tw-space-y-reverse));
+}
+
+.space-y-2 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-y-reverse: 0;
+ margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
+ margin-bottom: calc(0.5rem * var(--tw-space-y-reverse));
+}
+
+.divide-x > :not([hidden]) ~ :not([hidden]) {
+ --tw-divide-x-reverse: 0;
+ border-right-width: calc(1px * var(--tw-divide-x-reverse));
+ border-left-width: calc(1px * calc(1 - var(--tw-divide-x-reverse)));
+}
+
+.divide-ink-200 > :not([hidden]) ~ :not([hidden]) {
+ --tw-divide-opacity: 1;
+ border-color: rgb(35 38 46 / var(--tw-divide-opacity));
+}
+
+.overflow-hidden {
+ overflow: hidden;
+}
+
+.overflow-x-scroll {
+ overflow-x: scroll;
+}
+
+.truncate {
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.rounded-sm {
+ border-radius: 0.125rem;
+}
+
+.rounded-md {
+ border-radius: 0.375rem;
+}
+
+.rounded-lg {
+ border-radius: 0.5rem;
+}
+
+.border {
+ border-width: 1px;
+}
+
+.border-b {
+ border-bottom-width: 1px;
+}
+
+.border-t {
+ border-top-width: 1px;
+}
+
+.border-dashed {
+ border-style: dashed;
+}
+
+.border-ink-200 {
+ --tw-border-opacity: 1;
+ border-color: rgb(35 38 46 / var(--tw-border-opacity));
+}
+
+.bg-ink-400 {
+ --tw-bg-opacity: 1;
+ background-color: rgb(22 24 29 / var(--tw-bg-opacity));
+}
+
+.bg-ink-300 {
+ --tw-bg-opacity: 1;
+ background-color: rgb(26 29 35 / var(--tw-bg-opacity));
+}
+
+.bg-ink-200 {
+ --tw-bg-opacity: 1;
+ background-color: rgb(35 38 46 / var(--tw-bg-opacity));
+}
+
+.p-6 {
+ padding: 1.5rem;
+}
+
+.p-4 {
+ padding: 1rem;
+}
+
+.p-2 {
+ padding: 0.5rem;
+}
+
+.p-3 {
+ padding: 0.75rem;
+}
+
+.py-16 {
+ padding-top: 4rem;
+ padding-bottom: 4rem;
+}
+
+.py-8 {
+ padding-top: 2rem;
+ padding-bottom: 2rem;
+}
+
+.px-6 {
+ padding-left: 1.5rem;
+ padding-right: 1.5rem;
+}
+
+.py-2 {
+ padding-top: 0.5rem;
+ padding-bottom: 0.5rem;
+}
+
+.px-4 {
+ padding-left: 1rem;
+ padding-right: 1rem;
+}
+
+.py-4 {
+ padding-top: 1rem;
+ padding-bottom: 1rem;
+}
+
+.pt-5 {
+ padding-top: 1.25rem;
+}
+
+.pl-4 {
+ padding-left: 1rem;
+}
+
+.pr-2 {
+ padding-right: 0.5rem;
+}
+
+.text-center {
+ text-align: center;
+}
+
+.text-lg {
+ font-size: 1.125rem;
+ line-height: 1.75rem;
+}
+
+.text-sm {
+ font-size: 0.875rem;
+ line-height: 1.25rem;
+}
+
+.text-xl {
+ font-size: 1.25rem;
+ line-height: 1.75rem;
+}
+
+.text-xs {
+ font-size: 0.75rem;
+ line-height: 1rem;
+}
+
+.text-2xl {
+ font-size: 1.5rem;
+ line-height: 2rem;
+}
+
+.text-base {
+ font-size: 1rem;
+ line-height: 1.5rem;
+}
+
+.font-medium {
+ font-weight: 500;
+}
+
+.font-normal {
+ font-weight: 400;
+}
+
+.font-semibold {
+ font-weight: 600;
+}
+
+.uppercase {
+ text-transform: uppercase;
+}
+
+.leading-tight {
+ line-height: 1.25;
+}
+
+.leading-none {
+ line-height: 1;
+}
+
+.tracking-wider {
+ letter-spacing: 0.05em;
+}
+
+.tracking-wide {
+ letter-spacing: 0.025em;
+}
+
+.text-vanilla-100 {
+ --tw-text-opacity: 1;
+ color: rgb(255 255 255 / var(--tw-text-opacity));
+}
+
+.text-vanilla-400 {
+ --tw-text-opacity: 1;
+ color: rgb(192 193 195 / var(--tw-text-opacity));
+}
+
+.underline {
+ -webkit-text-decoration-line: underline;
+ text-decoration-line: underline;
+}
+
+.button {
+ display: flex;
+ height: 100%;
+ align-items: center;
+ gap: 0.5rem;
+ border-radius: 0.125rem;
+ padding-top: 0.5rem;
+ padding-bottom: 0.5rem;
+ padding-left: 1rem;
+ padding-right: 1rem;
+ font-size: 0.75rem;
+ line-height: 1rem;
+}
+
+.sidebar {
+ display: flex;
+ height: 2rem;
+ width: 100%;
+ align-items: center;
+ gap: 0.5rem;
+ border-radius: 0.125rem;
+ padding: 0.5rem;
+ font-size: 0.875rem;
+ line-height: 1.25rem;
+ --tw-text-opacity: 1;
+ color: rgb(192 193 195 / var(--tw-text-opacity));
+}
+
+.sidebar:hover {
+ --tw-bg-opacity: 1;
+ background-color: rgb(26 29 35 / var(--tw-bg-opacity));
+ --tw-text-opacity: 1;
+ color: rgb(255 255 255 / var(--tw-text-opacity));
+}
+
+.sidebar.selected {
+ --tw-bg-opacity: 1;
+ background-color: rgb(35 38 46 / var(--tw-bg-opacity));
+ --tw-text-opacity: 1;
+ color: rgb(255 255 255 / var(--tw-text-opacity));
+}
+
+.sidebar.selected:hover {
+ --tw-bg-opacity: 1;
+ background-color: rgb(26 29 35 / var(--tw-bg-opacity));
+ --tw-text-opacity: 1;
+ color: rgb(255 255 255 / var(--tw-text-opacity));
+}
+
+.sidebar .tag {
+ justify-self: end;
+ border-radius: 9999px;
+ --tw-bg-opacity: 1;
+ background-color: rgb(35 38 46 / var(--tw-bg-opacity));
+ padding-left: 0.5rem;
+ padding-right: 0.5rem;
+ padding-top: 0.25rem;
+ padding-bottom: 0.25rem;
+ font-size: 0.75rem;
+ line-height: 1rem;
+ line-height: 1;
+}
+
+.sidebar .label {
+ flex-grow: 1;
+ text-align: left;
+ line-height: 1;
+}
+
+.info {
+ display: flex;
+ align-items: center;
+ gap: 0.25rem;
+ font-size: 0.875rem;
+ line-height: 1.25rem;
+ --tw-text-opacity: 1;
+ color: rgb(192 193 195 / var(--tw-text-opacity));
+}
+
+.highlight .hll,
+.highlight .hl {
+ background-color: #21242c;
+ display: block;
+}
+
+.highlight .hlr {
+ background-color: #582c27;
+ display: inherit;
+}
+
+.highlight .hlg {
+ background-color: #004f41;
+ display: inherit;
+}
+
+.highlight .c {
+ color: #75715e;
+}
+
+/* Comment */
+
+.highlight .err {
+ color: #960050;
+ background-color: #1e0010;
+}
+
+/* Error */
+
+.highlight .k {
+ color: #66d9ef;
+}
+
+/* Keyword */
+
+.highlight .l {
+ color: #ae81ff;
+}
+
+/* Literal */
+
+.highlight .n {
+ color: #f8f8f2;
+}
+
+/* Name */
+
+.highlight .o {
+ color: #f92672;
+}
+
+/* Operator */
+
+.highlight .p {
+ color: #f8f8f2;
+}
+
+/* Punctuation */
+
+.highlight .cm {
+ color: #75715e;
+}
+
+/* Comment.Multiline */
+
+.highlight .cp {
+ color: #75715e;
+}
+
+/* Comment.Preproc */
+
+.highlight .c1,
+.highlight .ch {
+ color: #75715e;
+}
+
+/* Comment.Single */
+
+.highlight .cs {
+ color: #75715e;
+}
+
+/* Comment.Special */
+
+.highlight .ge {
+ font-style: italic;
+}
+
+/* Generic.Emph */
+
+.highlight .gs {
+ font-weight: bold;
+}
+
+/* Generic.Strong */
+
+.highlight .kc {
+ color: #66d9ef;
+}
+
+/* Keyword.Constant */
+
+.highlight .kd {
+ color: #66d9ef;
+}
+
+/* Keyword.Declaration */
+
+.highlight .kn {
+ color: #f92672;
+}
+
+/* Keyword.Namespace */
+
+.highlight .kp {
+ color: #66d9ef;
+}
+
+/* Keyword.Pseudo */
+
+.highlight .kr {
+ color: #66d9ef;
+}
+
+/* Keyword.Reserved */
+
+.highlight .kt {
+ color: #66d9ef;
+}
+
+/* Keyword.Type */
+
+.highlight .ld {
+ color: #e6db74;
+}
+
+/* Literal.Date */
+
+.highlight .m {
+ color: #ae81ff;
+}
+
+/* Literal.Number */
+
+.highlight .s {
+ color: #e6db74;
+}
+
+/* Literal.String */
+
+.highlight .na {
+ color: #a6e22e;
+}
+
+/* Name.Attribute */
+
+.highlight .nb {
+ color: #f8f8f2;
+}
+
+/* Name.Builtin */
+
+.highlight .nc {
+ color: #a6e22e;
+}
+
+/* Name.Class */
+
+.highlight .no {
+ color: #66d9ef;
+}
+
+/* Name.Constant */
+
+.highlight .nd {
+ color: #a6e22e;
+}
+
+/* Name.Decorator */
+
+.highlight .ni {
+ color: #f8f8f2;
+}
+
+/* Name.Entity */
+
+.highlight .ne {
+ color: #a6e22e;
+}
+
+/* Name.Exception */
+
+.highlight .nf {
+ color: #a6e22e;
+}
+
+/* Name.Function */
+
+.highlight .fm {
+ color: #a6e22e;
+}
+
+/* Name.SpecialFunction */
+
+.highlight .nl {
+ color: #f8f8f2;
+}
+
+/* Name.Label */
+
+.highlight .nn {
+ color: #f8f8f2;
+}
+
+/* Name.Namespace */
+
+.highlight .nx {
+ color: #a6e22e;
+}
+
+/* Name.Other */
+
+.highlight .py {
+ color: #f8f8f2;
+}
+
+/* Name.Property */
+
+.highlight .nt {
+ color: #f92672;
+}
+
+/* Name.Tag */
+
+.highlight .nv {
+ color: #f8f8f2;
+}
+
+/* Name.Variable */
+
+.highlight .ow {
+ color: #f92672;
+}
+
+/* Operator.Word */
+
+.highlight .w {
+ color: #f8f8f2;
+}
+
+/* Text.Whitespace */
+
+.highlight .mf {
+ color: #ae81ff;
+}
+
+/* Literal.Number.Float */
+
+.highlight .mh {
+ color: #ae81ff;
+}
+
+/* Literal.Number.Hex */
+
+.highlight .mi {
+ color: #ae81ff;
+}
+
+/* Literal.Number.Integer */
+
+.highlight .mo {
+ color: #ae81ff;
+}
+
+/* Literal.Number.Oct */
+
+.highlight .sb {
+ color: #e6db74;
+}
+
+/* Literal.String.Backtick */
+
+.highlight .sc {
+ color: #e6db74;
+}
+
+/* Literal.String.Char */
+
+.highlight .sd {
+ color: #e6db74;
+}
+
+/* Literal.String.Doc */
+
+.highlight .s2 {
+ color: #e6db74;
+}
+
+/* Literal.String.Double */
+
+.highlight .se {
+ color: #ae81ff;
+}
+
+/* Literal.String.Escape */
+
+.highlight .sh {
+ color: #e6db74;
+}
+
+/* Literal.String.Heredoc */
+
+.highlight .si {
+ color: #e6db74;
+}
+
+/* Literal.String.Interpol */
+
+.highlight .sx {
+ color: #e6db74;
+}
+
+/* Literal.String.Other */
+
+.highlight .sr {
+ color: #e6db74;
+}
+
+/* Literal.String.Regex */
+
+.highlight .s1 {
+ color: #e6db74;
+}
+
+/* Literal.String.Single */
+
+.highlight .ss {
+ color: #e6db74;
+}
+
+/* Literal.String.Symbol */
+
+.highlight .bp {
+ color: #f8f8f2;
+}
+
+/* Name.Builtin.Pseudo */
+
+.highlight .vc {
+ color: #f8f8f2;
+}
+
+/* Name.Variable.Class */
+
+.highlight .vg {
+ color: #f8f8f2;
+}
+
+/* Name.Variable.Global */
+
+.highlight .vi {
+ color: #f8f8f2;
+}
+
+/* Name.Variable.Instance */
+
+.highlight .il {
+ color: #ae81ff;
+}
+
+/* Literal.Number.Integer.Long */
+
+.highlight .vm {
+ color: #a6e22e;
+}
+
+.highlight .gh {
+}
+
+/* Generic Heading & Diff Header */
+
+.highlight .gu {
+ color: #75715e;
+}
+
+/* Generic.Subheading & Diff Unified/Comment? */
+
+.highlight .gd {
+ color: #f92672;
+}
+
+/* Generic.Deleted & Diff Deleted */
+
+.highlight .gi {
+ color: #a6e22e;
+}
+
+/* Generic.Inserted & Diff Inserted */
+
+.highlight .ln {
+ padding-right: 0.75rem;
+ padding-left: 0.75rem;
+ color: #6a737d;
+}
+
+.highlight .hl .ln {
+ color: #c0c1c3;
+}
+
+.highlight .hl .ln,
+.highlight .hll .ln {
+ background-color: #21242c;
+}
+
+.highlight .hlg .ln {
+ background-color: #004f41;
+}
+
+.highlight .hlr .ln {
+ background-color: #582c27;
+}
+
+.highlight pre {
+ font-size: 13px;
+ padding-top: 0.75rem;
+ padding-bottom: 0.5rem;
+ color: #ccc;
+ overflow-x: auto;
+ position: relative;
+}
+
+.highlight :not(.prose) pre {
+ background-color: #16181d !important;
+}
+
+.highlighttable {
+ background: #16181d;
+ width: 100%;
+}
+
+.highlighttable td.code {
+ width: 100%;
+ padding-left: 0.75rem;
+}
+
+.highlighttable .linenos {
+ padding-left: 0.75rem;
+ color: #6a737d;
+}
+
+.highlighttable .linenos pre {
+ font-size: 13px;
+}
+
+.hover\:bg-ink-300:hover {
+ --tw-bg-opacity: 1;
+ background-color: rgb(26 29 35 / var(--tw-bg-opacity));
+}
+
+.hover\:bg-ink-100:hover {
+ --tw-bg-opacity: 1;
+ background-color: rgb(42 46 55 / var(--tw-bg-opacity));
+}
+
+.hover\:text-vanilla-200:hover {
+ --tw-text-opacity: 1;
+ color: rgb(245 245 245 / var(--tw-text-opacity));
+}
+
+@media (min-width: 640px) {
+ .sm\:w-auto {
+ width: auto;
+ }
+
+ .sm\:space-y-0 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-y-reverse: 0;
+ margin-top: calc(0px * calc(1 - var(--tw-space-y-reverse)));
+ margin-bottom: calc(0px * var(--tw-space-y-reverse));
+ }
+}
+
+@media (min-width: 768px) {
+ .md\:flex {
+ display: flex;
+ }
+
+ .md\:items-start {
+ align-items: flex-start;
+ }
+
+ .md\:space-x-4 > :not([hidden]) ~ :not([hidden]) {
+ --tw-space-x-reverse: 0;
+ margin-right: calc(1rem * var(--tw-space-x-reverse));
+ margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse)));
+ }
+}
+
+@media (min-width: 1280px) {
+ .xl\:text-sm {
+ font-size: 0.875rem;
+ line-height: 1.25rem;
+ }
+}
diff --git a/command/analyzer/dryrun/render/views/assets/images/deepsource.svg b/command/analyzer/dryrun/render/views/assets/images/deepsource.svg
new file mode 100644
index 00000000..29c007d4
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/assets/images/deepsource.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/command/analyzer/dryrun/render/views/assets/images/favicon-dark.svg b/command/analyzer/dryrun/render/views/assets/images/favicon-dark.svg
new file mode 100644
index 00000000..fe8167c4
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/assets/images/favicon-dark.svg
@@ -0,0 +1,3 @@
+
diff --git a/command/analyzer/dryrun/render/views/assets/images/favicon.svg b/command/analyzer/dryrun/render/views/assets/images/favicon.svg
new file mode 100644
index 00000000..c41e1dcf
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/assets/images/favicon.svg
@@ -0,0 +1,3 @@
+
diff --git a/command/analyzer/dryrun/render/views/assets/images/feather-sprite.svg b/command/analyzer/dryrun/render/views/assets/images/feather-sprite.svg
new file mode 100644
index 00000000..f958c114
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/assets/images/feather-sprite.svg
@@ -0,0 +1,1587 @@
+
diff --git a/command/analyzer/dryrun/render/views/header.html b/command/analyzer/dryrun/render/views/header.html
new file mode 100644
index 00000000..58000188
--- /dev/null
+++ b/command/analyzer/dryrun/render/views/header.html
@@ -0,0 +1,96 @@
+{{ define "header" }}
+
+
+
+
+ {{ .AnalyzerShortcode }}
+
+
+ {{ if not (eq .AnalysisResultData.UniqueIssuesCount 0) }}
+