tons of code
This commit is contained in:
parent
0c47dc0cee
commit
d4e49919c4
51
Makefile
51
Makefile
@ -1,18 +1,51 @@
|
|||||||
|
|
||||||
|
SHELL := /bin/bash
|
||||||
|
tsgo_dir = ./kitcom/internal/tsgo
|
||||||
|
my_package = efprojects.com/kitten-ipc/kitcom/internal/tsgo
|
||||||
|
|
||||||
default:
|
default:
|
||||||
@echo "Please read Makefile for available targets"
|
@echo "Please read Makefile for available targets"
|
||||||
|
|
||||||
vendor_tsgo:
|
vendor_tsgo:
|
||||||
@mkdir -p ./kitcom/internal/tsgo
|
@mkdir -p $(tsgo_dir)
|
||||||
@git clone --depth 1 https://github.com/microsoft/typescript-go
|
@git clone --depth 1 https://github.com/microsoft/typescript-go
|
||||||
@echo Renaming packages...
|
@find ./typescript-go/internal -type file -name "*.go" -exec sed -i -e 's!"github.com/microsoft/typescript-go/internal!"$(my_package)!g' {} \;
|
||||||
@find ./typescript-go/internal -type file -name "*.go" -exec sed -i -e 's!"github.com/microsoft/typescript-go/internal!"efprojects.com/kitten-ipc/kitcom/internal/tsgo!g' {} \;
|
@cp -r ./typescript-go/internal/* $(tsgo_dir)
|
||||||
@cp -r ./typescript-go/internal/* ./kitcom/internal/tsgo
|
|
||||||
@git add ./kitcom/internal/
|
|
||||||
@echo Cleaning up...
|
|
||||||
@rm -rf @rm -rf typescript-go
|
@rm -rf @rm -rf typescript-go
|
||||||
echo Successfully copied tsgo code and renamed packages.
|
|
||||||
|
|
||||||
remove_tsgo_tests:
|
remove_tsgo_tests:
|
||||||
@find ./kitcom/internal/tsgo -name "*_test.go" -exec rm {} \;
|
@find $(tsgo_dir) -name "*_test.go" -exec rm {} \;
|
||||||
|
|
||||||
.PHONY: vendor_tsgo remove_tsgo_tests
|
# just for "fun"
|
||||||
|
remove_tsgo_unused:
|
||||||
|
@set -e ; \
|
||||||
|
dirs=`find $(tsgo_dir) -type d -mindepth 1 -maxdepth 1` ; \
|
||||||
|
nessesary_old="parser " ; \
|
||||||
|
nessesary="$$nessesary_old" ; \
|
||||||
|
while true; do \
|
||||||
|
for d in $$dirs; do \
|
||||||
|
pkg=`basename "$$d"` ; \
|
||||||
|
for usedIn in $$nessesary; do \
|
||||||
|
if grep -q -R "$(my_package)/$$pkg" "$(tsgo_dir)/$$usedIn" > /dev/null; then \
|
||||||
|
if [[ "$$nessesary" != *"$$pkg "* ]]; then \
|
||||||
|
nessesary="$$nessesary $$pkg " ; \
|
||||||
|
fi ; \
|
||||||
|
break ; \
|
||||||
|
fi ; \
|
||||||
|
done ; \
|
||||||
|
done ; \
|
||||||
|
if [[ "$$nessesary" == "$$nessesary_old" ]]; then \
|
||||||
|
break ; \
|
||||||
|
fi ; \
|
||||||
|
nessesary_old="$$nessesary" ; \
|
||||||
|
done ; \
|
||||||
|
for d in $$dirs; do \
|
||||||
|
pkg=`basename $$d` ; \
|
||||||
|
if [[ "$$nessesary" != *"$$pkg "* ]]; then \
|
||||||
|
echo "removing $$pkg" ; \
|
||||||
|
rm -rf $(tsgo_dir)/$$pkg ; \
|
||||||
|
fi ; \
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
|
.PHONY: vendor_tsgo remove_tsgo_tests remove_tsgo_unused
|
||||||
|
|||||||
@ -8706,6 +8706,10 @@ func (node *TemplateHead) Clone(f NodeFactoryCoercible) *Node {
|
|||||||
return cloneNode(f.AsNodeFactory().NewTemplateHead(node.Text, node.RawText, node.TemplateFlags), node.AsNode(), f.AsNodeFactory().hooks)
|
return cloneNode(f.AsNodeFactory().NewTemplateHead(node.Text, node.RawText, node.TemplateFlags), node.AsNode(), f.AsNodeFactory().hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IsTemplateHead(node *Node) bool {
|
||||||
|
return node.Kind == KindTemplateHead
|
||||||
|
}
|
||||||
|
|
||||||
// TemplateMiddle
|
// TemplateMiddle
|
||||||
|
|
||||||
type TemplateMiddle struct {
|
type TemplateMiddle struct {
|
||||||
@ -8726,6 +8730,10 @@ func (node *TemplateMiddle) Clone(f NodeFactoryCoercible) *Node {
|
|||||||
return cloneNode(f.AsNodeFactory().NewTemplateMiddle(node.Text, node.RawText, node.TemplateFlags), node.AsNode(), f.AsNodeFactory().hooks)
|
return cloneNode(f.AsNodeFactory().NewTemplateMiddle(node.Text, node.RawText, node.TemplateFlags), node.AsNode(), f.AsNodeFactory().hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IsTemplateMiddle(node *Node) bool {
|
||||||
|
return node.Kind == KindTemplateMiddle
|
||||||
|
}
|
||||||
|
|
||||||
// TemplateTail
|
// TemplateTail
|
||||||
|
|
||||||
type TemplateTail struct {
|
type TemplateTail struct {
|
||||||
@ -8746,6 +8754,10 @@ func (node *TemplateTail) Clone(f NodeFactoryCoercible) *Node {
|
|||||||
return cloneNode(f.AsNodeFactory().NewTemplateTail(node.Text, node.RawText, node.TemplateFlags), node.AsNode(), f.AsNodeFactory().hooks)
|
return cloneNode(f.AsNodeFactory().NewTemplateTail(node.Text, node.RawText, node.TemplateFlags), node.AsNode(), f.AsNodeFactory().hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IsTemplateTail(node *Node) bool {
|
||||||
|
return node.Kind == KindTemplateTail
|
||||||
|
}
|
||||||
|
|
||||||
// TemplateLiteralTypeNode
|
// TemplateLiteralTypeNode
|
||||||
|
|
||||||
type TemplateLiteralTypeNode struct {
|
type TemplateLiteralTypeNode struct {
|
||||||
@ -9635,6 +9647,10 @@ func (node *JSDocTypeExpression) Clone(f NodeFactoryCoercible) *Node {
|
|||||||
return cloneNode(f.AsNodeFactory().NewJSDocTypeExpression(node.Type), node.AsNode(), f.AsNodeFactory().hooks)
|
return cloneNode(f.AsNodeFactory().NewJSDocTypeExpression(node.Type), node.AsNode(), f.AsNodeFactory().hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IsJSDocTypeExpression(node *Node) bool {
|
||||||
|
return node.Kind == KindJSDocTypeExpression
|
||||||
|
}
|
||||||
|
|
||||||
// JSDocNonNullableType
|
// JSDocNonNullableType
|
||||||
|
|
||||||
type JSDocNonNullableType struct {
|
type JSDocNonNullableType struct {
|
||||||
@ -10565,6 +10581,10 @@ func (node *JSDocTypeLiteral) Clone(f NodeFactoryCoercible) *Node {
|
|||||||
return cloneNode(f.AsNodeFactory().NewJSDocTypeLiteral(node.JSDocPropertyTags, node.IsArrayType), node.AsNode(), f.AsNodeFactory().hooks)
|
return cloneNode(f.AsNodeFactory().NewJSDocTypeLiteral(node.JSDocPropertyTags, node.IsArrayType), node.AsNode(), f.AsNodeFactory().hooks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IsJSDocTypeLiteral(node *Node) bool {
|
||||||
|
return node.Kind == KindJSDocTypeLiteral
|
||||||
|
}
|
||||||
|
|
||||||
// JSDocSignature
|
// JSDocSignature
|
||||||
type JSDocSignature struct {
|
type JSDocSignature struct {
|
||||||
TypeNodeBase
|
TypeNodeBase
|
||||||
|
|||||||
@ -43,7 +43,6 @@ const (
|
|||||||
InternalSymbolNameClass = InternalSymbolNamePrefix + "class" // Unnamed class expression
|
InternalSymbolNameClass = InternalSymbolNamePrefix + "class" // Unnamed class expression
|
||||||
InternalSymbolNameFunction = InternalSymbolNamePrefix + "function" // Unnamed function expression
|
InternalSymbolNameFunction = InternalSymbolNamePrefix + "function" // Unnamed function expression
|
||||||
InternalSymbolNameComputed = InternalSymbolNamePrefix + "computed" // Computed property name declaration with dynamic name
|
InternalSymbolNameComputed = InternalSymbolNamePrefix + "computed" // Computed property name declaration with dynamic name
|
||||||
InternalSymbolNameResolving = InternalSymbolNamePrefix + "resolving" // Indicator symbol used to mark partially resolved type aliases
|
|
||||||
InternalSymbolNameInstantiationExpression = InternalSymbolNamePrefix + "instantiationExpression" // Instantiation expressions
|
InternalSymbolNameInstantiationExpression = InternalSymbolNamePrefix + "instantiationExpression" // Instantiation expressions
|
||||||
InternalSymbolNameImportAttributes = InternalSymbolNamePrefix + "importAttributes"
|
InternalSymbolNameImportAttributes = InternalSymbolNamePrefix + "importAttributes"
|
||||||
InternalSymbolNameExportEquals = "export=" // Export assignment symbol
|
InternalSymbolNameExportEquals = "export=" // Export assignment symbol
|
||||||
|
|||||||
@ -2641,7 +2641,7 @@ func GetNodeAtPosition(file *SourceFile, position int, includeJSDoc bool) *Node
|
|||||||
}
|
}
|
||||||
if child == nil {
|
if child == nil {
|
||||||
current.ForEachChild(func(node *Node) bool {
|
current.ForEachChild(func(node *Node) bool {
|
||||||
if nodeContainsPosition(node, position) {
|
if nodeContainsPosition(node, position) && node.Kind != KindJSExportAssignment && node.Kind != KindCommonJSExport {
|
||||||
child = node
|
child = node
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,653 +0,0 @@
|
|||||||
package astnav
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/ast"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/core"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/scanner"
|
|
||||||
)
|
|
||||||
|
|
||||||
func GetTouchingPropertyName(sourceFile *ast.SourceFile, position int) *ast.Node {
|
|
||||||
return getReparsedNodeForNode(getTokenAtPosition(sourceFile, position, false /*allowPositionInLeadingTrivia*/, func(node *ast.Node) bool {
|
|
||||||
return ast.IsPropertyNameLiteral(node) || ast.IsKeywordKind(node.Kind) || ast.IsPrivateIdentifier(node)
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the given node is a declaration name node in a JSDoc comment that is subject to reparsing, return the declaration name node
|
|
||||||
// for the corresponding reparsed construct. Otherwise, just return the node.
|
|
||||||
func getReparsedNodeForNode(node *ast.Node) *ast.Node {
|
|
||||||
if node.Flags&ast.NodeFlagsJSDoc != 0 && (ast.IsIdentifier(node) || ast.IsPrivateIdentifier(node)) {
|
|
||||||
parent := node.Parent
|
|
||||||
if (ast.IsJSDocTypedefTag(parent) || ast.IsJSDocCallbackTag(parent) || ast.IsJSDocPropertyTag(parent) || ast.IsJSDocParameterTag(parent) || ast.IsImportClause(parent) || ast.IsImportSpecifier(parent)) && parent.Name() == node {
|
|
||||||
// Reparsing preserves the location of the name. Thus, a search at the position of the name with JSDoc excluded
|
|
||||||
// finds the containing reparsed declaration node.
|
|
||||||
if reparsed := ast.GetNodeAtPosition(ast.GetSourceFileOfNode(node), node.Pos(), false); reparsed != nil {
|
|
||||||
if name := reparsed.Name(); name != nil && name.Pos() == node.Pos() {
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetTouchingToken(sourceFile *ast.SourceFile, position int) *ast.Node {
|
|
||||||
return getTokenAtPosition(sourceFile, position, false /*allowPositionInLeadingTrivia*/, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetTokenAtPosition(sourceFile *ast.SourceFile, position int) *ast.Node {
|
|
||||||
return getTokenAtPosition(sourceFile, position, true /*allowPositionInLeadingTrivia*/, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTokenAtPosition(
|
|
||||||
sourceFile *ast.SourceFile,
|
|
||||||
position int,
|
|
||||||
allowPositionInLeadingTrivia bool,
|
|
||||||
includePrecedingTokenAtEndPosition func(node *ast.Node) bool,
|
|
||||||
) *ast.Node {
|
|
||||||
// getTokenAtPosition returns a token at the given position in the source file.
|
|
||||||
// The token can be a real node in the AST, or a synthesized token constructed
|
|
||||||
// with information from the scanner. Synthesized tokens are only created when
|
|
||||||
// needed, and they are stored in the source file's token cache such that multiple
|
|
||||||
// calls to getTokenAtPosition with the same position will return the same object
|
|
||||||
// in memory. If there is no token at the given position (possible when
|
|
||||||
// `allowPositionInLeadingTrivia` is false), the lowest node that encloses the
|
|
||||||
// position is returned.
|
|
||||||
|
|
||||||
// `next` tracks the node whose children will be visited on the next iteration.
|
|
||||||
// `prevSubtree` is a node whose end position is equal to the target position,
|
|
||||||
// only if `includePrecedingTokenAtEndPosition` is provided. Once set, the next
|
|
||||||
// iteration of the loop will test the rightmost token of `prevSubtree` to see
|
|
||||||
// if it should be returned.
|
|
||||||
var next, prevSubtree *ast.Node
|
|
||||||
current := sourceFile.AsNode()
|
|
||||||
// `left` tracks the lower boundary of the node/token that could be returned,
|
|
||||||
// and is eventually the scanner's start position, if the scanner is used.
|
|
||||||
left := 0
|
|
||||||
|
|
||||||
testNode := func(node *ast.Node) int {
|
|
||||||
if node.Kind != ast.KindEndOfFile && node.End() == position && includePrecedingTokenAtEndPosition != nil {
|
|
||||||
prevSubtree = node
|
|
||||||
}
|
|
||||||
|
|
||||||
if node.End() < position || node.Kind != ast.KindEndOfFile && node.End() == position {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
if getPosition(node, sourceFile, allowPositionInLeadingTrivia) > position {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// We zero in on the node that contains the target position by visiting each
|
|
||||||
// child and JSDoc comment of the current node. Node children are walked in
|
|
||||||
// order, while node lists are binary searched.
|
|
||||||
visitNode := func(node *ast.Node, _ *ast.NodeVisitor) *ast.Node {
|
|
||||||
// We can't abort visiting children, so once a match is found, we set `next`
|
|
||||||
// and do nothing on subsequent visits.
|
|
||||||
if node != nil && node.Flags&ast.NodeFlagsReparsed == 0 && next == nil {
|
|
||||||
switch testNode(node) {
|
|
||||||
case -1:
|
|
||||||
if !ast.IsJSDocKind(node.Kind) {
|
|
||||||
// We can't move the left boundary into or beyond JSDoc,
|
|
||||||
// because we may end up returning the token after this JSDoc,
|
|
||||||
// constructing it with the scanner, and we need to include
|
|
||||||
// all its leading trivia in its position.
|
|
||||||
left = node.End()
|
|
||||||
}
|
|
||||||
case 0:
|
|
||||||
next = node
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
|
|
||||||
visitNodeList := func(nodeList *ast.NodeList, _ *ast.NodeVisitor) *ast.NodeList {
|
|
||||||
if nodeList != nil && len(nodeList.Nodes) > 0 && next == nil {
|
|
||||||
if nodeList.End() == position && includePrecedingTokenAtEndPosition != nil {
|
|
||||||
left = nodeList.End()
|
|
||||||
prevSubtree = nodeList.Nodes[len(nodeList.Nodes)-1]
|
|
||||||
} else if nodeList.End() <= position {
|
|
||||||
left = nodeList.End()
|
|
||||||
} else if nodeList.Pos() <= position {
|
|
||||||
nodes := nodeList.Nodes
|
|
||||||
index, match := core.BinarySearchUniqueFunc(nodes, func(middle int, node *ast.Node) int {
|
|
||||||
if node.Flags&ast.NodeFlagsReparsed != 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
cmp := testNode(node)
|
|
||||||
if cmp < 0 {
|
|
||||||
left = node.End()
|
|
||||||
}
|
|
||||||
return cmp
|
|
||||||
})
|
|
||||||
if match && nodes[index].Flags&ast.NodeFlagsReparsed != 0 {
|
|
||||||
// filter and search again
|
|
||||||
nodes = core.Filter(nodes, func(node *ast.Node) bool {
|
|
||||||
return node.Flags&ast.NodeFlagsReparsed == 0
|
|
||||||
})
|
|
||||||
index, match = core.BinarySearchUniqueFunc(nodes, func(middle int, node *ast.Node) int {
|
|
||||||
cmp := testNode(node)
|
|
||||||
if cmp < 0 {
|
|
||||||
left = node.End()
|
|
||||||
}
|
|
||||||
return cmp
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if match {
|
|
||||||
next = nodes[index]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodeList
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
VisitEachChildAndJSDoc(current, sourceFile, visitNode, visitNodeList)
|
|
||||||
// If prevSubtree was set on the last iteration, it ends at the target position.
|
|
||||||
// Check if the rightmost token of prevSubtree should be returned based on the
|
|
||||||
// `includePrecedingTokenAtEndPosition` callback.
|
|
||||||
if prevSubtree != nil {
|
|
||||||
child := FindPrecedingTokenEx(sourceFile, position, prevSubtree, false /*excludeJSDoc*/)
|
|
||||||
if child != nil && child.End() == position && includePrecedingTokenAtEndPosition(child) {
|
|
||||||
// Optimization: includePrecedingTokenAtEndPosition only ever returns true
|
|
||||||
// for real AST nodes, so we don't run the scanner here.
|
|
||||||
return child
|
|
||||||
}
|
|
||||||
prevSubtree = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// No node was found that contains the target position, so we've gone as deep as
|
|
||||||
// we can in the AST. We've either found a token, or we need to run the scanner
|
|
||||||
// to construct one that isn't stored in the AST.
|
|
||||||
if next == nil {
|
|
||||||
if ast.IsTokenKind(current.Kind) || shouldSkipChild(current) {
|
|
||||||
return current
|
|
||||||
}
|
|
||||||
scanner := scanner.GetScannerForSourceFile(sourceFile, left)
|
|
||||||
for left < current.End() {
|
|
||||||
token := scanner.Token()
|
|
||||||
tokenFullStart := scanner.TokenFullStart()
|
|
||||||
tokenStart := core.IfElse(allowPositionInLeadingTrivia, tokenFullStart, scanner.TokenStart())
|
|
||||||
tokenEnd := scanner.TokenEnd()
|
|
||||||
if tokenStart <= position && (position < tokenEnd) {
|
|
||||||
if token == ast.KindIdentifier || !ast.IsTokenKind(token) {
|
|
||||||
if ast.IsJSDocKind(current.Kind) {
|
|
||||||
return current
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("did not expect %s to have %s in its trivia", current.Kind.String(), token.String()))
|
|
||||||
}
|
|
||||||
return sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, current)
|
|
||||||
}
|
|
||||||
if includePrecedingTokenAtEndPosition != nil && tokenEnd == position {
|
|
||||||
prevToken := sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, current)
|
|
||||||
if includePrecedingTokenAtEndPosition(prevToken) {
|
|
||||||
return prevToken
|
|
||||||
}
|
|
||||||
}
|
|
||||||
left = tokenEnd
|
|
||||||
scanner.Scan()
|
|
||||||
}
|
|
||||||
return current
|
|
||||||
}
|
|
||||||
current = next
|
|
||||||
left = current.Pos()
|
|
||||||
next = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getPosition(node *ast.Node, sourceFile *ast.SourceFile, allowPositionInLeadingTrivia bool) int {
|
|
||||||
if allowPositionInLeadingTrivia {
|
|
||||||
return node.Pos()
|
|
||||||
}
|
|
||||||
return scanner.GetTokenPosOfNode(node, sourceFile, true /*includeJSDoc*/)
|
|
||||||
}
|
|
||||||
|
|
||||||
func findRightmostNode(node *ast.Node) *ast.Node {
|
|
||||||
var next *ast.Node
|
|
||||||
current := node
|
|
||||||
visitNode := func(node *ast.Node, _ *ast.NodeVisitor) *ast.Node {
|
|
||||||
if node != nil {
|
|
||||||
next = node
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
visitNodes := func(nodeList *ast.NodeList, visitor *ast.NodeVisitor) *ast.NodeList {
|
|
||||||
if nodeList != nil {
|
|
||||||
if rightmost := ast.FindLastVisibleNode(nodeList.Nodes); rightmost != nil {
|
|
||||||
next = rightmost
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodeList
|
|
||||||
}
|
|
||||||
visitor := getNodeVisitor(visitNode, visitNodes)
|
|
||||||
|
|
||||||
for {
|
|
||||||
current.VisitEachChild(visitor)
|
|
||||||
if next == nil {
|
|
||||||
return current
|
|
||||||
}
|
|
||||||
current = next
|
|
||||||
next = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func VisitEachChildAndJSDoc(
|
|
||||||
node *ast.Node,
|
|
||||||
sourceFile *ast.SourceFile,
|
|
||||||
visitNode func(*ast.Node, *ast.NodeVisitor) *ast.Node,
|
|
||||||
visitNodes func(*ast.NodeList, *ast.NodeVisitor) *ast.NodeList,
|
|
||||||
) {
|
|
||||||
visitor := getNodeVisitor(visitNode, visitNodes)
|
|
||||||
if node.Flags&ast.NodeFlagsHasJSDoc != 0 {
|
|
||||||
for _, jsdoc := range node.JSDoc(sourceFile) {
|
|
||||||
if visitor.Hooks.VisitNode != nil {
|
|
||||||
visitor.Hooks.VisitNode(jsdoc, visitor)
|
|
||||||
} else {
|
|
||||||
visitor.VisitNode(jsdoc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
node.VisitEachChild(visitor)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
comparisonLessThan = -1
|
|
||||||
comparisonEqualTo = 0
|
|
||||||
comparisonGreaterThan = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
// Finds the leftmost token satisfying `position < token.End()`.
|
|
||||||
// If the leftmost token satisfying `position < token.End()` is invalid, or if position
|
|
||||||
// is in the trivia of that leftmost token,
|
|
||||||
// we will find the rightmost valid token with `token.End() <= position`.
|
|
||||||
func FindPrecedingToken(sourceFile *ast.SourceFile, position int) *ast.Node {
|
|
||||||
return FindPrecedingTokenEx(sourceFile, position, nil, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func FindPrecedingTokenEx(sourceFile *ast.SourceFile, position int, startNode *ast.Node, excludeJSDoc bool) *ast.Node {
|
|
||||||
var find func(node *ast.Node) *ast.Node
|
|
||||||
find = func(n *ast.Node) *ast.Node {
|
|
||||||
if ast.IsNonWhitespaceToken(n) && n.Kind != ast.KindEndOfFile {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// `foundChild` is the leftmost node that contains the target position.
|
|
||||||
// `prevChild` is the last visited child of the current node.
|
|
||||||
var foundChild, prevChild *ast.Node
|
|
||||||
visitNode := func(node *ast.Node, _ *ast.NodeVisitor) *ast.Node {
|
|
||||||
// skip synthesized nodes (that will exist now because of jsdoc handling)
|
|
||||||
if node == nil || node.Flags&ast.NodeFlagsReparsed != 0 {
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
if foundChild != nil { // We cannot abort visiting children, so once the desired child is found, we do nothing.
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
if position < node.End() && (prevChild == nil || prevChild.End() <= position) {
|
|
||||||
foundChild = node
|
|
||||||
} else {
|
|
||||||
prevChild = node
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
visitNodes := func(nodeList *ast.NodeList, _ *ast.NodeVisitor) *ast.NodeList {
|
|
||||||
if foundChild != nil {
|
|
||||||
return nodeList
|
|
||||||
}
|
|
||||||
if nodeList != nil && len(nodeList.Nodes) > 0 {
|
|
||||||
nodes := nodeList.Nodes
|
|
||||||
index, match := core.BinarySearchUniqueFunc(nodes, func(middle int, _ *ast.Node) int {
|
|
||||||
// synthetic jsdoc nodes should have jsdocNode.End() <= n.Pos()
|
|
||||||
if nodes[middle].Flags&ast.NodeFlagsReparsed != 0 {
|
|
||||||
return comparisonLessThan
|
|
||||||
}
|
|
||||||
if position < nodes[middle].End() {
|
|
||||||
if middle == 0 || position >= nodes[middle-1].End() {
|
|
||||||
return comparisonEqualTo
|
|
||||||
}
|
|
||||||
return comparisonGreaterThan
|
|
||||||
}
|
|
||||||
return comparisonLessThan
|
|
||||||
})
|
|
||||||
|
|
||||||
if match {
|
|
||||||
foundChild = nodes[index]
|
|
||||||
}
|
|
||||||
|
|
||||||
validLookupIndex := core.IfElse(match, index-1, len(nodes)-1)
|
|
||||||
for i := validLookupIndex; i >= 0; i-- {
|
|
||||||
if nodes[i].Flags&ast.NodeFlagsReparsed != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if prevChild == nil {
|
|
||||||
prevChild = nodes[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodeList
|
|
||||||
}
|
|
||||||
VisitEachChildAndJSDoc(n, sourceFile, visitNode, visitNodes)
|
|
||||||
|
|
||||||
if foundChild != nil {
|
|
||||||
// Note that the span of a node's tokens is [getStartOfNode(node, ...), node.end).
|
|
||||||
// Given that `position < child.end` and child has constituent tokens, we distinguish these cases:
|
|
||||||
// 1) `position` precedes `child`'s tokens or `child` has no tokens (ie: in a comment or whitespace preceding `child`):
|
|
||||||
// we need to find the last token in a previous child node or child tokens.
|
|
||||||
// 2) `position` is within the same span: we recurse on `child`.
|
|
||||||
start := GetStartOfNode(foundChild, sourceFile, !excludeJSDoc /*includeJSDoc*/)
|
|
||||||
lookInPreviousChild := start >= position || // cursor in the leading trivia or preceding tokens
|
|
||||||
!isValidPrecedingNode(foundChild, sourceFile)
|
|
||||||
if lookInPreviousChild {
|
|
||||||
if position >= foundChild.Pos() {
|
|
||||||
// Find jsdoc preceding the foundChild.
|
|
||||||
var jsDoc *ast.Node
|
|
||||||
nodeJSDoc := n.JSDoc(sourceFile)
|
|
||||||
for i := len(nodeJSDoc) - 1; i >= 0; i-- {
|
|
||||||
if nodeJSDoc[i].Pos() >= foundChild.Pos() {
|
|
||||||
jsDoc = nodeJSDoc[i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if jsDoc != nil {
|
|
||||||
if !excludeJSDoc {
|
|
||||||
return find(jsDoc)
|
|
||||||
} else {
|
|
||||||
return findRightmostValidToken(jsDoc.End(), sourceFile, n, position, excludeJSDoc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return findRightmostValidToken(foundChild.Pos(), sourceFile, n, -1 /*position*/, excludeJSDoc)
|
|
||||||
} else { // Answer is in tokens between two visited children.
|
|
||||||
return findRightmostValidToken(foundChild.Pos(), sourceFile, n, position, excludeJSDoc)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// position is in [foundChild.getStart(), foundChild.End): recur.
|
|
||||||
return find(foundChild)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have two cases here: either the position is at the end of the file,
|
|
||||||
// or the desired token is in the unvisited trailing tokens of the current node.
|
|
||||||
if position >= n.End() {
|
|
||||||
return findRightmostValidToken(n.End(), sourceFile, n, -1 /*position*/, excludeJSDoc)
|
|
||||||
} else {
|
|
||||||
return findRightmostValidToken(n.End(), sourceFile, n, position, excludeJSDoc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var node *ast.Node
|
|
||||||
if startNode != nil {
|
|
||||||
node = startNode
|
|
||||||
} else {
|
|
||||||
node = sourceFile.AsNode()
|
|
||||||
}
|
|
||||||
result := find(node)
|
|
||||||
if result != nil && ast.IsWhitespaceOnlyJsxText(result) {
|
|
||||||
panic("Expected result to be a non-whitespace token.")
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func isValidPrecedingNode(node *ast.Node, sourceFile *ast.SourceFile) bool {
|
|
||||||
start := GetStartOfNode(node, sourceFile, false /*includeJSDoc*/)
|
|
||||||
width := node.End() - start
|
|
||||||
return !(ast.IsWhitespaceOnlyJsxText(node) || width == 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetStartOfNode(node *ast.Node, file *ast.SourceFile, includeJSDoc bool) int {
|
|
||||||
return scanner.GetTokenPosOfNode(node, file, includeJSDoc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Looks for rightmost valid token in the range [startPos, endPos).
|
|
||||||
// If position is >= 0, looks for rightmost valid token that precedes or touches that position.
|
|
||||||
func findRightmostValidToken(endPos int, sourceFile *ast.SourceFile, containingNode *ast.Node, position int, excludeJSDoc bool) *ast.Node {
|
|
||||||
if position == -1 {
|
|
||||||
position = containingNode.End()
|
|
||||||
}
|
|
||||||
var find func(n *ast.Node, endPos int) *ast.Node
|
|
||||||
find = func(n *ast.Node, endPos int) *ast.Node {
|
|
||||||
if n == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if ast.IsNonWhitespaceToken(n) {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
var rightmostValidNode *ast.Node
|
|
||||||
rightmostVisitedNodes := make([]*ast.Node, 0, 1) // Nodes after the last valid node.
|
|
||||||
hasChildren := false
|
|
||||||
shouldVisitNode := func(node *ast.Node) bool {
|
|
||||||
// Node is synthetic or out of the desired range: don't visit it.
|
|
||||||
return !(node.Flags&ast.NodeFlagsReparsed != 0 ||
|
|
||||||
node.End() > endPos || GetStartOfNode(node, sourceFile, !excludeJSDoc /*includeJSDoc*/) >= position)
|
|
||||||
}
|
|
||||||
visitNode := func(node *ast.Node, _ *ast.NodeVisitor) *ast.Node {
|
|
||||||
if node == nil {
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
hasChildren = true
|
|
||||||
if !shouldVisitNode(node) {
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
rightmostVisitedNodes = append(rightmostVisitedNodes, node)
|
|
||||||
if isValidPrecedingNode(node, sourceFile) {
|
|
||||||
rightmostValidNode = node
|
|
||||||
rightmostVisitedNodes = rightmostVisitedNodes[:0]
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
visitNodes := func(nodeList *ast.NodeList, _ *ast.NodeVisitor) *ast.NodeList {
|
|
||||||
if nodeList != nil && len(nodeList.Nodes) > 0 {
|
|
||||||
hasChildren = true
|
|
||||||
index, _ := core.BinarySearchUniqueFunc(nodeList.Nodes, func(middle int, node *ast.Node) int {
|
|
||||||
if node.End() > endPos {
|
|
||||||
return comparisonGreaterThan
|
|
||||||
}
|
|
||||||
return comparisonLessThan
|
|
||||||
})
|
|
||||||
validIndex := -1
|
|
||||||
for i := index - 1; i >= 0; i-- {
|
|
||||||
if !shouldVisitNode(nodeList.Nodes[i]) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if isValidPrecedingNode(nodeList.Nodes[i], sourceFile) {
|
|
||||||
validIndex = i
|
|
||||||
rightmostValidNode = nodeList.Nodes[i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := validIndex + 1; i < index; i++ {
|
|
||||||
if !shouldVisitNode(nodeList.Nodes[i]) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rightmostVisitedNodes = append(rightmostVisitedNodes, nodeList.Nodes[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodeList
|
|
||||||
}
|
|
||||||
VisitEachChildAndJSDoc(n, sourceFile, visitNode, visitNodes)
|
|
||||||
|
|
||||||
// Three cases:
|
|
||||||
// 1. The answer is a token of `rightmostValidNode`.
|
|
||||||
// 2. The answer is one of the unvisited tokens that occur after the rightmost valid node.
|
|
||||||
// 3. The current node is a childless, token-less node. The answer is the current node.
|
|
||||||
|
|
||||||
// Case 2: Look at unvisited trailing tokens that occur in between the rightmost visited nodes.
|
|
||||||
if !shouldSkipChild(n) { // JSDoc nodes don't include trivia tokens as children.
|
|
||||||
var startPos int
|
|
||||||
if rightmostValidNode != nil {
|
|
||||||
startPos = rightmostValidNode.End()
|
|
||||||
} else {
|
|
||||||
startPos = n.Pos()
|
|
||||||
}
|
|
||||||
scanner := scanner.GetScannerForSourceFile(sourceFile, startPos)
|
|
||||||
var tokens []*ast.Node
|
|
||||||
for _, visitedNode := range rightmostVisitedNodes {
|
|
||||||
// Trailing tokens that occur before this node.
|
|
||||||
for startPos < min(visitedNode.Pos(), position) {
|
|
||||||
tokenStart := scanner.TokenStart()
|
|
||||||
if tokenStart >= position {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
token := scanner.Token()
|
|
||||||
tokenFullStart := scanner.TokenFullStart()
|
|
||||||
tokenEnd := scanner.TokenEnd()
|
|
||||||
startPos = tokenEnd
|
|
||||||
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, n))
|
|
||||||
scanner.Scan()
|
|
||||||
}
|
|
||||||
startPos = visitedNode.End()
|
|
||||||
scanner.ResetPos(startPos)
|
|
||||||
scanner.Scan()
|
|
||||||
}
|
|
||||||
// Trailing tokens after last visited node.
|
|
||||||
for startPos < min(endPos, position) {
|
|
||||||
tokenStart := scanner.TokenStart()
|
|
||||||
if tokenStart >= position {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
token := scanner.Token()
|
|
||||||
tokenFullStart := scanner.TokenFullStart()
|
|
||||||
tokenEnd := scanner.TokenEnd()
|
|
||||||
startPos = tokenEnd
|
|
||||||
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, n))
|
|
||||||
scanner.Scan()
|
|
||||||
}
|
|
||||||
|
|
||||||
lastToken := len(tokens) - 1
|
|
||||||
// Find preceding valid token.
|
|
||||||
for i := lastToken; i >= 0; i-- {
|
|
||||||
if !ast.IsWhitespaceOnlyJsxText(tokens[i]) {
|
|
||||||
return tokens[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Case 3: childless node.
|
|
||||||
if !hasChildren {
|
|
||||||
if n != containingNode {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Case 1: recur on rightmostValidNode.
|
|
||||||
if rightmostValidNode != nil {
|
|
||||||
endPos = rightmostValidNode.End()
|
|
||||||
}
|
|
||||||
return find(rightmostValidNode, endPos)
|
|
||||||
}
|
|
||||||
|
|
||||||
return find(containingNode, endPos)
|
|
||||||
}
|
|
||||||
|
|
||||||
func FindNextToken(previousToken *ast.Node, parent *ast.Node, file *ast.SourceFile) *ast.Node {
|
|
||||||
var find func(n *ast.Node) *ast.Node
|
|
||||||
find = func(n *ast.Node) *ast.Node {
|
|
||||||
if ast.IsTokenKind(n.Kind) && n.Pos() == previousToken.End() {
|
|
||||||
// this is token that starts at the end of previous token - return it
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
// Node that contains `previousToken` or occurs immediately after it.
|
|
||||||
var foundNode *ast.Node
|
|
||||||
visitNode := func(node *ast.Node, _ *ast.NodeVisitor) *ast.Node {
|
|
||||||
if node != nil && node.Flags&ast.NodeFlagsReparsed == 0 &&
|
|
||||||
node.Pos() <= previousToken.End() && node.End() > previousToken.End() {
|
|
||||||
foundNode = node
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
visitNodes := func(nodeList *ast.NodeList, _ *ast.NodeVisitor) *ast.NodeList {
|
|
||||||
if nodeList != nil && len(nodeList.Nodes) > 0 && foundNode == nil {
|
|
||||||
nodes := nodeList.Nodes
|
|
||||||
index, match := core.BinarySearchUniqueFunc(nodes, func(_ int, node *ast.Node) int {
|
|
||||||
if node.Flags&ast.NodeFlagsReparsed != 0 {
|
|
||||||
return comparisonLessThan
|
|
||||||
}
|
|
||||||
if node.Pos() > previousToken.End() {
|
|
||||||
return comparisonGreaterThan
|
|
||||||
}
|
|
||||||
if node.End() <= previousToken.Pos() {
|
|
||||||
return comparisonLessThan
|
|
||||||
}
|
|
||||||
return comparisonEqualTo
|
|
||||||
})
|
|
||||||
if match {
|
|
||||||
foundNode = nodes[index]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodeList
|
|
||||||
}
|
|
||||||
VisitEachChildAndJSDoc(n, file, visitNode, visitNodes)
|
|
||||||
// Cases:
|
|
||||||
// 1. no answer exists
|
|
||||||
// 2. answer is an unvisited token
|
|
||||||
// 3. answer is in the visited found node
|
|
||||||
|
|
||||||
// Case 3: look for the next token inside the found node.
|
|
||||||
if foundNode != nil {
|
|
||||||
return find(foundNode)
|
|
||||||
}
|
|
||||||
startPos := previousToken.End()
|
|
||||||
// Case 2: look for the next token directly.
|
|
||||||
if startPos >= n.Pos() && startPos < n.End() {
|
|
||||||
scanner := scanner.GetScannerForSourceFile(file, startPos)
|
|
||||||
token := scanner.Token()
|
|
||||||
tokenFullStart := scanner.TokenFullStart()
|
|
||||||
tokenStart := scanner.TokenStart()
|
|
||||||
tokenEnd := scanner.TokenEnd()
|
|
||||||
if tokenStart == previousToken.End() {
|
|
||||||
return file.GetOrCreateToken(token, tokenFullStart, tokenEnd, n)
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("Expected to find next token at %d, got token %s at %d", previousToken.End(), token, tokenStart))
|
|
||||||
}
|
|
||||||
// Case 3: no answer.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return find(parent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNodeVisitor(
|
|
||||||
visitNode func(*ast.Node, *ast.NodeVisitor) *ast.Node,
|
|
||||||
visitNodes func(*ast.NodeList, *ast.NodeVisitor) *ast.NodeList,
|
|
||||||
) *ast.NodeVisitor {
|
|
||||||
var wrappedVisitNode func(*ast.Node, *ast.NodeVisitor) *ast.Node
|
|
||||||
var wrappedVisitNodes func(*ast.NodeList, *ast.NodeVisitor) *ast.NodeList
|
|
||||||
if visitNode != nil {
|
|
||||||
wrappedVisitNode = func(n *ast.Node, v *ast.NodeVisitor) *ast.Node {
|
|
||||||
if ast.IsJSDocSingleCommentNodeComment(n) {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
return visitNode(n, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if visitNodes != nil {
|
|
||||||
wrappedVisitNodes = func(n *ast.NodeList, v *ast.NodeVisitor) *ast.NodeList {
|
|
||||||
if ast.IsJSDocSingleCommentNodeList(n) {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
return visitNodes(n, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ast.NewNodeVisitor(core.Identity, nil, ast.NodeVisitorHooks{
|
|
||||||
VisitNode: wrappedVisitNode,
|
|
||||||
VisitToken: wrappedVisitNode,
|
|
||||||
VisitNodes: wrappedVisitNodes,
|
|
||||||
VisitModifiers: func(modifiers *ast.ModifierList, visitor *ast.NodeVisitor) *ast.ModifierList {
|
|
||||||
if modifiers != nil {
|
|
||||||
wrappedVisitNodes(&modifiers.NodeList, visitor)
|
|
||||||
}
|
|
||||||
return modifiers
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldSkipChild(node *ast.Node) bool {
|
|
||||||
return node.Kind == ast.KindJSDoc ||
|
|
||||||
node.Kind == ast.KindJSDocText ||
|
|
||||||
node.Kind == ast.KindJSDocTypeLiteral ||
|
|
||||||
node.Kind == ast.KindJSDocSignature ||
|
|
||||||
ast.IsJSDocLinkLike(node) ||
|
|
||||||
ast.IsJSDocTag(node)
|
|
||||||
}
|
|
||||||
@ -7,6 +7,7 @@ import (
|
|||||||
"slices"
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"unicode"
|
"unicode"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
@ -476,6 +477,8 @@ func GetSpellingSuggestion[T any](name string, candidates []T, getName func(T) s
|
|||||||
maximumLengthDifference := max(2, int(float64(len(name))*0.34))
|
maximumLengthDifference := max(2, int(float64(len(name))*0.34))
|
||||||
bestDistance := math.Floor(float64(len(name))*0.4) + 1 // If the best result is worse than this, don't bother.
|
bestDistance := math.Floor(float64(len(name))*0.4) + 1 // If the best result is worse than this, don't bother.
|
||||||
runeName := []rune(name)
|
runeName := []rune(name)
|
||||||
|
buffers := levenshteinBuffersPool.Get().(*levenshteinBuffers)
|
||||||
|
defer levenshteinBuffersPool.Put(buffers)
|
||||||
var bestCandidate T
|
var bestCandidate T
|
||||||
for _, candidate := range candidates {
|
for _, candidate := range candidates {
|
||||||
candidateName := getName(candidate)
|
candidateName := getName(candidate)
|
||||||
@ -490,7 +493,7 @@ func GetSpellingSuggestion[T any](name string, candidates []T, getName func(T) s
|
|||||||
if len(candidateName) < 3 && !strings.EqualFold(candidateName, name) {
|
if len(candidateName) < 3 && !strings.EqualFold(candidateName, name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
distance := levenshteinWithMax(runeName, []rune(candidateName), bestDistance-0.1)
|
distance := levenshteinWithMax(buffers, runeName, []rune(candidateName), bestDistance-0.1)
|
||||||
if distance < 0 {
|
if distance < 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -502,9 +505,25 @@ func GetSpellingSuggestion[T any](name string, candidates []T, getName func(T) s
|
|||||||
return bestCandidate
|
return bestCandidate
|
||||||
}
|
}
|
||||||
|
|
||||||
func levenshteinWithMax(s1 []rune, s2 []rune, maxValue float64) float64 {
|
type levenshteinBuffers struct {
|
||||||
previous := make([]float64, len(s2)+1)
|
previous []float64
|
||||||
current := make([]float64, len(s2)+1)
|
current []float64
|
||||||
|
}
|
||||||
|
|
||||||
|
var levenshteinBuffersPool = sync.Pool{
|
||||||
|
New: func() any {
|
||||||
|
return &levenshteinBuffers{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func levenshteinWithMax(buffers *levenshteinBuffers, s1 []rune, s2 []rune, maxValue float64) float64 {
|
||||||
|
bufferSize := len(s2) + 1
|
||||||
|
buffers.previous = slices.Grow(buffers.previous[:0], bufferSize)[:bufferSize]
|
||||||
|
buffers.current = slices.Grow(buffers.current[:0], bufferSize)[:bufferSize]
|
||||||
|
|
||||||
|
previous := buffers.previous
|
||||||
|
current := buffers.current
|
||||||
|
|
||||||
big := maxValue + 0.01
|
big := maxValue + 0.01
|
||||||
for i := range previous {
|
for i := range previous {
|
||||||
previous[i] = float64(i)
|
previous[i] = float64(i)
|
||||||
|
|||||||
@ -2326,7 +2326,7 @@ var Compiler_option_0_may_only_be_used_with_build = &Message{code: 5093, categor
|
|||||||
|
|
||||||
var Compiler_option_0_may_not_be_used_with_build = &Message{code: 5094, category: CategoryError, key: "Compiler_option_0_may_not_be_used_with_build_5094", text: "Compiler option '--{0}' may not be used with '--build'."}
|
var Compiler_option_0_may_not_be_used_with_build = &Message{code: 5094, category: CategoryError, key: "Compiler_option_0_may_not_be_used_with_build_5094", text: "Compiler option '--{0}' may not be used with '--build'."}
|
||||||
|
|
||||||
var Option_0_can_only_be_used_when_module_is_set_to_preserve_or_to_es2015_or_later = &Message{code: 5095, category: CategoryError, key: "Option_0_can_only_be_used_when_module_is_set_to_preserve_or_to_es2015_or_later_5095", text: "Option '{0}' can only be used when 'module' is set to 'preserve' or to 'es2015' or later."}
|
var Option_0_can_only_be_used_when_module_is_set_to_preserve_commonjs_or_es2015_or_later = &Message{code: 5095, category: CategoryError, key: "Option_0_can_only_be_used_when_module_is_set_to_preserve_commonjs_or_es2015_or_later_5095", text: "Option '{0}' can only be used when 'module' is set to 'preserve', 'commonjs', or 'es2015' or later."}
|
||||||
|
|
||||||
var Option_allowImportingTsExtensions_can_only_be_used_when_either_noEmit_or_emitDeclarationOnly_is_set = &Message{code: 5096, category: CategoryError, key: "Option_allowImportingTsExtensions_can_only_be_used_when_either_noEmit_or_emitDeclarationOnly_is_set_5096", text: "Option 'allowImportingTsExtensions' can only be used when either 'noEmit' or 'emitDeclarationOnly' is set."}
|
var Option_allowImportingTsExtensions_can_only_be_used_when_either_noEmit_or_emitDeclarationOnly_is_set = &Message{code: 5096, category: CategoryError, key: "Option_allowImportingTsExtensions_can_only_be_used_when_either_noEmit_or_emitDeclarationOnly_is_set_5096", text: "Option 'allowImportingTsExtensions' can only be used when either 'noEmit' or 'emitDeclarationOnly' is set."}
|
||||||
|
|
||||||
|
|||||||
@ -1455,6 +1455,9 @@ func (s *Scanner) scanIdentifierParts() string {
|
|||||||
|
|
||||||
func (s *Scanner) scanString(jsxAttributeString bool) string {
|
func (s *Scanner) scanString(jsxAttributeString bool) string {
|
||||||
quote := s.char()
|
quote := s.char()
|
||||||
|
if quote == '\'' {
|
||||||
|
s.tokenFlags |= ast.TokenFlagsSingleQuote
|
||||||
|
}
|
||||||
s.pos++
|
s.pos++
|
||||||
// Fast path for simple strings without escape sequences.
|
// Fast path for simple strings without escape sequences.
|
||||||
strLen := strings.IndexRune(s.text[s.pos:], quote)
|
strLen := strings.IndexRune(s.text[s.pos:], quote)
|
||||||
|
|||||||
@ -98,3 +98,23 @@ func CompareStringsCaseInsensitiveThenSensitive(a, b string) Comparison {
|
|||||||
}
|
}
|
||||||
return CompareStringsCaseSensitive(a, b)
|
return CompareStringsCaseSensitive(a, b)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CompareStringsCaseInsensitiveEslintCompatible performs a case-insensitive comparison
|
||||||
|
// using toLowerCase() instead of toUpperCase() for ESLint compatibility.
|
||||||
|
//
|
||||||
|
// `CompareStringsCaseInsensitive` transforms letters to uppercase for unicode reasons,
|
||||||
|
// while eslint's `sort-imports` rule transforms letters to lowercase. Which one you choose
|
||||||
|
// affects the relative order of letters and ASCII characters 91-96, of which `_` is a
|
||||||
|
// valid character in an identifier. So if we used `CompareStringsCaseInsensitive` for
|
||||||
|
// import sorting, TypeScript and eslint would disagree about the correct case-insensitive
|
||||||
|
// sort order for `__String` and `Foo`. Since eslint's whole job is to create consistency
|
||||||
|
// by enforcing nitpicky details like this, it makes way more sense for us to just adopt
|
||||||
|
// their convention so users can have auto-imports without making eslint angry.
|
||||||
|
func CompareStringsCaseInsensitiveEslintCompatible(a, b string) Comparison {
|
||||||
|
if a == b {
|
||||||
|
return ComparisonEqual
|
||||||
|
}
|
||||||
|
a = strings.ToLower(a)
|
||||||
|
b = strings.ToLower(b)
|
||||||
|
return strings.Compare(a, b)
|
||||||
|
}
|
||||||
|
|||||||
@ -51,7 +51,7 @@ func RemoveFileExtension(path string) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Otherwise just remove single dot extension, if any
|
// Otherwise just remove single dot extension, if any
|
||||||
return path[:len(path)-len(filepath.Ext(path))]
|
return path[:len(path)-len(filepath.Ext(path))] //nolint:forbidigo
|
||||||
}
|
}
|
||||||
|
|
||||||
func TryGetExtensionFromPath(p string) string {
|
func TryGetExtensionFromPath(p string) string {
|
||||||
|
|||||||
@ -1,150 +0,0 @@
|
|||||||
package cachedvfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/collections"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FS struct {
|
|
||||||
fs vfs.FS
|
|
||||||
enabled atomic.Bool
|
|
||||||
|
|
||||||
directoryExistsCache collections.SyncMap[string, bool]
|
|
||||||
fileExistsCache collections.SyncMap[string, bool]
|
|
||||||
getAccessibleEntriesCache collections.SyncMap[string, vfs.Entries]
|
|
||||||
realpathCache collections.SyncMap[string, string]
|
|
||||||
statCache collections.SyncMap[string, vfs.FileInfo]
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ vfs.FS = (*FS)(nil)
|
|
||||||
|
|
||||||
func From(fs vfs.FS) *FS {
|
|
||||||
fsys := &FS{fs: fs}
|
|
||||||
fsys.enabled.Store(true)
|
|
||||||
return fsys
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) DisableAndClearCache() {
|
|
||||||
if fsys.enabled.CompareAndSwap(true, false) {
|
|
||||||
fsys.ClearCache()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) Enable() {
|
|
||||||
fsys.enabled.Store(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) ClearCache() {
|
|
||||||
fsys.directoryExistsCache.Clear()
|
|
||||||
fsys.fileExistsCache.Clear()
|
|
||||||
fsys.getAccessibleEntriesCache.Clear()
|
|
||||||
fsys.realpathCache.Clear()
|
|
||||||
fsys.statCache.Clear()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) DirectoryExists(path string) bool {
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
if ret, ok := fsys.directoryExistsCache.Load(path); ok {
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret := fsys.fs.DirectoryExists(path)
|
|
||||||
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
fsys.directoryExistsCache.Store(path, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) FileExists(path string) bool {
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
if ret, ok := fsys.fileExistsCache.Load(path); ok {
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret := fsys.fs.FileExists(path)
|
|
||||||
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
fsys.fileExistsCache.Store(path, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) GetAccessibleEntries(path string) vfs.Entries {
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
if ret, ok := fsys.getAccessibleEntriesCache.Load(path); ok {
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret := fsys.fs.GetAccessibleEntries(path)
|
|
||||||
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
fsys.getAccessibleEntriesCache.Store(path, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) ReadFile(path string) (contents string, ok bool) {
|
|
||||||
return fsys.fs.ReadFile(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) Realpath(path string) string {
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
if ret, ok := fsys.realpathCache.Load(path); ok {
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret := fsys.fs.Realpath(path)
|
|
||||||
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
fsys.realpathCache.Store(path, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) Remove(path string) error {
|
|
||||||
return fsys.fs.Remove(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) Chtimes(path string, aTime time.Time, mTime time.Time) error {
|
|
||||||
return fsys.fs.Chtimes(path, aTime, mTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) Stat(path string) vfs.FileInfo {
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
if ret, ok := fsys.statCache.Load(path); ok {
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ret := fsys.fs.Stat(path)
|
|
||||||
|
|
||||||
if fsys.enabled.Load() {
|
|
||||||
fsys.statCache.Store(path, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) UseCaseSensitiveFileNames() bool {
|
|
||||||
return fsys.fs.UseCaseSensitiveFileNames()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) WalkDir(root string, walkFn vfs.WalkDirFunc) error {
|
|
||||||
return fsys.fs.WalkDir(root, walkFn)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fsys *FS) WriteFile(path string, data string, writeByteOrderMark bool) error {
|
|
||||||
return fsys.fs.WriteFile(path, data, writeByteOrderMark)
|
|
||||||
}
|
|
||||||
@ -1,189 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io/fs"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf16"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/tspath"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Common struct {
|
|
||||||
RootFor func(root string) fs.FS
|
|
||||||
Realpath func(path string) string
|
|
||||||
}
|
|
||||||
|
|
||||||
func RootLength(p string) int {
|
|
||||||
l := tspath.GetEncodedRootLength(p)
|
|
||||||
if l <= 0 {
|
|
||||||
panic(fmt.Sprintf("vfs: path %q is not absolute", p))
|
|
||||||
}
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
func SplitPath(p string) (rootName, rest string) {
|
|
||||||
p = tspath.NormalizePath(p)
|
|
||||||
l := RootLength(p)
|
|
||||||
rootName, rest = p[:l], p[l:]
|
|
||||||
rest = tspath.RemoveTrailingDirectorySeparator(rest)
|
|
||||||
return rootName, rest
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) RootAndPath(path string) (fsys fs.FS, rootName string, rest string) {
|
|
||||||
rootName, rest = SplitPath(path)
|
|
||||||
if rest == "" {
|
|
||||||
rest = "."
|
|
||||||
}
|
|
||||||
return vfs.RootFor(rootName), rootName, rest
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) Stat(path string) vfs.FileInfo {
|
|
||||||
fsys, _, rest := vfs.RootAndPath(path)
|
|
||||||
if fsys == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
stat, err := fs.Stat(fsys, rest)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return stat
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) FileExists(path string) bool {
|
|
||||||
stat := vfs.Stat(path)
|
|
||||||
return stat != nil && !stat.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) DirectoryExists(path string) bool {
|
|
||||||
stat := vfs.Stat(path)
|
|
||||||
return stat != nil && stat.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) GetAccessibleEntries(path string) (result vfs.Entries) {
|
|
||||||
addToResult := func(name string, mode fs.FileMode) (added bool) {
|
|
||||||
if mode.IsDir() {
|
|
||||||
result.Directories = append(result.Directories, name)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if mode.IsRegular() {
|
|
||||||
result.Files = append(result.Files, name)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range vfs.getEntries(path) {
|
|
||||||
entryType := entry.Type()
|
|
||||||
|
|
||||||
if addToResult(entry.Name(), entryType) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if entryType&fs.ModeSymlink != 0 {
|
|
||||||
// Easy case; UNIX-like system will clearly mark symlinks.
|
|
||||||
if stat := vfs.Stat(path + "/" + entry.Name()); stat != nil {
|
|
||||||
addToResult(entry.Name(), stat.Mode())
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if entryType&fs.ModeIrregular != 0 && vfs.Realpath != nil {
|
|
||||||
// Could be a Windows junction. Try Realpath.
|
|
||||||
// TODO(jakebailey): use syscall.Win32FileAttributeData instead
|
|
||||||
fullPath := path + "/" + entry.Name()
|
|
||||||
if realpath := vfs.Realpath(fullPath); fullPath != realpath {
|
|
||||||
if stat := vfs.Stat(realpath); stat != nil {
|
|
||||||
addToResult(entry.Name(), stat.Mode())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) getEntries(path string) []vfs.DirEntry {
|
|
||||||
fsys, _, rest := vfs.RootAndPath(path)
|
|
||||||
if fsys == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
entries, err := fs.ReadDir(fsys, rest)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) WalkDir(root string, walkFn fs.WalkDirFunc) error {
|
|
||||||
fsys, rootName, rest := vfs.RootAndPath(root)
|
|
||||||
if fsys == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fs.WalkDir(fsys, rest, func(path string, d fs.DirEntry, err error) error {
|
|
||||||
if path == "." {
|
|
||||||
path = ""
|
|
||||||
}
|
|
||||||
return walkFn(rootName+path, d, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *Common) ReadFile(path string) (contents string, ok bool) {
|
|
||||||
fsys, _, rest := vfs.RootAndPath(path)
|
|
||||||
if fsys == nil {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := fs.ReadFile(fsys, rest)
|
|
||||||
if err != nil {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// An invariant of any underlying filesystem is that the bytes returned
|
|
||||||
// are immutable, otherwise anyone using the filesystem would end up
|
|
||||||
// with data races.
|
|
||||||
//
|
|
||||||
// This means that we can safely convert the bytes to a string directly,
|
|
||||||
// saving a copy.
|
|
||||||
if len(b) == 0 {
|
|
||||||
return "", true
|
|
||||||
}
|
|
||||||
|
|
||||||
s := unsafe.String(&b[0], len(b))
|
|
||||||
|
|
||||||
return decodeBytes(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeBytes(s string) (contents string, ok bool) {
|
|
||||||
var bom [2]byte
|
|
||||||
if len(s) >= 2 {
|
|
||||||
bom = [2]byte{s[0], s[1]}
|
|
||||||
switch bom {
|
|
||||||
case [2]byte{0xFF, 0xFE}:
|
|
||||||
return decodeUtf16(s[2:], binary.LittleEndian), true
|
|
||||||
case [2]byte{0xFE, 0xFF}:
|
|
||||||
return decodeUtf16(s[2:], binary.BigEndian), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(s) >= 3 && s[0] == 0xEF && s[1] == 0xBB && s[2] == 0xBF {
|
|
||||||
s = s[3:]
|
|
||||||
}
|
|
||||||
|
|
||||||
return s, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeUtf16(s string, order binary.ByteOrder) string {
|
|
||||||
ints := make([]uint16, len(s)/2)
|
|
||||||
if err := binary.Read(strings.NewReader(s), order, &ints); err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return string(utf16.Decode(ints))
|
|
||||||
}
|
|
||||||
@ -1,207 +0,0 @@
|
|||||||
package iovfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/fs"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/stringutil"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/tspath"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs/internal"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RealpathFS interface {
|
|
||||||
fs.FS
|
|
||||||
Realpath(path string) (string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type WritableFS interface {
|
|
||||||
fs.FS
|
|
||||||
WriteFile(path string, data []byte, perm fs.FileMode) error
|
|
||||||
MkdirAll(path string, perm fs.FileMode) error
|
|
||||||
// Removes `path` and all its contents. Will return the first error it encounters.
|
|
||||||
Remove(path string) error
|
|
||||||
Chtimes(path string, aTime time.Time, mTime time.Time) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type FsWithSys interface {
|
|
||||||
vfs.FS
|
|
||||||
FSys() fs.FS
|
|
||||||
}
|
|
||||||
|
|
||||||
// From creates a new FS from an [fs.FS].
|
|
||||||
//
|
|
||||||
// For paths like `c:/foo/bar`, fsys will be used as though it's rooted at `/` and the path is `/c:/foo/bar`.
|
|
||||||
//
|
|
||||||
// If the provided [fs.FS] implements [RealpathFS], it will be used to implement the Realpath method.
|
|
||||||
// If the provided [fs.FS] implements [WritableFS], it will be used to implement the WriteFile method.
|
|
||||||
//
|
|
||||||
// From does not actually handle case-insensitivity; ensure the passed in [fs.FS]
|
|
||||||
// respects case-insensitive file names if needed. Consider using [vfstest.FromMap] for testing.
|
|
||||||
func From(fsys fs.FS, useCaseSensitiveFileNames bool) FsWithSys {
|
|
||||||
var realpath func(path string) (string, error)
|
|
||||||
if fsys, ok := fsys.(RealpathFS); ok {
|
|
||||||
realpath = func(path string) (string, error) {
|
|
||||||
rest, hadSlash := strings.CutPrefix(path, "/")
|
|
||||||
rp, err := fsys.Realpath(rest)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if hadSlash {
|
|
||||||
return "/" + rp, nil
|
|
||||||
}
|
|
||||||
return rp, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
realpath = func(path string) (string, error) {
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var writeFile func(path string, content string, writeByteOrderMark bool) error
|
|
||||||
var mkdirAll func(path string) error
|
|
||||||
var remove func(path string) error
|
|
||||||
var chtimes func(path string, aTime time.Time, mTime time.Time) error
|
|
||||||
if fsys, ok := fsys.(WritableFS); ok {
|
|
||||||
writeFile = func(path string, content string, writeByteOrderMark bool) error {
|
|
||||||
rest, _ := strings.CutPrefix(path, "/")
|
|
||||||
if writeByteOrderMark {
|
|
||||||
// Strada uses \uFEFF because NodeJS requires it, but substitutes it with the correct BOM based on the
|
|
||||||
// output encoding. \uFEFF is actually the BOM for big-endian UTF-16. For UTF-8 the actual BOM is
|
|
||||||
// \xEF\xBB\xBF.
|
|
||||||
content = stringutil.AddUTF8ByteOrderMark(content)
|
|
||||||
}
|
|
||||||
return fsys.WriteFile(rest, []byte(content), 0o666)
|
|
||||||
}
|
|
||||||
mkdirAll = func(path string) error {
|
|
||||||
rest, _ := strings.CutPrefix(path, "/")
|
|
||||||
return fsys.MkdirAll(rest, 0o777)
|
|
||||||
}
|
|
||||||
remove = func(path string) error {
|
|
||||||
rest, _ := strings.CutPrefix(path, "/")
|
|
||||||
return fsys.Remove(rest)
|
|
||||||
}
|
|
||||||
chtimes = func(path string, aTime time.Time, mTime time.Time) error {
|
|
||||||
rest, _ := strings.CutPrefix(path, "/")
|
|
||||||
return fsys.Chtimes(rest, aTime, mTime)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
writeFile = func(string, string, bool) error {
|
|
||||||
panic("writeFile not supported")
|
|
||||||
}
|
|
||||||
mkdirAll = func(string) error {
|
|
||||||
panic("mkdirAll not supported")
|
|
||||||
}
|
|
||||||
remove = func(string) error {
|
|
||||||
panic("remove not supported")
|
|
||||||
}
|
|
||||||
chtimes = func(string, time.Time, time.Time) error {
|
|
||||||
panic("chtimes not supported")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ioFS{
|
|
||||||
common: internal.Common{
|
|
||||||
RootFor: func(root string) fs.FS {
|
|
||||||
if root == "/" {
|
|
||||||
return fsys
|
|
||||||
}
|
|
||||||
|
|
||||||
p := tspath.RemoveTrailingDirectorySeparator(root)
|
|
||||||
sub, err := fs.Sub(fsys, p)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("vfs: failed to create sub file system for %q: %v", p, err))
|
|
||||||
}
|
|
||||||
return sub
|
|
||||||
},
|
|
||||||
},
|
|
||||||
useCaseSensitiveFileNames: useCaseSensitiveFileNames,
|
|
||||||
realpath: realpath,
|
|
||||||
writeFile: writeFile,
|
|
||||||
mkdirAll: mkdirAll,
|
|
||||||
remove: remove,
|
|
||||||
chtimes: chtimes,
|
|
||||||
fsys: fsys,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ioFS struct {
|
|
||||||
common internal.Common
|
|
||||||
|
|
||||||
useCaseSensitiveFileNames bool
|
|
||||||
realpath func(path string) (string, error)
|
|
||||||
writeFile func(path string, content string, writeByteOrderMark bool) error
|
|
||||||
mkdirAll func(path string) error
|
|
||||||
remove func(path string) error
|
|
||||||
chtimes func(path string, aTime time.Time, mTime time.Time) error
|
|
||||||
fsys fs.FS
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ FsWithSys = (*ioFS)(nil)
|
|
||||||
|
|
||||||
func (vfs *ioFS) UseCaseSensitiveFileNames() bool {
|
|
||||||
return vfs.useCaseSensitiveFileNames
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) DirectoryExists(path string) bool {
|
|
||||||
return vfs.common.DirectoryExists(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) FileExists(path string) bool {
|
|
||||||
return vfs.common.FileExists(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) GetAccessibleEntries(path string) vfs.Entries {
|
|
||||||
return vfs.common.GetAccessibleEntries(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) Stat(path string) vfs.FileInfo {
|
|
||||||
_ = internal.RootLength(path) // Assert path is rooted
|
|
||||||
return vfs.common.Stat(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) ReadFile(path string) (contents string, ok bool) {
|
|
||||||
return vfs.common.ReadFile(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) WalkDir(root string, walkFn vfs.WalkDirFunc) error {
|
|
||||||
return vfs.common.WalkDir(root, walkFn)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) Remove(path string) error {
|
|
||||||
_ = internal.RootLength(path) // Assert path is rooted
|
|
||||||
return vfs.remove(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) Chtimes(path string, aTime time.Time, mTime time.Time) error {
|
|
||||||
_ = internal.RootLength(path) // Assert path is rooted
|
|
||||||
return vfs.chtimes(path, aTime, mTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) Realpath(path string) string {
|
|
||||||
root, rest := internal.SplitPath(path)
|
|
||||||
// splitPath normalizes the path into parts (e.g. "c:/foo/bar" -> "c:/", "foo/bar")
|
|
||||||
// Put them back together to call realpath.
|
|
||||||
realpath, err := vfs.realpath(root + rest)
|
|
||||||
if err != nil {
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
return realpath
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) WriteFile(path string, content string, writeByteOrderMark bool) error {
|
|
||||||
_ = internal.RootLength(path) // Assert path is rooted
|
|
||||||
if err := vfs.writeFile(path, content, writeByteOrderMark); err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := vfs.mkdirAll(tspath.GetDirectoryPath(tspath.NormalizePath(path))); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return vfs.writeFile(path, content, writeByteOrderMark)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vfs *ioFS) FSys() fs.FS {
|
|
||||||
return vfs.fsys
|
|
||||||
}
|
|
||||||
@ -1,177 +0,0 @@
|
|||||||
package osvfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/tspath"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs/internal"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FS creates a new FS from the OS file system.
|
|
||||||
func FS() vfs.FS {
|
|
||||||
return osVFS
|
|
||||||
}
|
|
||||||
|
|
||||||
var osVFS vfs.FS = &osFS{
|
|
||||||
common: internal.Common{
|
|
||||||
RootFor: os.DirFS,
|
|
||||||
Realpath: osFSRealpath,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
type osFS struct {
|
|
||||||
common internal.Common
|
|
||||||
}
|
|
||||||
|
|
||||||
// We do this right at startup to minimize the chance that executable gets moved or deleted.
|
|
||||||
var isFileSystemCaseSensitive = func() bool {
|
|
||||||
// win32/win64 are case insensitive platforms
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if runtime.GOARCH == "wasm" {
|
|
||||||
// !!! Who knows; this depends on the host implementation.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// As a proxy for case-insensitivity, we check if the current executable exists under a different case.
|
|
||||||
// This is not entirely correct, since different OSs can have differing case sensitivity in different paths,
|
|
||||||
// but this is largely good enough for our purposes (and what sys.ts used to do with __filename).
|
|
||||||
exe, err := os.Executable()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("vfs: failed to get executable path: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the current executable exists under a different case, we must be case-insensitive.
|
|
||||||
swapped := swapCase(exe)
|
|
||||||
if _, err := os.Stat(swapped); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("vfs: failed to stat %q: %v", swapped, err))
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}()
|
|
||||||
|
|
||||||
// swapCase returns str with every lowercase rune uppercased and every
// uppercase rune lowercased; runes with no case mapping pass through.
func swapCase(str string) string {
	return strings.Map(func(r rune) rune {
		if u := unicode.ToUpper(r); u != r {
			return u
		}
		return unicode.ToLower(r)
	}, str)
}
|
|
||||||
|
|
||||||
// UseCaseSensitiveFileNames reports whether the OS file system
// distinguishes file-name case (probed once at startup).
func (vfs *osFS) UseCaseSensitiveFileNames() bool {
	return isFileSystemCaseSensitive
}
|
|
||||||
|
|
||||||
// readSema bounds the number of concurrently open files for reads (128).
var readSema = make(chan struct{}, 128)
|
|
||||||
|
|
||||||
// ReadFile reads the entire file at path; ok is false when the read fails.
func (vfs *osFS) ReadFile(path string) (contents string, ok bool) {
	// Limit ourselves to fewer open files, which greatly reduces IO contention.
	readSema <- struct{}{}
	defer func() { <-readSema }()

	return vfs.common.ReadFile(path)
}
|
|
||||||
|
|
||||||
// DirectoryExists reports whether path exists and is a directory.
func (vfs *osFS) DirectoryExists(path string) bool {
	return vfs.common.DirectoryExists(path)
}
|
|
||||||
|
|
||||||
// FileExists reports whether path exists and is a file.
func (vfs *osFS) FileExists(path string) bool {
	return vfs.common.FileExists(path)
}
|
|
||||||
|
|
||||||
// GetAccessibleEntries returns the files and directories directly under path.
func (vfs *osFS) GetAccessibleEntries(path string) vfs.Entries {
	return vfs.common.GetAccessibleEntries(path)
}
|
|
||||||
|
|
||||||
// Stat returns file information for path via the common helper.
func (vfs *osFS) Stat(path string) vfs.FileInfo {
	return vfs.common.Stat(path)
}
|
|
||||||
|
|
||||||
// WalkDir walks the tree rooted at root, calling walkFn for each entry.
func (vfs *osFS) WalkDir(root string, walkFn vfs.WalkDirFunc) error {
	return vfs.common.WalkDir(root, walkFn)
}
|
|
||||||
|
|
||||||
// Realpath resolves path to its canonical, symlink-free form.
func (vfs *osFS) Realpath(path string) string {
	return osFSRealpath(path)
}
|
|
||||||
|
|
||||||
func osFSRealpath(path string) string {
|
|
||||||
_ = internal.RootLength(path) // Assert path is rooted
|
|
||||||
|
|
||||||
orig := path
|
|
||||||
path = filepath.FromSlash(path)
|
|
||||||
path, err := realpath(path)
|
|
||||||
if err != nil {
|
|
||||||
return orig
|
|
||||||
}
|
|
||||||
path, err = filepath.Abs(path)
|
|
||||||
if err != nil {
|
|
||||||
return orig
|
|
||||||
}
|
|
||||||
return tspath.NormalizeSlashes(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeSema bounds the number of concurrently open files for writes (32).
var writeSema = make(chan struct{}, 32)
|
|
||||||
|
|
||||||
func (vfs *osFS) writeFile(path string, content string, writeByteOrderMark bool) error {
|
|
||||||
writeSema <- struct{}{}
|
|
||||||
defer func() { <-writeSema }()
|
|
||||||
|
|
||||||
file, err := os.Create(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
if writeByteOrderMark {
|
|
||||||
if _, err := file.WriteString("\uFEFF"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := file.WriteString(content); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureDirectoryExists creates directoryPath and any missing parents.
func (vfs *osFS) ensureDirectoryExists(directoryPath string) error {
	return os.MkdirAll(directoryPath, 0o777)
}
|
|
||||||
|
|
||||||
func (vfs *osFS) WriteFile(path string, content string, writeByteOrderMark bool) error {
|
|
||||||
_ = internal.RootLength(path) // Assert path is rooted
|
|
||||||
if err := vfs.writeFile(path, content, writeByteOrderMark); err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := vfs.ensureDirectoryExists(tspath.GetDirectoryPath(tspath.NormalizePath(path))); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return vfs.writeFile(path, content, writeByteOrderMark)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove deletes path and everything beneath it, returning the first
// error encountered.
func (vfs *osFS) Remove(path string) error {
	// todo: #701 add retry mechanism?
	return os.RemoveAll(path)
}
|
|
||||||
|
|
||||||
// Chtimes updates the access and modification times of the named file.
func (vfs *osFS) Chtimes(path string, aTime time.Time, mTime time.Time) error {
	return os.Chtimes(path, aTime, mTime)
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
//go:build !windows
|
|
||||||
|
|
||||||
package osvfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// realpath resolves symlinks via filepath.EvalSymlinks on non-Windows
// platforms (see the build tag at the top of this file).
func realpath(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}
|
|
||||||
@ -1,100 +0,0 @@
|
|||||||
package osvfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This implementation is based on what Node's fs.realpath.native does, via libuv: https://github.com/libuv/libuv/blob/ec5a4b54f7da7eeb01679005c615fee9633cdb3b/src/win/fs.c#L2937

// realpath resolves path to its final, symlink-free DOS form by asking
// the kernel for the final path name of an open handle.
func realpath(path string) (string, error) {
	var h windows.Handle
	if len(path) < 248 {
		// Short path: open a metadata-only handle directly.
		var err error
		h, err = openMetadata(path)
		if err != nil {
			return "", err
		}
		defer windows.CloseHandle(h) //nolint:errcheck
	} else {
		// For long paths, defer to os.Open to run the path through fixLongPath.
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()

		// Works on directories too since https://go.dev/cl/405275.
		h = windows.Handle(f.Fd())
	}

	// based on https://github.com/golang/go/blob/f4e3ec3dbe3b8e04a058d266adf8e048bab563f2/src/os/file_windows.go#L389

	const _VOLUME_NAME_DOS = 0

	// Grow the buffer until the returned length fits (n >= len(buf) means
	// the buffer was too small and n is the required size).
	buf := make([]uint16, 310) // https://github.com/microsoft/go-winio/blob/3c9576c9346a1892dee136329e7e15309e82fb4f/internal/stringbuffer/wstring.go#L13
	for {
		n, err := windows.GetFinalPathNameByHandle(h, &buf[0], uint32(len(buf)), _VOLUME_NAME_DOS)
		if err != nil {
			return "", err
		}
		if n < uint32(len(buf)) {
			break
		}
		buf = make([]uint16, n)
	}

	// Strip the \\?\ extended-length prefix that VOLUME_NAME_DOS produces.
	s := syscall.UTF16ToString(buf)
	if len(s) > 4 && s[:4] == `\\?\` {
		s = s[4:]
		if len(s) > 3 && s[:3] == `UNC` {
			// return path like \\server\share\...
			return `\` + s[3:], nil
		}
		return s, nil
	}

	return "", errors.New("GetFinalPathNameByHandle returned unexpected path: " + s)
}
|
|
||||||
|
|
||||||
// openMetadata opens a metadata-only handle to path (no read/write data
// access). FILE_FLAG_BACKUP_SEMANTICS allows directories to be opened as
// well as files; full sharing avoids blocking other accessors.
func openMetadata(path string) (windows.Handle, error) {
	// based on https://github.com/microsoft/go-winio/blob/3c9576c9346a1892dee136329e7e15309e82fb4f/pkg/fs/resolve.go#L113

	pathUTF16, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return windows.InvalidHandle, err
	}

	const (
		_FILE_ANY_ACCESS = 0

		_FILE_SHARE_READ   = 0x01
		_FILE_SHARE_WRITE  = 0x02
		_FILE_SHARE_DELETE = 0x04

		_OPEN_EXISTING = 0x03

		_FILE_FLAG_BACKUP_SEMANTICS = 0x0200_0000
	)

	h, err := windows.CreateFile(
		pathUTF16,
		_FILE_ANY_ACCESS,
		_FILE_SHARE_READ|_FILE_SHARE_WRITE|_FILE_SHARE_DELETE,
		nil,
		_OPEN_EXISTING,
		_FILE_FLAG_BACKUP_SEMANTICS,
		0,
	)
	if err != nil {
		return 0, &os.PathError{
			Op:   "CreateFile",
			Path: path,
			Err:  err,
		}
	}
	return h, nil
}
|
|
||||||
@ -1,464 +0,0 @@
|
|||||||
package vfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/collections"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/core"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/stringutil"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/tspath"
|
|
||||||
"github.com/dlclark/regexp2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileMatcherPatterns holds the regex pattern strings derived from a set
// of tsconfig-style include/exclude specs.
type FileMatcherPatterns struct {
	// One pattern for each "include" spec.
	includeFilePatterns []string
	// One pattern matching one of any of the "include" specs.
	includeFilePattern      string
	includeDirectoryPattern string
	excludePattern          string
	// basePaths are the non-wildcard directories enumeration starts from.
	basePaths []string
}
|
|
||||||
|
|
||||||
// usage discriminates how a set of wildcard specs is being translated.
type usage string

const (
	usageFiles       usage = "files"
	usageDirectories usage = "directories"
	usageExclude     usage = "exclude"
)
|
|
||||||
|
|
||||||
func GetRegularExpressionsForWildcards(specs []string, basePath string, usage usage) []string {
|
|
||||||
if len(specs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return core.Map(specs, func(spec string) string {
|
|
||||||
return getSubPatternFromSpec(spec, basePath, usage, wildcardMatchers[usage])
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetRegularExpressionForWildcard(specs []string, basePath string, usage usage) string {
|
|
||||||
patterns := GetRegularExpressionsForWildcards(specs, basePath, usage)
|
|
||||||
if len(patterns) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
mappedPatterns := make([]string, len(patterns))
|
|
||||||
for i, pattern := range patterns {
|
|
||||||
mappedPatterns[i] = fmt.Sprintf("(%s)", pattern)
|
|
||||||
}
|
|
||||||
pattern := strings.Join(mappedPatterns, "|")
|
|
||||||
|
|
||||||
// If excluding, match "foo/bar/baz...", but if including, only allow "foo".
|
|
||||||
var terminator string
|
|
||||||
if usage == "exclude" {
|
|
||||||
terminator = "($|/)"
|
|
||||||
} else {
|
|
||||||
terminator = "$"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("^(%s)%s", pattern, terminator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// replaceWildcardCharacter maps one matched wildcard character to its regex
// equivalent: "*" becomes singleAsteriskRegexFragment, "?" becomes a single
// non-separator character class, and anything else is backslash-escaped.
func replaceWildcardCharacter(match string, singleAsteriskRegexFragment string) string {
	switch match {
	case "*":
		return singleAsteriskRegexFragment
	case "?":
		return "[^/]"
	default:
		return "\\" + match
	}
}
|
|
||||||
|
|
||||||
// An "includes" path "foo" is implicitly a glob "foo/**/*" if its last
// component has no extension and does not contain any glob characters itself.
func IsImplicitGlob(lastPathComponent string) bool {
	return strings.IndexAny(lastPathComponent, ".*?") < 0
}
|
|
||||||
|
|
||||||
// Reserved characters, forces escaping of any non-word (or digit), non-whitespace character.
// It may be inefficient (we could just match (/[-[\]{}()*+?.,\\^$|#\s]/g), but this is future
// proof.
var (
	reservedCharacterPattern *regexp.Regexp = regexp.MustCompile(`[^\w\s/]`)
	// wildcardCharCodes are the glob metacharacters recognized in specs.
	wildcardCharCodes = []rune{'*', '?'}
)
|
|
||||||
|
|
||||||
var (
	// commonPackageFolders are package-manager directories that implicit
	// patterns should not descend into.
	commonPackageFolders = []string{"node_modules", "bower_components", "jspm_packages"}
	// implicitExcludePathRegexPattern is a negative lookahead rejecting the
	// folders above at the current path position.
	implicitExcludePathRegexPattern = "(?!(" + strings.Join(commonPackageFolders, "|") + ")(/|$))"
)
|
|
||||||
|
|
||||||
// WildcardMatcher bundles the regex fragments used to translate wildcard
// specs for one usage ("files", "directories", or "exclude").
type WildcardMatcher struct {
	singleAsteriskRegexFragment string
	doubleAsteriskRegexFragment string
	replaceWildcardCharacter    func(match string) string
}
|
|
||||||
|
|
||||||
const (
	// Matches any single directory segment unless it is the last segment and a .min.js file
	// Breakdown:
	//
	//	[^./]               # matches everything up to the first . character (excluding directory separators)
	//	(\\.(?!min\\.js$))? # matches . characters but not if they are part of the .min.js file extension
	singleAsteriskRegexFragmentFilesMatcher = "([^./]|(\\.(?!min\\.js$))?)*"
	// Matches any single directory segment (no "."/min.js restriction).
	singleAsteriskRegexFragment = "[^/]*"
)
|
|
||||||
|
|
||||||
// filesMatcher translates specs used to match files ("files" usage).
var filesMatcher = WildcardMatcher{
	singleAsteriskRegexFragment: singleAsteriskRegexFragmentFilesMatcher,
	// Regex for the ** wildcard. Matches any number of subdirectories. When used for including
	// files or directories, does not match subdirectories that start with a . character
	doubleAsteriskRegexFragment: "(/" + implicitExcludePathRegexPattern + "[^/.][^/]*)*?",
	replaceWildcardCharacter: func(match string) string {
		return replaceWildcardCharacter(match, singleAsteriskRegexFragmentFilesMatcher)
	},
}
|
|
||||||
|
|
||||||
// directoriesMatcher translates specs used to match directories.
var directoriesMatcher = WildcardMatcher{
	singleAsteriskRegexFragment: singleAsteriskRegexFragment,
	// Regex for the ** wildcard. Matches any number of subdirectories. When used for including
	// files or directories, does not match subdirectories that start with a . character
	doubleAsteriskRegexFragment: "(/" + implicitExcludePathRegexPattern + "[^/.][^/]*)*?",
	replaceWildcardCharacter: func(match string) string {
		return replaceWildcardCharacter(match, singleAsteriskRegexFragment)
	},
}
|
|
||||||
|
|
||||||
// excludeMatcher translates "exclude" specs; its ** fragment matches any
// subtree without the implicit node_modules-style filtering.
var excludeMatcher = WildcardMatcher{
	singleAsteriskRegexFragment: singleAsteriskRegexFragment,
	doubleAsteriskRegexFragment: "(/.+?)?",
	replaceWildcardCharacter: func(match string) string {
		return replaceWildcardCharacter(match, singleAsteriskRegexFragment)
	},
}
|
|
||||||
|
|
||||||
// wildcardMatchers selects the matcher appropriate for each spec usage.
var wildcardMatchers = map[usage]WildcardMatcher{
	usageFiles:       filesMatcher,
	usageDirectories: directoriesMatcher,
	usageExclude:     excludeMatcher,
}
|
|
||||||
|
|
||||||
func GetPatternFromSpec(
|
|
||||||
spec string,
|
|
||||||
basePath string,
|
|
||||||
usage usage,
|
|
||||||
) string {
|
|
||||||
pattern := getSubPatternFromSpec(spec, basePath, usage, wildcardMatchers[usage])
|
|
||||||
if pattern == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
ending := core.IfElse(usage == "exclude", "($|/)", "$")
|
|
||||||
return fmt.Sprintf("^(%s)%s", pattern, ending)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getSubPatternFromSpec(
|
|
||||||
spec string,
|
|
||||||
basePath string,
|
|
||||||
usage usage,
|
|
||||||
matcher WildcardMatcher,
|
|
||||||
) string {
|
|
||||||
matcher = wildcardMatchers[usage]
|
|
||||||
|
|
||||||
replaceWildcardCharacter := matcher.replaceWildcardCharacter
|
|
||||||
|
|
||||||
var subpattern strings.Builder
|
|
||||||
hasWrittenComponent := false
|
|
||||||
components := tspath.GetNormalizedPathComponents(spec, basePath)
|
|
||||||
lastComponent := core.LastOrNil(components)
|
|
||||||
if usage != "exclude" && lastComponent == "**" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// getNormalizedPathComponents includes the separator for the root component.
|
|
||||||
// We need to remove to create our regex correctly.
|
|
||||||
components[0] = tspath.RemoveTrailingDirectorySeparator(components[0])
|
|
||||||
|
|
||||||
if IsImplicitGlob(lastComponent) {
|
|
||||||
components = append(components, "**", "*")
|
|
||||||
}
|
|
||||||
|
|
||||||
optionalCount := 0
|
|
||||||
for _, component := range components {
|
|
||||||
if component == "**" {
|
|
||||||
subpattern.WriteString(matcher.doubleAsteriskRegexFragment)
|
|
||||||
} else {
|
|
||||||
if usage == "directories" {
|
|
||||||
subpattern.WriteString("(")
|
|
||||||
optionalCount++
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasWrittenComponent {
|
|
||||||
subpattern.WriteRune(tspath.DirectorySeparator)
|
|
||||||
}
|
|
||||||
|
|
||||||
if usage != "exclude" {
|
|
||||||
var componentPattern strings.Builder
|
|
||||||
if strings.HasPrefix(component, "*") {
|
|
||||||
componentPattern.WriteString("([^./]" + matcher.singleAsteriskRegexFragment + ")?")
|
|
||||||
component = component[1:]
|
|
||||||
} else if strings.HasPrefix(component, "?") {
|
|
||||||
componentPattern.WriteString("[^./]")
|
|
||||||
component = component[1:]
|
|
||||||
}
|
|
||||||
componentPattern.WriteString(reservedCharacterPattern.ReplaceAllStringFunc(component, replaceWildcardCharacter))
|
|
||||||
|
|
||||||
// Patterns should not include subfolders like node_modules unless they are
|
|
||||||
// explicitly included as part of the path.
|
|
||||||
//
|
|
||||||
// As an optimization, if the component pattern is the same as the component,
|
|
||||||
// then there definitely were no wildcard characters and we do not need to
|
|
||||||
// add the exclusion pattern.
|
|
||||||
if componentPattern.String() != component {
|
|
||||||
subpattern.WriteString(implicitExcludePathRegexPattern)
|
|
||||||
}
|
|
||||||
subpattern.WriteString(componentPattern.String())
|
|
||||||
} else {
|
|
||||||
subpattern.WriteString(reservedCharacterPattern.ReplaceAllStringFunc(component, replaceWildcardCharacter))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hasWrittenComponent = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for optionalCount > 0 {
|
|
||||||
subpattern.WriteString(")?")
|
|
||||||
optionalCount--
|
|
||||||
}
|
|
||||||
|
|
||||||
return subpattern.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func getIncludeBasePath(absolute string) string {
|
|
||||||
wildcardOffset := strings.IndexAny(absolute, string(wildcardCharCodes))
|
|
||||||
if wildcardOffset < 0 {
|
|
||||||
// No "*" or "?" in the path
|
|
||||||
if !tspath.HasExtension(absolute) {
|
|
||||||
return absolute
|
|
||||||
} else {
|
|
||||||
return tspath.RemoveTrailingDirectorySeparator(tspath.GetDirectoryPath(absolute))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return absolute[:max(strings.LastIndex(absolute[:wildcardOffset], string(tspath.DirectorySeparator)), 0)]
|
|
||||||
}
|
|
||||||
|
|
||||||
// getBasePaths computes the unique non-wildcard base paths amongst the provided include patterns.
func getBasePaths(path string, includes []string, useCaseSensitiveFileNames bool) []string {
	// Storage for our results in the form of literal paths (e.g. the paths as written by the user).
	basePaths := []string{path}

	if len(includes) > 0 {
		// Storage for literal base paths amongst the include patterns.
		includeBasePaths := []string{}
		for _, include := range includes {
			// We also need to check the relative paths by converting them to absolute and normalizing
			// in case they escape the base path (e.g "..\somedirectory")
			var absolute string
			if tspath.IsRootedDiskPath(include) {
				absolute = include
			} else {
				absolute = tspath.NormalizePath(tspath.CombinePaths(path, include))
			}
			// Append the literal and canonical candidate base paths.
			includeBasePaths = append(includeBasePaths, getIncludeBasePath(absolute))
		}

		// Sort the offsets array using either the literal or canonical path representations.
		// (The negation presumably supplies an ignoreCase flag — verify against
		// stringutil.GetStringComparer.)
		stringComparer := stringutil.GetStringComparer(!useCaseSensitiveFileNames)
		sort.SliceStable(includeBasePaths, func(i, j int) bool {
			return stringComparer(includeBasePaths[i], includeBasePaths[j]) < 0
		})

		// Iterate over each include base path and include unique base paths that are not a
		// subpath of an existing base path
		for _, includeBasePath := range includeBasePaths {
			if core.Every(basePaths, func(basepath string) bool {
				// NOTE(review): UseCaseSensitiveFileNames is passed negated here;
				// unlike the comparer above, ComparePathsOptions appears to want the
				// sensitivity itself rather than an ignoreCase flag — confirm this
				// negation is intentional.
				return !tspath.ContainsPath(basepath, includeBasePath, tspath.ComparePathsOptions{CurrentDirectory: path, UseCaseSensitiveFileNames: !useCaseSensitiveFileNames})
			}) {
				basePaths = append(basePaths, includeBasePath)
			}
		}
	}

	return basePaths
}
|
|
||||||
|
|
||||||
// getFileMatcherPatterns generates file matching patterns based on the provided path,
|
|
||||||
// includes, excludes, and other parameters. path is the directory of the tsconfig.json file.
|
|
||||||
func getFileMatcherPatterns(path string, excludes []string, includes []string, useCaseSensitiveFileNames bool, currentDirectory string) FileMatcherPatterns {
|
|
||||||
path = tspath.NormalizePath(path)
|
|
||||||
currentDirectory = tspath.NormalizePath(currentDirectory)
|
|
||||||
absolutePath := tspath.CombinePaths(currentDirectory, path)
|
|
||||||
|
|
||||||
return FileMatcherPatterns{
|
|
||||||
includeFilePatterns: core.Map(GetRegularExpressionsForWildcards(includes, absolutePath, "files"), func(pattern string) string { return "^" + pattern + "$" }),
|
|
||||||
includeFilePattern: GetRegularExpressionForWildcard(includes, absolutePath, "files"),
|
|
||||||
includeDirectoryPattern: GetRegularExpressionForWildcard(includes, absolutePath, "directories"),
|
|
||||||
excludePattern: GetRegularExpressionForWildcard(excludes, absolutePath, "exclude"),
|
|
||||||
basePaths: getBasePaths(path, includes, useCaseSensitiveFileNames),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// regexp2CacheKey identifies a compiled regex by pattern and options.
type regexp2CacheKey struct {
	pattern string
	opts    regexp2.RegexOptions
}
|
|
||||||
|
|
||||||
var (
	// regexp2CacheMu guards regexp2Cache.
	regexp2CacheMu sync.RWMutex
	regexp2Cache   = make(map[regexp2CacheKey]*regexp2.Regexp)
)
|
|
||||||
|
|
||||||
func GetRegexFromPattern(pattern string, useCaseSensitiveFileNames bool) *regexp2.Regexp {
|
|
||||||
flags := regexp2.ECMAScript
|
|
||||||
if !useCaseSensitiveFileNames {
|
|
||||||
flags |= regexp2.IgnoreCase
|
|
||||||
}
|
|
||||||
opts := regexp2.RegexOptions(flags)
|
|
||||||
|
|
||||||
key := regexp2CacheKey{pattern, opts}
|
|
||||||
|
|
||||||
regexp2CacheMu.RLock()
|
|
||||||
re, ok := regexp2Cache[key]
|
|
||||||
regexp2CacheMu.RUnlock()
|
|
||||||
if ok {
|
|
||||||
return re
|
|
||||||
}
|
|
||||||
|
|
||||||
regexp2CacheMu.Lock()
|
|
||||||
defer regexp2CacheMu.Unlock()
|
|
||||||
|
|
||||||
re, ok = regexp2Cache[key]
|
|
||||||
if ok {
|
|
||||||
return re
|
|
||||||
}
|
|
||||||
|
|
||||||
// Avoid infinite growth; may cause thrashing but no worse than not caching at all.
|
|
||||||
if len(regexp2Cache) > 1000 {
|
|
||||||
clear(regexp2Cache)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Avoid holding onto the pattern string, since this may pin a full config file in memory.
|
|
||||||
pattern = strings.Clone(pattern)
|
|
||||||
key.pattern = pattern
|
|
||||||
|
|
||||||
re = regexp2.MustCompile(pattern, opts)
|
|
||||||
regexp2Cache[key] = re
|
|
||||||
return re
|
|
||||||
}
|
|
||||||
|
|
||||||
// visitor carries the compiled matching state for one matchFiles call.
type visitor struct {
	includeFileRegexes        []*regexp2.Regexp // per-"include" regexes; nil means accept all files
	excludeRegex              *regexp2.Regexp
	includeDirectoryRegex     *regexp2.Regexp
	extensions                []string
	useCaseSensitiveFileNames bool
	host                      FS
	visited                   collections.Set[string] // canonicalized directories already walked
	results                   [][]string              // one bucket per include regex (or one bucket total)
}
|
|
||||||
|
|
||||||
// visitDirectory recursively collects matching files under absolutePath,
// appending names (built from the relative path argument) into v.results.
// depth, when non-nil, bounds the remaining recursion depth.
func (v *visitor) visitDirectory(
	path string,
	absolutePath string,
	depth *int,
) {
	// Skip directories already seen (canonicalized for the host's case
	// sensitivity) — guards against revisits, e.g. via followed symlinks.
	canonicalPath := tspath.GetCanonicalFileName(absolutePath, v.useCaseSensitiveFileNames)
	if v.visited.Has(canonicalPath) {
		return
	}
	v.visited.Add(canonicalPath)
	systemEntries := v.host.GetAccessibleEntries(absolutePath)
	files := systemEntries.Files
	directories := systemEntries.Directories

	for _, current := range files {
		name := tspath.CombinePaths(path, current)
		absoluteName := tspath.CombinePaths(absolutePath, current)
		// Filter by extension, then excludes, then bucket by the first
		// matching include regex (preserving "include" order).
		if len(v.extensions) > 0 && !tspath.FileExtensionIsOneOf(name, v.extensions) {
			continue
		}
		if v.excludeRegex != nil && core.Must(v.excludeRegex.MatchString(absoluteName)) {
			continue
		}
		if v.includeFileRegexes == nil {
			(v.results)[0] = append((v.results)[0], name)
		} else {
			includeIndex := core.FindIndex(v.includeFileRegexes, func(re *regexp2.Regexp) bool { return core.Must(re.MatchString(absoluteName)) })
			if includeIndex != -1 {
				(v.results)[includeIndex] = append((v.results)[includeIndex], name)
			}
		}
	}

	if depth != nil {
		// Stop descending once the remaining depth is exhausted; a fresh
		// pointer is taken so sibling subtrees are unaffected.
		newDepth := *depth - 1
		if newDepth == 0 {
			return
		}
		depth = &newDepth
	}

	for _, current := range directories {
		name := tspath.CombinePaths(path, current)
		absoluteName := tspath.CombinePaths(absolutePath, current)
		if (v.includeDirectoryRegex == nil || core.Must(v.includeDirectoryRegex.MatchString(absoluteName))) && (v.excludeRegex == nil || !core.Must(v.excludeRegex.MatchString(absoluteName))) {
			v.visitDirectory(name, absoluteName, depth)
		}
	}
}
|
|
||||||
|
|
||||||
// path is the directory of the tsconfig.json
|
|
||||||
func matchFiles(path string, extensions []string, excludes []string, includes []string, useCaseSensitiveFileNames bool, currentDirectory string, depth *int, host FS) []string {
|
|
||||||
path = tspath.NormalizePath(path)
|
|
||||||
currentDirectory = tspath.NormalizePath(currentDirectory)
|
|
||||||
|
|
||||||
patterns := getFileMatcherPatterns(path, excludes, includes, useCaseSensitiveFileNames, currentDirectory)
|
|
||||||
var includeFileRegexes []*regexp2.Regexp
|
|
||||||
if patterns.includeFilePatterns != nil {
|
|
||||||
includeFileRegexes = core.Map(patterns.includeFilePatterns, func(pattern string) *regexp2.Regexp { return GetRegexFromPattern(pattern, useCaseSensitiveFileNames) })
|
|
||||||
}
|
|
||||||
var includeDirectoryRegex *regexp2.Regexp
|
|
||||||
if patterns.includeDirectoryPattern != "" {
|
|
||||||
includeDirectoryRegex = GetRegexFromPattern(patterns.includeDirectoryPattern, useCaseSensitiveFileNames)
|
|
||||||
}
|
|
||||||
var excludeRegex *regexp2.Regexp
|
|
||||||
if patterns.excludePattern != "" {
|
|
||||||
excludeRegex = GetRegexFromPattern(patterns.excludePattern, useCaseSensitiveFileNames)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Associate an array of results with each include regex. This keeps results in order of the "include" order.
|
|
||||||
// If there are no "includes", then just put everything in results[0].
|
|
||||||
var results [][]string
|
|
||||||
if len(includeFileRegexes) > 0 {
|
|
||||||
tempResults := make([][]string, len(includeFileRegexes))
|
|
||||||
for i := range includeFileRegexes {
|
|
||||||
tempResults[i] = []string{}
|
|
||||||
}
|
|
||||||
results = tempResults
|
|
||||||
} else {
|
|
||||||
results = [][]string{{}}
|
|
||||||
}
|
|
||||||
v := visitor{
|
|
||||||
useCaseSensitiveFileNames: useCaseSensitiveFileNames,
|
|
||||||
host: host,
|
|
||||||
includeFileRegexes: includeFileRegexes,
|
|
||||||
excludeRegex: excludeRegex,
|
|
||||||
includeDirectoryRegex: includeDirectoryRegex,
|
|
||||||
extensions: extensions,
|
|
||||||
results: results,
|
|
||||||
}
|
|
||||||
for _, basePath := range patterns.basePaths {
|
|
||||||
v.visitDirectory(basePath, tspath.CombinePaths(currentDirectory, basePath), depth)
|
|
||||||
}
|
|
||||||
|
|
||||||
return core.Flatten(results)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadDirectory enumerates files under path on host, honoring extensions,
// excludes, includes, and an optional recursion depth.
func ReadDirectory(host FS, currentDir string, path string, extensions []string, excludes []string, includes []string, depth *int) []string {
	return matchFiles(path, extensions, excludes, includes, host.UseCaseSensitiveFileNames(), currentDir, depth, host)
}
|
|
||||||
@ -1,79 +0,0 @@
|
|||||||
package vfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/fs"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:generate go tool github.com/matryer/moq -fmt goimports -out vfsmock/mock_generated.go -pkg vfsmock . FS
|
|
||||||
//go:generate go tool mvdan.cc/gofumpt -w vfsmock/mock_generated.go
|
|
||||||
|
|
||||||
// FS is a file system abstraction.
type FS interface {
	// UseCaseSensitiveFileNames returns true if the file system is case-sensitive.
	UseCaseSensitiveFileNames() bool

	// FileExists returns true if the file exists.
	FileExists(path string) bool

	// ReadFile reads the file specified by path and returns the content.
	// If the file fails to be read, ok will be false.
	ReadFile(path string) (contents string, ok bool)

	// WriteFile writes data to the file specified by path, optionally
	// prefixed with a byte order mark.
	WriteFile(path string, data string, writeByteOrderMark bool) error

	// Remove removes `path` and all its contents. Will return the first error it encounters.
	Remove(path string) error

	// Chtimes changes the access and modification times of the named file.
	Chtimes(path string, aTime time.Time, mTime time.Time) error

	// DirectoryExists returns true if the path is a directory.
	DirectoryExists(path string) bool

	// GetAccessibleEntries returns the files/directories in the specified directory.
	// If any entry is a symlink, it will be followed.
	GetAccessibleEntries(path string) Entries

	// Stat returns file information for the given path.
	Stat(path string) FileInfo

	// WalkDir walks the file tree rooted at root, calling walkFn for each file or directory in the tree.
	// It has the same behavior as [fs.WalkDir], but with paths as [string].
	WalkDir(root string, walkFn WalkDirFunc) error

	// Realpath returns the "real path" of the specified path,
	// following symlinks and correcting filename casing.
	Realpath(path string) string
}
|
|
||||||
|
|
||||||
// Entries lists a directory's immediate children, split by kind.
type Entries struct {
	Files       []string
	Directories []string
}
|
|
||||||
|
|
||||||
// Aliases re-exported from io/fs so callers need not import it directly.
type (
	// DirEntry is [fs.DirEntry].
	DirEntry = fs.DirEntry

	// FileInfo is [fs.FileInfo].
	FileInfo = fs.FileInfo
)
|
|
||||||
|
|
||||||
// Sentinel errors re-exported from io/fs.
var (
	ErrInvalid    = fs.ErrInvalid    // "invalid argument"
	ErrPermission = fs.ErrPermission // "permission denied"
	ErrExist      = fs.ErrExist      // "file already exists"
	ErrNotExist   = fs.ErrNotExist   // "file does not exist"
	ErrClosed     = fs.ErrClosed     // "file already closed"
)
|
|
||||||
|
|
||||||
// WalkDirFunc is [fs.WalkDirFunc].
type WalkDirFunc = fs.WalkDirFunc

// Control sentinels re-exported from io/fs for use by WalkDirFunc callbacks.
var (
	// SkipAll is [fs.SkipAll].
	SkipAll = fs.SkipAll //nolint:errname

	// SkipDir is [fs.SkipDir].
	SkipDir = fs.SkipDir //nolint:errname
)
|
|
||||||
@ -1,536 +0,0 @@
|
|||||||
// Code generated by moq; DO NOT EDIT.
|
|
||||||
// github.com/matryer/moq
|
|
||||||
|
|
||||||
package vfsmock
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensure, that FSMock does implement vfs.FS.
// If this is not the case, regenerate this file with moq.
var _ vfs.FS = &FSMock{}

// FSMock is a mock implementation of vfs.FS.
//
// Configure it by assigning the XxxFunc hooks for the methods under test;
// calling a method whose hook is nil panics. Each method records its
// arguments, retrievable afterwards via the corresponding XxxCalls accessor:
//
//	mockedFS := &FSMock{
//		ReadFileFunc: func(path string) (string, bool) {
//			panic("mock out the ReadFile method")
//		},
//		// ... one XxxFunc field per vfs.FS method ...
//	}
//	// use mockedFS in code that requires vfs.FS and then make assertions.
type FSMock struct {
	// ChtimesFunc mocks the Chtimes method.
	ChtimesFunc func(path string, aTime time.Time, mTime time.Time) error

	// DirectoryExistsFunc mocks the DirectoryExists method.
	DirectoryExistsFunc func(path string) bool

	// FileExistsFunc mocks the FileExists method.
	FileExistsFunc func(path string) bool

	// GetAccessibleEntriesFunc mocks the GetAccessibleEntries method.
	GetAccessibleEntriesFunc func(path string) vfs.Entries

	// ReadFileFunc mocks the ReadFile method.
	ReadFileFunc func(path string) (string, bool)

	// RealpathFunc mocks the Realpath method.
	RealpathFunc func(path string) string

	// RemoveFunc mocks the Remove method.
	RemoveFunc func(path string) error

	// StatFunc mocks the Stat method.
	StatFunc func(path string) vfs.FileInfo

	// UseCaseSensitiveFileNamesFunc mocks the UseCaseSensitiveFileNames method.
	UseCaseSensitiveFileNamesFunc func() bool

	// WalkDirFunc mocks the WalkDir method.
	WalkDirFunc func(root string, walkFn vfs.WalkDirFunc) error

	// WriteFileFunc mocks the WriteFile method.
	WriteFileFunc func(path string, data string, writeByteOrderMark bool) error

	// calls tracks calls to the methods.
	calls struct {
		// Chtimes holds details about calls to the Chtimes method.
		Chtimes []struct {
			// Path is the path argument value.
			Path string
			// ATime is the aTime argument value.
			ATime time.Time
			// MTime is the mTime argument value.
			MTime time.Time
		}
		// DirectoryExists holds details about calls to the DirectoryExists method.
		DirectoryExists []struct {
			// Path is the path argument value.
			Path string
		}
		// FileExists holds details about calls to the FileExists method.
		FileExists []struct {
			// Path is the path argument value.
			Path string
		}
		// GetAccessibleEntries holds details about calls to the GetAccessibleEntries method.
		GetAccessibleEntries []struct {
			// Path is the path argument value.
			Path string
		}
		// ReadFile holds details about calls to the ReadFile method.
		ReadFile []struct {
			// Path is the path argument value.
			Path string
		}
		// Realpath holds details about calls to the Realpath method.
		Realpath []struct {
			// Path is the path argument value.
			Path string
		}
		// Remove holds details about calls to the Remove method.
		Remove []struct {
			// Path is the path argument value.
			Path string
		}
		// Stat holds details about calls to the Stat method.
		Stat []struct {
			// Path is the path argument value.
			Path string
		}
		// UseCaseSensitiveFileNames holds details about calls to the UseCaseSensitiveFileNames method.
		UseCaseSensitiveFileNames []struct{}
		// WalkDir holds details about calls to the WalkDir method.
		WalkDir []struct {
			// Root is the root argument value.
			Root string
			// WalkFn is the walkFn argument value.
			WalkFn vfs.WalkDirFunc
		}
		// WriteFile holds details about calls to the WriteFile method.
		WriteFile []struct {
			// Path is the path argument value.
			Path string
			// Data is the data argument value.
			Data string
			// WriteByteOrderMark is the writeByteOrderMark argument value.
			WriteByteOrderMark bool
		}
	}
	// One lock per method, each guarding the matching calls slice above.
	lockChtimes                   sync.RWMutex
	lockDirectoryExists           sync.RWMutex
	lockFileExists                sync.RWMutex
	lockGetAccessibleEntries      sync.RWMutex
	lockReadFile                  sync.RWMutex
	lockRealpath                  sync.RWMutex
	lockRemove                    sync.RWMutex
	lockStat                      sync.RWMutex
	lockUseCaseSensitiveFileNames sync.RWMutex
	lockWalkDir                   sync.RWMutex
	lockWriteFile                 sync.RWMutex
}
|
|
||||||
|
|
||||||
// Chtimes calls ChtimesFunc.
func (mock *FSMock) Chtimes(path string, aTime time.Time, mTime time.Time) error {
	if mock.ChtimesFunc == nil {
		panic("FSMock.ChtimesFunc: method is nil but FS.Chtimes was just called")
	}
	callInfo := struct {
		Path  string
		ATime time.Time
		MTime time.Time
	}{
		Path:  path,
		ATime: aTime,
		MTime: mTime,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockChtimes.Lock()
	mock.calls.Chtimes = append(mock.calls.Chtimes, callInfo)
	mock.lockChtimes.Unlock()
	return mock.ChtimesFunc(path, aTime, mTime)
}
|
|
||||||
|
|
||||||
// ChtimesCalls gets all the calls that were made to Chtimes.
// Check the length with:
//
//	len(mockedFS.ChtimesCalls())
func (mock *FSMock) ChtimesCalls() []struct {
	Path  string
	ATime time.Time
	MTime time.Time
} {
	var calls []struct {
		Path  string
		ATime time.Time
		MTime time.Time
	}
	// Snapshot the slice header under the read lock; the log is append-only,
	// so the snapshot stays valid after unlock.
	mock.lockChtimes.RLock()
	calls = mock.calls.Chtimes
	mock.lockChtimes.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// DirectoryExists calls DirectoryExistsFunc.
func (mock *FSMock) DirectoryExists(path string) bool {
	if mock.DirectoryExistsFunc == nil {
		panic("FSMock.DirectoryExistsFunc: method is nil but FS.DirectoryExists was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockDirectoryExists.Lock()
	mock.calls.DirectoryExists = append(mock.calls.DirectoryExists, callInfo)
	mock.lockDirectoryExists.Unlock()
	return mock.DirectoryExistsFunc(path)
}
|
|
||||||
|
|
||||||
// DirectoryExistsCalls gets all the calls that were made to DirectoryExists.
// Check the length with:
//
//	len(mockedFS.DirectoryExistsCalls())
func (mock *FSMock) DirectoryExistsCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockDirectoryExists.RLock()
	calls = mock.calls.DirectoryExists
	mock.lockDirectoryExists.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// FileExists calls FileExistsFunc.
func (mock *FSMock) FileExists(path string) bool {
	if mock.FileExistsFunc == nil {
		panic("FSMock.FileExistsFunc: method is nil but FS.FileExists was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockFileExists.Lock()
	mock.calls.FileExists = append(mock.calls.FileExists, callInfo)
	mock.lockFileExists.Unlock()
	return mock.FileExistsFunc(path)
}
|
|
||||||
|
|
||||||
// FileExistsCalls gets all the calls that were made to FileExists.
// Check the length with:
//
//	len(mockedFS.FileExistsCalls())
func (mock *FSMock) FileExistsCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockFileExists.RLock()
	calls = mock.calls.FileExists
	mock.lockFileExists.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// GetAccessibleEntries calls GetAccessibleEntriesFunc.
func (mock *FSMock) GetAccessibleEntries(path string) vfs.Entries {
	if mock.GetAccessibleEntriesFunc == nil {
		panic("FSMock.GetAccessibleEntriesFunc: method is nil but FS.GetAccessibleEntries was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockGetAccessibleEntries.Lock()
	mock.calls.GetAccessibleEntries = append(mock.calls.GetAccessibleEntries, callInfo)
	mock.lockGetAccessibleEntries.Unlock()
	return mock.GetAccessibleEntriesFunc(path)
}
|
|
||||||
|
|
||||||
// GetAccessibleEntriesCalls gets all the calls that were made to GetAccessibleEntries.
// Check the length with:
//
//	len(mockedFS.GetAccessibleEntriesCalls())
func (mock *FSMock) GetAccessibleEntriesCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockGetAccessibleEntries.RLock()
	calls = mock.calls.GetAccessibleEntries
	mock.lockGetAccessibleEntries.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// ReadFile calls ReadFileFunc.
func (mock *FSMock) ReadFile(path string) (string, bool) {
	if mock.ReadFileFunc == nil {
		panic("FSMock.ReadFileFunc: method is nil but FS.ReadFile was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockReadFile.Lock()
	mock.calls.ReadFile = append(mock.calls.ReadFile, callInfo)
	mock.lockReadFile.Unlock()
	return mock.ReadFileFunc(path)
}
|
|
||||||
|
|
||||||
// ReadFileCalls gets all the calls that were made to ReadFile.
// Check the length with:
//
//	len(mockedFS.ReadFileCalls())
func (mock *FSMock) ReadFileCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockReadFile.RLock()
	calls = mock.calls.ReadFile
	mock.lockReadFile.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// Realpath calls RealpathFunc.
func (mock *FSMock) Realpath(path string) string {
	if mock.RealpathFunc == nil {
		panic("FSMock.RealpathFunc: method is nil but FS.Realpath was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockRealpath.Lock()
	mock.calls.Realpath = append(mock.calls.Realpath, callInfo)
	mock.lockRealpath.Unlock()
	return mock.RealpathFunc(path)
}
|
|
||||||
|
|
||||||
// RealpathCalls gets all the calls that were made to Realpath.
// Check the length with:
//
//	len(mockedFS.RealpathCalls())
func (mock *FSMock) RealpathCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockRealpath.RLock()
	calls = mock.calls.Realpath
	mock.lockRealpath.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// Remove calls RemoveFunc.
func (mock *FSMock) Remove(path string) error {
	if mock.RemoveFunc == nil {
		panic("FSMock.RemoveFunc: method is nil but FS.Remove was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockRemove.Lock()
	mock.calls.Remove = append(mock.calls.Remove, callInfo)
	mock.lockRemove.Unlock()
	return mock.RemoveFunc(path)
}
|
|
||||||
|
|
||||||
// RemoveCalls gets all the calls that were made to Remove.
// Check the length with:
//
//	len(mockedFS.RemoveCalls())
func (mock *FSMock) RemoveCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockRemove.RLock()
	calls = mock.calls.Remove
	mock.lockRemove.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// Stat calls StatFunc.
func (mock *FSMock) Stat(path string) vfs.FileInfo {
	if mock.StatFunc == nil {
		panic("FSMock.StatFunc: method is nil but FS.Stat was just called")
	}
	callInfo := struct {
		Path string
	}{
		Path: path,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockStat.Lock()
	mock.calls.Stat = append(mock.calls.Stat, callInfo)
	mock.lockStat.Unlock()
	return mock.StatFunc(path)
}
|
|
||||||
|
|
||||||
// StatCalls gets all the calls that were made to Stat.
// Check the length with:
//
//	len(mockedFS.StatCalls())
func (mock *FSMock) StatCalls() []struct {
	Path string
} {
	var calls []struct {
		Path string
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockStat.RLock()
	calls = mock.calls.Stat
	mock.lockStat.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// UseCaseSensitiveFileNames calls UseCaseSensitiveFileNamesFunc.
func (mock *FSMock) UseCaseSensitiveFileNames() bool {
	if mock.UseCaseSensitiveFileNamesFunc == nil {
		panic("FSMock.UseCaseSensitiveFileNamesFunc: method is nil but FS.UseCaseSensitiveFileNames was just called")
	}
	// No arguments; record only the fact that a call happened.
	callInfo := struct{}{}
	mock.lockUseCaseSensitiveFileNames.Lock()
	mock.calls.UseCaseSensitiveFileNames = append(mock.calls.UseCaseSensitiveFileNames, callInfo)
	mock.lockUseCaseSensitiveFileNames.Unlock()
	return mock.UseCaseSensitiveFileNamesFunc()
}
|
|
||||||
|
|
||||||
// UseCaseSensitiveFileNamesCalls gets all the calls that were made to UseCaseSensitiveFileNames.
// Check the length with:
//
//	len(mockedFS.UseCaseSensitiveFileNamesCalls())
func (mock *FSMock) UseCaseSensitiveFileNamesCalls() []struct{} {
	var calls []struct{}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockUseCaseSensitiveFileNames.RLock()
	calls = mock.calls.UseCaseSensitiveFileNames
	mock.lockUseCaseSensitiveFileNames.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// WalkDir calls WalkDirFunc.
func (mock *FSMock) WalkDir(root string, walkFn vfs.WalkDirFunc) error {
	if mock.WalkDirFunc == nil {
		panic("FSMock.WalkDirFunc: method is nil but FS.WalkDir was just called")
	}
	callInfo := struct {
		Root   string
		WalkFn vfs.WalkDirFunc
	}{
		Root:   root,
		WalkFn: walkFn,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockWalkDir.Lock()
	mock.calls.WalkDir = append(mock.calls.WalkDir, callInfo)
	mock.lockWalkDir.Unlock()
	return mock.WalkDirFunc(root, walkFn)
}
|
|
||||||
|
|
||||||
// WalkDirCalls gets all the calls that were made to WalkDir.
// Check the length with:
//
//	len(mockedFS.WalkDirCalls())
func (mock *FSMock) WalkDirCalls() []struct {
	Root   string
	WalkFn vfs.WalkDirFunc
} {
	var calls []struct {
		Root   string
		WalkFn vfs.WalkDirFunc
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockWalkDir.RLock()
	calls = mock.calls.WalkDir
	mock.lockWalkDir.RUnlock()
	return calls
}
|
|
||||||
|
|
||||||
// WriteFile calls WriteFileFunc.
func (mock *FSMock) WriteFile(path string, data string, writeByteOrderMark bool) error {
	if mock.WriteFileFunc == nil {
		panic("FSMock.WriteFileFunc: method is nil but FS.WriteFile was just called")
	}
	callInfo := struct {
		Path               string
		Data               string
		WriteByteOrderMark bool
	}{
		Path:               path,
		Data:               data,
		WriteByteOrderMark: writeByteOrderMark,
	}
	// Record the call before delegating; the lock is released before the
	// user-supplied hook runs.
	mock.lockWriteFile.Lock()
	mock.calls.WriteFile = append(mock.calls.WriteFile, callInfo)
	mock.lockWriteFile.Unlock()
	return mock.WriteFileFunc(path, data, writeByteOrderMark)
}
|
|
||||||
|
|
||||||
// WriteFileCalls gets all the calls that were made to WriteFile.
// Check the length with:
//
//	len(mockedFS.WriteFileCalls())
func (mock *FSMock) WriteFileCalls() []struct {
	Path               string
	Data               string
	WriteByteOrderMark bool
} {
	var calls []struct {
		Path               string
		Data               string
		WriteByteOrderMark bool
	}
	// Snapshot the slice header under the read lock; the log is append-only.
	mock.lockWriteFile.RLock()
	calls = mock.calls.WriteFile
	mock.lockWriteFile.RUnlock()
	return calls
}
|
|
||||||
@ -1,20 +0,0 @@
|
|||||||
package vfsmock
|
|
||||||
|
|
||||||
import "efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
|
|
||||||
// Wrap wraps a vfs.FS and returns a FSMock which calls it.
|
|
||||||
func Wrap(fs vfs.FS) *FSMock {
|
|
||||||
return &FSMock{
|
|
||||||
DirectoryExistsFunc: fs.DirectoryExists,
|
|
||||||
FileExistsFunc: fs.FileExists,
|
|
||||||
GetAccessibleEntriesFunc: fs.GetAccessibleEntries,
|
|
||||||
ReadFileFunc: fs.ReadFile,
|
|
||||||
RealpathFunc: fs.Realpath,
|
|
||||||
RemoveFunc: fs.Remove,
|
|
||||||
ChtimesFunc: fs.Chtimes,
|
|
||||||
StatFunc: fs.Stat,
|
|
||||||
UseCaseSensitiveFileNamesFunc: fs.UseCaseSensitiveFileNames,
|
|
||||||
WalkDirFunc: fs.WalkDir,
|
|
||||||
WriteFileFunc: fs.WriteFile,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,614 +0,0 @@
|
|||||||
package vfstest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/fs"
|
|
||||||
"iter"
|
|
||||||
"maps"
|
|
||||||
"path"
|
|
||||||
"slices"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"testing/fstest"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/tspath"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs"
|
|
||||||
"efprojects.com/kitten-ipc/kitcom/internal/tsgo/vfs/iovfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MapFS is an in-memory filesystem backed by an [fstest.MapFS], with
// optional case-insensitive lookups and symlink support.
type MapFS struct {
	// mu protects m.
	// A single mutex is sufficient as we only use fstest.Map's Open method.
	mu sync.RWMutex

	// keys in m are canonicalPaths
	m fstest.MapFS

	// useCaseSensitiveFileNames controls how paths are canonicalized.
	useCaseSensitiveFileNames bool

	// symlinks maps a link's canonical path to its target's canonical path.
	symlinks map[canonicalPath]canonicalPath

	// clock supplies ModTime values for newly created entries.
	clock Clock
}
|
|
||||||
|
|
||||||
// Clock abstracts time for MapFS so tests can control file timestamps.
type Clock interface {
	// Now returns the current time.
	Now() time.Time
	// SinceStart returns the elapsed time since the clock was created.
	SinceStart() time.Duration
}
|
|
||||||
|
|
||||||
// clockImpl is the default Clock, backed by the real wall clock.
type clockImpl struct {
	start time.Time // creation instant; baseline for SinceStart
}
|
|
||||||
|
|
||||||
// Now returns the current wall-clock time; start is not consulted here.
func (c *clockImpl) Now() time.Time {
	return time.Now()
}
|
|
||||||
|
|
||||||
// SinceStart returns the wall-clock time elapsed since the clock was created.
func (c *clockImpl) SinceStart() time.Duration {
	return time.Since(c.start)
}
|
|
||||||
|
|
||||||
// Compile-time checks that *MapFS satisfies the iovfs interfaces it is
// handed to in FromMapWithClock.
var (
	_ iovfs.RealpathFS = (*MapFS)(nil)
	_ iovfs.WritableFS = (*MapFS)(nil)
)
|
|
||||||
|
|
||||||
// sys is stored in fstest.MapFile.Sys for every entry (see setEntry),
// carrying the entry's original-cased realpath alongside whatever Sys value
// the caller originally set.
type sys struct {
	original any    // caller-supplied Sys value, if any
	realpath string // the entry's path with original casing
}
|
|
||||||
|
|
||||||
// FromMap creates a new [vfs.FS] from a map of paths to file contents.
|
|
||||||
// Those file contents may be strings, byte slices, or [fstest.MapFile]s.
|
|
||||||
//
|
|
||||||
// The paths must be normalized absolute paths according to the tspath package,
|
|
||||||
// without trailing directory separators.
|
|
||||||
// The paths must be all POSIX-style or all Windows-style, but not both.
|
|
||||||
func FromMap[File any](m map[string]File, useCaseSensitiveFileNames bool) vfs.FS {
|
|
||||||
return FromMapWithClock(m, useCaseSensitiveFileNames, &clockImpl{start: time.Now()})
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromMapWithClock creates a new [vfs.FS] from a map of paths to file contents.
// Those file contents may be strings, byte slices, or [fstest.MapFile]s.
//
// The paths must be normalized absolute paths according to the tspath package,
// without trailing directory separators.
// The paths must be all POSIX-style or all Windows-style, but not both.
func FromMapWithClock[File any](m map[string]File, useCaseSensitiveFileNames bool, clock Clock) vfs.FS {
	posix := false
	windows := false

	// checkPath panics on malformed input and records which path flavor was seen.
	checkPath := func(p string) {
		if !tspath.IsRootedDiskPath(p) {
			panic(fmt.Sprintf("non-rooted path %q", p))
		}

		if normal := tspath.RemoveTrailingDirectorySeparator(tspath.NormalizePath(p)); normal != p {
			panic(fmt.Sprintf("non-normalized path %q", p))
		}

		if strings.HasPrefix(p, "/") {
			posix = true
		} else {
			windows = true
		}
	}

	mfs := make(fstest.MapFS, len(m))
	// Sorted creation to ensure times are always guaranteed to be in order.
	keys := slices.Collect(maps.Keys(m))
	slices.SortFunc(keys, comparePathsByParts)
	for _, p := range keys {
		f := m[p]
		checkPath(p)

		var file *fstest.MapFile
		switch f := any(f).(type) {
		case string:
			file = &fstest.MapFile{Data: []byte(f), ModTime: clock.Now()}
		case []byte:
			file = &fstest.MapFile{Data: f, ModTime: clock.Now()}
		case *fstest.MapFile:
			// Copy so the caller's MapFile is not mutated by the ModTime stamp.
			fCopy := *f
			fCopy.ModTime = clock.Now()
			file = &fCopy
		default:
			panic(fmt.Sprintf("invalid file type %T", f))
		}

		if file.Mode&fs.ModeSymlink != 0 {
			// Symlink targets are carried in Data; validate them like any path.
			target := string(file.Data)
			checkPath(target)

			// fstest.MapFS keys are unrooted, so strip the leading slash from
			// the stored target to match the key format used below.
			target, _ = strings.CutPrefix(target, "/")
			fileCopy := *file
			fileCopy.Data = []byte(target)
			file = &fileCopy
		}

		p, _ = strings.CutPrefix(p, "/")
		mfs[p] = file
	}

	if posix && windows {
		panic("mixed posix and windows paths")
	}

	return iovfs.From(convertMapFS(mfs, useCaseSensitiveFileNames, clock), useCaseSensitiveFileNames)
}
|
|
||||||
|
|
||||||
// convertMapFS builds a *MapFS from a raw fstest.MapFS: it rejects inputs
// that collide on canonical path and materializes missing intermediate
// directories so every entry carries its original-cased realpath. A nil
// clock falls back to the real wall clock.
func convertMapFS(input fstest.MapFS, useCaseSensitiveFileNames bool, clock Clock) *MapFS {
	if clock == nil {
		clock = &clockImpl{start: time.Now()}
	}
	m := &MapFS{
		m:                         make(fstest.MapFS, len(input)),
		useCaseSensitiveFileNames: useCaseSensitiveFileNames,
		clock:                     clock,
	}

	// Verify that the input is well-formed.
	canonicalPaths := make(map[canonicalPath]string, len(input))
	for path := range input {
		canonical := m.getCanonicalPath(path)
		if other, ok := canonicalPaths[canonical]; ok {
			// Ensure consistent panic messages
			path, other = min(path, other), max(path, other)
			panic(fmt.Sprintf("duplicate path: %q and %q have the same canonical path", path, other))
		}
		canonicalPaths[canonical] = path
	}

	// Sort the input by depth and path so we ensure parent dirs are created
	// before their children, if explicitly specified by the input.
	inputKeys := slices.Collect(maps.Keys(input))
	slices.SortFunc(inputKeys, comparePathsByParts)

	for _, p := range inputKeys {
		file := input[p]

		// Create all missing intermediate directories so we can attach the realpath to each of them.
		// fstest.MapFS doesn't require this as it synthesizes directories on the fly, but it's a lot
		// harder to reapply a realpath onto those when we're deep in some FileInfo method.
		if dir := dirName(p); dir != "" {
			if err := m.mkdirAll(dir, 0o777); err != nil {
				panic(fmt.Sprintf("failed to create intermediate directories for %q: %v", p, err))
			}
		}
		m.setEntry(p, m.getCanonicalPath(p), *file)
	}

	return m
}
|
|
||||||
|
|
||||||
// comparePathsByParts orders two slash-separated paths segment by segment,
// so a shorter path (a parent) sorts before the paths beneath it.
func comparePathsByParts(a, b string) int {
	for {
		aHead, aRest, aMore := strings.Cut(a, "/")
		bHead, bRest, bMore := strings.Cut(b, "/")

		// Once either side runs out of separators, fall back to a plain
		// lexicographic comparison of whatever remains.
		if !aMore || !bMore {
			return strings.Compare(a, b)
		}

		if c := strings.Compare(aHead, bHead); c != 0 {
			return c
		}

		a, b = aRest, bRest
	}
}
|
|
||||||
|
|
||||||
// canonicalPath is a path in the form used as a map key: canonicalized via
// MapFS.getCanonicalPath (case-folded when the FS is case-insensitive).
type canonicalPath string
|
|
||||||
|
|
||||||
// getCanonicalPath converts p into the lookup key used by m.m and m.symlinks.
func (m *MapFS) getCanonicalPath(p string) canonicalPath {
	return canonicalPath(tspath.GetCanonicalFileName(p, m.useCaseSensitiveFileNames))
}
|
|
||||||
|
|
||||||
// open opens the already-canonicalized path p via the underlying fstest.MapFS.
func (m *MapFS) open(p canonicalPath) (fs.File, error) {
	return m.m.Open(string(p))
}
|
|
||||||
|
|
||||||
func (m *MapFS) remove(path string) error {
|
|
||||||
canonical := m.getCanonicalPath(path)
|
|
||||||
canonicalString := string(canonical)
|
|
||||||
fileInfo := m.m[canonicalString]
|
|
||||||
if fileInfo == nil {
|
|
||||||
// file does not exist
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
delete(m.m, canonicalString)
|
|
||||||
delete(m.symlinks, canonical)
|
|
||||||
|
|
||||||
if fileInfo.Mode.IsDir() {
|
|
||||||
canonicalString += "/"
|
|
||||||
for path := range m.m {
|
|
||||||
if strings.HasPrefix(path, canonicalString) {
|
|
||||||
delete(m.m, path)
|
|
||||||
delete(m.symlinks, canonicalPath(path))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Symlink returns a MapFile describing a symbolic link whose target path is
// stored in Data, as expected by FromMap and setEntry.
func Symlink(target string) *fstest.MapFile {
	link := fstest.MapFile{
		Mode: fs.ModeSymlink,
		Data: []byte(target),
	}
	return &link
}
|
|
||||||
|
|
||||||
// getFollowingSymlinks resolves p to its target file, following symlinks.
// See getFollowingSymlinksWorker for the resolution rules.
func (m *MapFS) getFollowingSymlinks(p canonicalPath) (*fstest.MapFile, canonicalPath, error) {
	return m.getFollowingSymlinksWorker(p, "", "")
}
|
|
||||||
|
|
||||||
// brokenSymlinkError reports a symlink whose target could not be resolved.
type brokenSymlinkError struct {
	from, to canonicalPath // the link and the missing target it points at
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
func (e *brokenSymlinkError) Error() string {
	return fmt.Sprintf("broken symlink %q -> %q", e.from, e.to)
}
|
|
||||||
|
|
||||||
// getFollowingSymlinksWorker resolves p, following any symlinks encountered.
// symlinkFrom/symlinkTo remember the most recently followed link so a
// dangling link can be reported precisely; both are empty on the first call.
// NOTE(review): there is no cycle detection — a symlink loop would recurse
// without bound; presumably fixtures never contain cycles.
func (m *MapFS) getFollowingSymlinksWorker(p canonicalPath, symlinkFrom, symlinkTo canonicalPath) (*fstest.MapFile, canonicalPath, error) {
	// A non-symlink entry at p means resolution is complete.
	if file, ok := m.m[string(p)]; ok && file.Mode&fs.ModeSymlink == 0 {
		return file, p, nil
	}

	// p itself is a symlink: follow it.
	if target, ok := m.symlinks[p]; ok {
		return m.getFollowingSymlinksWorker(target, p, target)
	}

	// This could be a path underneath a symlinked directory.
	for other, target := range m.symlinks {
		if len(other) < len(p) && other == p[:len(other)] && p[len(other)] == '/' {
			return m.getFollowingSymlinksWorker(target+p[len(other):], other, target)
		}
	}

	err := fs.ErrNotExist
	if symlinkFrom != "" {
		// We arrived here by following a link, so report it as broken
		// rather than a plain "does not exist".
		err = &brokenSymlinkError{symlinkFrom, symlinkTo}
	}
	return nil, p, err
}
|
|
||||||
|
|
||||||
// set installs file at canonical path p in the backing map.
func (m *MapFS) set(p canonicalPath, file *fstest.MapFile) {
	m.m[string(p)] = file
}
|
|
||||||
|
|
||||||
// setEntry stores file under canonical, wrapping its Sys value so the
// original-cased realpath travels with the entry, and registers the link
// target in m.symlinks when the file is a symlink.
func (m *MapFS) setEntry(realpath string, canonical canonicalPath, file fstest.MapFile) {
	if realpath == "" || canonical == "" {
		panic("empty path")
	}

	// Preserve the caller's Sys value while attaching the realpath.
	file.Sys = &sys{
		original: file.Sys,
		realpath: realpath,
	}
	m.set(canonical, &file)

	if file.Mode&fs.ModeSymlink != 0 {
		// Lazily allocate the symlink table; Data holds the link's target path.
		if m.symlinks == nil {
			m.symlinks = make(map[canonicalPath]canonicalPath)
		}
		m.symlinks[canonical] = m.getCanonicalPath(string(file.Data))
	}
}
|
|
||||||
|
|
||||||
// splitPath splits s at the first '/' that occurs at or after offset.
// before holds everything up to (excluding) that separator and after holds
// the remainder; when no separator exists, before is all of s and after is "".
func splitPath(s string, offset int) (before, after string) {
	i := strings.IndexByte(s[offset:], '/')
	if i < 0 {
		before, after = s, ""
		return
	}
	cut := offset + i
	before, after = s[:cut], s[cut+1:]
	return
}
|
|
||||||
|
|
||||||
// dirName returns everything before the last '/' in p, without a trailing
// separator; a path with no separator has an empty directory.
func dirName(p string) string {
	i := strings.LastIndexByte(p, '/')
	if i < 0 {
		return ""
	}
	return p[:i]
}
|
|
||||||
|
|
||||||
// baseName returns the part of p after its last '/'; a path with no
// separator is returned unchanged.
func baseName(p string) string {
	i := strings.LastIndexByte(p, '/')
	if i < 0 {
		return p
	}
	return p[i+1:]
}
|
|
||||||
|
|
||||||
// mkdirAll creates directory p and any missing parents, resolving symlinked
// ancestors to their real location first. It fails if an existing component
// is not a directory; an already-existing directory is a no-op.
func (m *MapFS) mkdirAll(p string, perm fs.FileMode) error {
	if p == "" {
		panic("empty path")
	}

	// Fast path; already exists.
	if other, _, err := m.getFollowingSymlinks(m.getCanonicalPath(p)); err == nil {
		if !other.Mode.IsDir() {
			return fmt.Errorf("mkdir %q: path exists but is not a directory", p)
		}
		return nil
	}

	// Walk p segment by segment, collecting missing components; if a
	// symlinked ancestor redirects us, rewrite p to the real location and
	// start over.
	var toCreate []string
	offset := 0
	for {
		dir, rest := splitPath(p, offset)
		canonical := m.getCanonicalPath(dir)
		other, otherPath, err := m.getFollowingSymlinks(canonical)
		if err != nil {
			if !errors.Is(err, fs.ErrNotExist) {
				return err
			}
			toCreate = append(toCreate, dir)
		} else {
			if !other.Mode.IsDir() {
				return fmt.Errorf("mkdir %q: path exists but is not a directory", otherPath)
			}
			if canonical != otherPath {
				// We have a symlinked parent, reset and start again.
				p = other.Sys.(*sys).realpath + "/" + rest
				toCreate = toCreate[:0]
				offset = 0
				continue
			}
		}
		if rest == "" {
			break
		}
		offset = len(dir) + 1
	}

	// Parents precede children in toCreate since the scan went left to right.
	for _, dir := range toCreate {
		m.setEntry(dir, m.getCanonicalPath(dir), fstest.MapFile{
			// umask is a package-level constant defined elsewhere in this file.
			Mode:    fs.ModeDir | perm&^umask,
			ModTime: m.clock.Now(),
		})
	}

	return nil
}
|
|
||||||
|
|
||||||
// fileInfo wraps an fs.FileInfo so that Name and Sys reflect the entry's
// realpath and original Sys value instead of the canonical-path view kept
// internally (see the *sys marker stored on entries).
type fileInfo struct {
	fs.FileInfo
	sys      any    // original Sys value, as captured before wrapping
	realpath string // resolved, case-preserved path of the entry
}
|
|
||||||
|
|
||||||
func (fi *fileInfo) Name() string {
|
|
||||||
return baseName(fi.realpath)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fi *fileInfo) Sys() any {
|
|
||||||
return fi.sys
|
|
||||||
}
|
|
||||||
|
|
||||||
// file wraps an fs.File so Stat returns a realpath-aware fileInfo.
type file struct {
	fs.File
	fileInfo *fileInfo // precomputed info returned from Stat
}
|
|
||||||
|
|
||||||
func (f *file) Stat() (fs.FileInfo, error) {
|
|
||||||
return f.fileInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readDirFile wraps an fs.ReadDirFile so both Stat and ReadDir report
// realpath-based names.
type readDirFile struct {
	fs.ReadDirFile
	fileInfo *fileInfo // precomputed info returned from Stat
}
|
|
||||||
|
|
||||||
func (f *readDirFile) Stat() (fs.FileInfo, error) {
|
|
||||||
return f.fileInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
|
|
||||||
list, err := f.ReadDirFile.ReadDir(n)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
entries := make([]fs.DirEntry, len(list))
|
|
||||||
for i, entry := range list {
|
|
||||||
info := must(entry.Info())
|
|
||||||
newInfo, ok := convertInfo(info)
|
|
||||||
if !ok {
|
|
||||||
panic(fmt.Sprintf("unexpected synthesized dir: %q", info.Name()))
|
|
||||||
}
|
|
||||||
entries[i] = fs.FileInfoToDirEntry(newInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open implements fs.FS for MapFS. The lookup follows symlinks, and the
// returned file's Stat (and ReadDir, for directories) report names based on
// the entry's realpath rather than its canonical path. Only the root "."
// is allowed to be a synthesized directory without the internal *sys
// marker; anywhere else that is treated as a bug and panics.
func (m *MapFS) Open(name string) (fs.File, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	// Resolution errors are deliberately dropped here: m.open on the
	// (possibly unresolved) canonical path below is what produces the
	// user-visible error for missing paths.
	_, cp, _ := m.getFollowingSymlinks(m.getCanonicalPath(name))
	f, err := m.open(cp)
	if err != nil {
		return nil, err
	}

	// Stat on a file we just opened is expected to succeed.
	info := must(f.Stat())

	newInfo, ok := convertInfo(info)
	if !ok {
		// This is a synthesized dir.
		if name != "." {
			panic(fmt.Sprintf("unexpected synthesized dir: %q", name))
		}

		// Wrap the synthesized root so it still reports "." and keeps the
		// underlying Sys value.
		return &readDirFile{
			ReadDirFile: f.(fs.ReadDirFile),
			fileInfo: &fileInfo{
				FileInfo: info,
				sys:      info.Sys(),
				realpath: ".",
			},
		}, nil
	}

	// Directories keep ReadDir support; everything else gets the plain
	// file wrapper.
	if f, ok := f.(fs.ReadDirFile); ok {
		return &readDirFile{
			ReadDirFile: f,
			fileInfo:    newInfo,
		}, nil
	}

	return &file{
		File:     f,
		fileInfo: newInfo,
	}, nil
}
|
|
||||||
|
|
||||||
func (m *MapFS) Realpath(name string) (string, error) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
|
|
||||||
file, _, err := m.getFollowingSymlinks(m.getCanonicalPath(name))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return file.Sys.(*sys).realpath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertInfo(info fs.FileInfo) (*fileInfo, bool) {
|
|
||||||
sys, ok := info.Sys().(*sys)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
return &fileInfo{
|
|
||||||
FileInfo: info,
|
|
||||||
sys: sys.original,
|
|
||||||
realpath: sys.realpath,
|
|
||||||
}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// umask holds the permission bits masked off of newly created files and
// directories (applied as perm &^ umask); 0o022 matches the common POSIX
// default.
const umask = 0o022
|
|
||||||
|
|
||||||
func (m *MapFS) MkdirAll(path string, perm fs.FileMode) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
|
|
||||||
return m.mkdirAll(path, perm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteFile creates or replaces the regular file at path with data,
// applying perm&^umask and refreshing ModTime. The parent directory must
// already exist and be a directory; writing through a symlink updates the
// resolved target, and writing where a broken symlink or nothing exists
// creates a fresh entry.
func (m *MapFS) WriteFile(path string, data []byte, perm fs.FileMode) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Validate the parent first so the error blames the right path.
	if parent := dirName(path); parent != "" {
		canonical := m.getCanonicalPath(parent)
		parentFile, _, err := m.getFollowingSymlinks(canonical)
		if err != nil {
			return fmt.Errorf("write %q: %w", path, err)
		}
		if !parentFile.Mode.IsDir() {
			return fmt.Errorf("write %q: parent path exists but is not a directory", path)
		}
	}

	// Resolve the target itself. Not existing (or dangling through a broken
	// symlink) is fine — we are about to create it.
	file, cp, err := m.getFollowingSymlinks(m.getCanonicalPath(path))
	if err != nil {
		var brokenSymlinkError *brokenSymlinkError
		if !errors.Is(err, fs.ErrNotExist) && !errors.As(err, &brokenSymlinkError) {
			// No other errors are possible.
			panic(err)
		}
	} else {
		if !file.Mode.IsRegular() {
			return fmt.Errorf("write %q: path exists but is not a regular file", path)
		}
	}

	// cp is the resolved canonical path, so symlinked writes land on the
	// target entry.
	m.setEntry(path, cp, fstest.MapFile{
		Data:    data,
		ModTime: m.clock.Now(),
		Mode:    perm &^ umask,
	})

	return nil
}
|
|
||||||
|
|
||||||
func (m *MapFS) Remove(path string) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
|
|
||||||
return m.remove(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapFS) Chtimes(path string, aTime time.Time, mTime time.Time) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
canonical := m.getCanonicalPath(path)
|
|
||||||
canonicalString := string(canonical)
|
|
||||||
fileInfo := m.m[canonicalString]
|
|
||||||
if fileInfo == nil {
|
|
||||||
// file does not exist
|
|
||||||
return fs.ErrNotExist
|
|
||||||
}
|
|
||||||
fileInfo.ModTime = mTime
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapFS) GetTargetOfSymlink(path string) (string, bool) {
|
|
||||||
path, _ = strings.CutPrefix(path, "/")
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
canonical := m.getCanonicalPath(path)
|
|
||||||
canonicalString := string(canonical)
|
|
||||||
if fileInfo, ok := m.m[canonicalString]; ok {
|
|
||||||
if fileInfo.Mode&fs.ModeSymlink != 0 {
|
|
||||||
return "/" + string(fileInfo.Data), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapFS) GetModTime(path string) time.Time {
|
|
||||||
path, _ = strings.CutPrefix(path, "/")
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
canonical := m.getCanonicalPath(path)
|
|
||||||
canonicalString := string(canonical)
|
|
||||||
if fileInfo, ok := m.m[canonicalString]; ok {
|
|
||||||
return fileInfo.ModTime
|
|
||||||
}
|
|
||||||
return time.Time{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapFS) Entries() iter.Seq2[string, *fstest.MapFile] {
|
|
||||||
return func(yield func(string, *fstest.MapFile) bool) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
inputKeys := slices.Collect(maps.Keys(m.m))
|
|
||||||
slices.SortFunc(inputKeys, comparePathsByParts)
|
|
||||||
|
|
||||||
for _, p := range inputKeys {
|
|
||||||
file := m.m[p]
|
|
||||||
path := file.Sys.(*sys).realpath
|
|
||||||
if !tspath.PathIsAbsolute(path) {
|
|
||||||
path = "/" + path
|
|
||||||
}
|
|
||||||
if !yield(path, file) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapFS) GetFileInfo(path string) *fstest.MapFile {
|
|
||||||
path, _ = strings.CutPrefix(path, "/")
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
canonical := m.getCanonicalPath(path)
|
|
||||||
canonicalString := string(canonical)
|
|
||||||
return m.m[canonicalString]
|
|
||||||
}
|
|
||||||
|
|
||||||
func must[T any](v T, err error) T {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
Loading…
x
Reference in New Issue
Block a user