millan

Index

Enumerations

Classes

Interfaces

Type aliases

Variables

Functions

Object literals

Type aliases

CstNodeTraverseContext

CstNodeTraverseContext: ITraverseContext & object

Lit

Lit: string | number | boolean | undefined | null | void | symbol | __type

LocalName

LocalName: "message" | "BlankNode" | "IRI" | "optional" | "prefix" | "equals" | "disjoint" | "lessThan" | "lessThanOrEquals" | "targetClass" | "targetSubjectsOf" | "targetObjectsOf" | "class" | "datatype" | "severity" | "Literal" | "BlankNodeOrIRI" | "BlankNodeOrLiteral" | "IRIOrLiteral" | "minCount" | "maxCount" | "minLength" | "maxLength" | "qualifiedMinCount" | "qualifiedMaxCount" | "pattern" | "flags" | "namespace" | "labelTemplate" | "uniqueLang" | "qualifiedValueShapesDisjoint" | "closed" | "deactivated" | "not" | "node" | "property" | "qualifiedValueShape" | "sparql" | "declare" | "prefixes" | "parameter" | "nodeValidator" | "propertyValidator" | "validator" | "minExclusive" | "minInclusive" | "maxExclusive" | "maxInclusive" | "Shape" | "NodeShape" | "PropertyShape" | "targetNode" | "path" | "alternativePath" | "inversePath" | "zeroOrMorePath" | "oneOrMorePath" | "zeroOrOnePath" | "nodeKind" | "languageIn" | "and" | "or" | "xone" | "ignoredProperties" | "hasValue" | "in" | "select" | "ask"

ModeString

ModeString: "stardog" | "standard"

PartialRecognitionException

PartialRecognitionException: Pick<IRecognitionException, "name" | "message">

StardogSparqlParserResult

StardogSparqlParserResult: ReturnType<parse>

TokenMap

TokenMap: object

Type declaration

Variables

Const ANON

ANON: RegExp = regex.and(/\[/, regex.many(WS), /\]/)

AliasArgumentToken

AliasArgumentToken: TokenType

Const BAD_FIXTURES_DIR

BAD_FIXTURES_DIR: string = path.join(__dirname, 'fixtures', 'bad')

Const BLANK_NODE_LABEL

BLANK_NODE_LABEL: RegExp = regex.and(/_:/, regex.or(PN_CHARS_U, /\d/), regex.option(regex.and(regex.many(regex.or(PN_CHARS, /\./)), PN_CHARS)))

Const BOOLEAN_PATTERN

BOOLEAN_PATTERN: RegExp = /true|false/

BindDirectiveToken

BindDirectiveToken: TokenType

Const CATCH_ALL

CATCH_ALL: RegExp = /[\s\S]*/

Const CATCH_ALL_AT_LEAST_ONE

CATCH_ALL_AT_LEAST_ONE: RegExp = /[\s\S]+/

Const CATEGORY_PATTERN

CATEGORY_PATTERN: RegExp = /^categor(?:y|ies)/i

ConfigDirectiveToken

ConfigDirectiveToken: TokenType

Const DECIMAL

DECIMAL: RegExp = /(\d*\.\d+)|(\d+\.\d*)/

Const DECIMAL_NEGATIVE

DECIMAL_NEGATIVE: RegExp = regex.and(/-/, DECIMAL)

Const DECIMAL_POSITIVE

DECIMAL_POSITIVE: RegExp = regex.and(/\+/, DECIMAL)

Const DOUBLE

DOUBLE: RegExp = regex.or(regex.and(/\d+\.\d*/, EXPONENT), regex.and(/\.\d+/, EXPONENT), regex.and(/\d+/, EXPONENT))

Const DOUBLE_NEGATIVE

DOUBLE_NEGATIVE: RegExp = regex.and(/-/, DOUBLE)

Const DOUBLE_POSITIVE

DOUBLE_POSITIVE: RegExp = regex.and(/\+/, DOUBLE)

Const ECHAR

ECHAR: RegExp = /\\[tbnrf"'\\]/

Const ESCAPED_CHARACTER_PATTERN

ESCAPED_CHARACTER_PATTERN: RegExp = /\\["\\/bfnrt]/

Const ESCAPED_UNICODE_PATTERN

ESCAPED_UNICODE_PATTERN: RegExp = /\\u[0-9A-Fa-f]{4}/

Const EXPONENT

EXPONENT: RegExp = /[eE][+-]?\d+/

Const EXPONENT_PART_PATTERN

EXPONENT_PART_PATTERN: RegExp = /[eE][+-]?[0-9]+/

Const EndThen

EndThen: TokenType = createToken({name: 'EndThen',pattern: '}',pop_mode: true,})

Const EnumValueToken

EnumValueToken: TokenType = createToken({
  name: 'EnumValueToken',
  pattern: Lexer.NA, // pure category, no explicit match of its own
})

Const FROM_BLOCK_END_MATCHER

FROM_BLOCK_END_MATCHER: RegExp = /^\s*to\s*{/i

Const FROM_JSON_BLOCK_END_MATCHER

FROM_JSON_BLOCK_END_MATCHER: RegExp = /((?:.|\s)*?)to\s*{/i

FilterDirectiveToken

FilterDirectiveToken: TokenType

Const FragmentName

FragmentName: TokenType = createToken({name: 'FragmentName',pattern: Lexer.NA,})

Const GOOD_FIXTURES_DIR

GOOD_FIXTURES_DIR: string = path.join(__dirname, 'fixtures', 'good')

GraphArgumentToken

GraphArgumentToken: TokenType

Const GroupGraphPattern

GroupGraphPattern: TokenType = createToken({
  name: 'GroupGraphPattern', // This name is useful for error messages in real-time parsing
  pattern: (text, startOffset = 0) => {
    // Capture a single brace and then anything up to its closing brace.
    if (text[startOffset] !== '{') {
      return null;
    }
    let unclosedBraceCount = 1;
    let cursor;
    for (
      cursor = startOffset + 1;
      cursor < text.length && unclosedBraceCount > 0;
      cursor++
    ) {
      if (text[cursor] === '{') {
        unclosedBraceCount++;
      } else if (text[cursor] === '}') {
        unclosedBraceCount--;
      }
    }
    if (unclosedBraceCount > 0) {
      return null;
    }
    return CATCH_ALL_AT_LEAST_ONE.exec(text.slice(startOffset, cursor));
  },
  line_breaks: true,
  pop_mode: true,
})
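This is a chevrotain custom-pattern token: the pattern function receives the full text plus an offset and returns a RegExpExecArray or null. A minimal standalone sketch of the same balanced-brace technique (the token name below is illustrative, not part of millan's API):

import { createToken, Lexer } from 'chevrotain';

const BalancedBraces = createToken({
  name: 'BalancedBraces',
  pattern: (text: string, startOffset: number) => {
    if (text[startOffset] !== '{') {
      return null; // not at a group start
    }
    let depth = 1;
    let cursor = startOffset + 1;
    while (cursor < text.length && depth > 0) {
      if (text[cursor] === '{') depth++;
      else if (text[cursor] === '}') depth--;
      cursor++;
    }
    if (depth > 0) {
      return null; // unterminated group: no match
    }
    // Return a RegExpExecArray covering the whole balanced group.
    return /[\s\S]+/.exec(text.slice(startOffset, cursor));
  },
  line_breaks: true, // required: the custom pattern can consume newlines
});

const lexer = new Lexer([BalancedBraces]);
const { tokens } = lexer.tokenize('{ ?s ?p { ?o ?p2 ?o2 } }');
console.log(tokens[0].image); // the entire brace-balanced group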

Const HEX

HEX: RegExp = /[0-9A-Fa-f]/

Const INTEGER

INTEGER: RegExp = /\d+/

Const INTEGER_NEGATIVE

INTEGER_NEGATIVE: RegExp = regex.and(/-/, INTEGER)

Const INTEGER_PART_PATTERN

INTEGER_PART_PATTERN: RegExp = /\-?(?:0|[1-9][0-9]*)/

Const INTEGER_POSITIVE

INTEGER_POSITIVE: RegExp = regex.and(/\+/, INTEGER)

Const IRIREF

IRIREF: RegExp = /<[^<>\\{}|\^`\u0000-\u0020]*>/

Const If

If: TokenType = createToken({name: 'If',pattern: /if/i,push_mode: LexerMode.IFCLAUSE,})

IfArgumentToken

IfArgumentToken: TokenType

IncludeDirectiveToken

IncludeDirectiveToken: TokenType

Const LANGTAG

LANGTAG: RegExp = /@[a-zA-Z]+(-[a-zA-Z0-9]+)*/

Const NAME_PATTERN

NAME_PATTERN: RegExp = /[_A-Za-z][_0-9A-Za-z]*/

Const NIL

NIL: RegExp = regex.and(/\(/, regex.many(WS), /\)/)

Const NULL_PATTERN

NULL_PATTERN: RegExp = /null/

Const Name

Name: TokenType = createToken({ name: 'Name', pattern: NAME_PATTERN })

Const ON_PATTERN

ON_PATTERN: RegExp = /on/

Const PERCENT

PERCENT: RegExp = regex.and(/%/, HEX, HEX)

Const PLX

PLX: RegExp = regex.or(PERCENT, PN_LOCAL_ESC)

Const PNAME_LN

PNAME_LN: RegExp = regex.and(PNAME_NS, PN_LOCAL)

Const PNAME_LN_TOKEN

PNAME_LN_TOKEN: TokenType = createToken({name: 'PNAME_LN',pattern: PNAME_LN,})

Const PNAME_NS

PNAME_NS: RegExp = regex.and(regex.option(PN_PREFIX), /:/)

Const PN_CHARS

PN_CHARS: RegExp = regex.or(PN_CHARS_U, /-/, /\d/, /\u00b7/, /[\u0300-\u036f]/, /[\u203f-\u2040]/)

Const PN_CHARS_BASE

PN_CHARS_BASE: RegExp = /[A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]|[\uD800-\uDBFF][\uDC00-\uDFFF]/

Const PN_CHARS_U

PN_CHARS_U: RegExp = regex.or(PN_CHARS_BASE, /_/)

Const PN_LOCAL

PN_LOCAL: RegExp = regex.and(regex.or(PN_CHARS_U, /:/, /\d/, PLX), regex.option(regex.and(regex.many(regex.or(PN_CHARS, /\./, /:/, PLX)), regex.or(PN_CHARS, /:/, PLX))))

Const PN_LOCAL_ESC

PN_LOCAL_ESC: RegExp = /\\[_~.\-!\$&'()*+,=\/?#@%;]/

Const PN_PREFIX

PN_PREFIX: RegExp = regex.and(PN_CHARS_BASE, regex.option(regex.and(regex.many(regex.or(PN_CHARS, /\./)), PN_CHARS)))
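The regex.and / regex.or / regex.many / regex.option helpers used throughout these terminal definitions compose larger RegExps out of smaller ones by combining their sources. A minimal sketch of plausible implementations (an assumption about millan's internal regex module, shown only to make the definitions above easier to read):

// Assumed combinator implementations; millan's actual module may differ.
const regex = {
  and: (...parts: RegExp[]): RegExp =>
    new RegExp(parts.map((p) => `(?:${p.source})`).join('')),
  or: (...parts: RegExp[]): RegExp =>
    new RegExp(parts.map((p) => `(?:${p.source})`).join('|')),
  many: (part: RegExp): RegExp => new RegExp(`(?:${part.source})*`),
  option: (part: RegExp): RegExp => new RegExp(`(?:${part.source})?`),
};

// Recreates PNAME_NS from the definitions above: an optional PN_PREFIX
// followed by a colon, e.g. `rdf:` or the bare default prefix `:`.
const PN_CHARS_BASE = /[A-Za-z]/; // simplified for the sketch
const PN_CHARS = regex.or(PN_CHARS_BASE, /[-\d]/);
const PN_PREFIX = regex.and(
  PN_CHARS_BASE,
  regex.option(regex.and(regex.many(regex.or(PN_CHARS, /\./)), PN_CHARS))
);
const PNAME_NS = regex.and(regex.option(PN_PREFIX), /:/);
console.log(PNAME_NS.test('rdf:')); // true
console.log(PNAME_NS.test(':')); // true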

Const Punctuator

Punctuator: TokenType = createToken({name: 'Punctuator',pattern: Lexer.NA,})

Const Rule

Rule: TokenType = createToken({name: 'Rule',pattern: /rule/i,})

Const STRING_CHARACTER_PATTERN

STRING_CHARACTER_PATTERN: RegExp = regex.and(/"/, regex.many(regex.or(STRING_SOURCE_CHARACTER_PATTERN, ESCAPED_UNICODE_PATTERN, ESCAPED_CHARACTER_PATTERN)), /"/)

Const STRING_LITERAL1

STRING_LITERAL1: RegExp = regex.and(/'/, regex.many(regex.or(/[^\u0027\u005C\u000A\u000D]/, ECHAR)), /'/)

Const STRING_LITERAL2

STRING_LITERAL2: RegExp = regex.and(/"/, regex.many(regex.or(/[^\u0022\u005C\u000A\u000D]/, ECHAR)), /"/)

Const STRING_LITERAL_LONG1

STRING_LITERAL_LONG1: RegExp = regex.and(/'''/, regex.many(regex.and(regex.option(regex.or(/'/, /''/)), regex.or(/[^'\\]/, ECHAR))), /'''/)

Const STRING_LITERAL_LONG1_TOKEN

STRING_LITERAL_LONG1_TOKEN: TokenType = createToken({name: 'STRING_LITERAL_LONG1',pattern: STRING_LITERAL_LONG1,})

Const STRING_LITERAL_LONG2

STRING_LITERAL_LONG2: RegExp = regex.and(/"""/, regex.many(regex.and(regex.option(regex.or(/"/, /""/)), regex.or(/[^"\\]/, ECHAR))), /"""/)

Const STRING_LITERAL_LONG2_TOKEN

STRING_LITERAL_LONG2_TOKEN: TokenType = createToken({name: 'STRING_LITERAL_LONG2',pattern: STRING_LITERAL_LONG2,})

Const STRING_SOURCE_CHARACTER_PATTERN

STRING_SOURCE_CHARACTER_PATTERN: RegExp = /[\u0009\u0020\u0021\u0023-\u005B\u005D-\uFFFF]/

SkipDirectiveToken

SkipDirectiveToken: TokenType

Const SparqlReceivingStardogDirective

SparqlReceivingStardogDirective: TokenType = createToken({name: 'SparqlReceivingStardogDirective',pattern: Lexer.NA,})

Const StardogArgument

StardogArgument: TokenType = createToken({name: 'StardogArgument',pattern: Lexer.NA,})

Const StardogDirective

StardogDirective: TokenType = createToken({name: 'StardogDirective',pattern: Lexer.NA,})

Const StringValueToken

StringValueToken: TokenType = createToken({name: 'StringValueToken',pattern: Lexer.NA,})

Const Then

Then: TokenType = createToken({name: 'Then',pattern: /then/i,push_mode: LexerMode.THENCLAUSE,})
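The push_mode on If/Then and the pop_mode on EndThen above imply a multi-mode chevrotain lexer: the set of tokens the lexer will try depends on the current mode, and these tokens push and pop the mode stack. A minimal sketch of the mechanism (mode and token names here are illustrative, not millan's LexerMode values):

import { createToken, Lexer } from 'chevrotain';

const EnterBlock = createToken({ name: 'EnterBlock', pattern: '{', push_mode: 'inner' });
const ExitBlock = createToken({ name: 'ExitBlock', pattern: '}', pop_mode: true });
const Word = createToken({ name: 'Word', pattern: /[a-z]+/ });
const WhiteSpace = createToken({ name: 'WhiteSpace', pattern: /\s+/, group: Lexer.SKIPPED });

// EnterBlock pushes 'inner' onto the mode stack; ExitBlock pops back out.
const lexer = new Lexer({
  modes: {
    outer: [WhiteSpace, EnterBlock, Word],
    inner: [WhiteSpace, ExitBlock, Word],
  },
  defaultMode: 'outer',
});
console.log(lexer.tokenize('rule { body }').errors); // []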

ToArgumentToken

ToArgumentToken: TokenType

Const TopLevel

TopLevel: TokenType = createToken({name: 'TopLevel',pattern: Lexer.NA,})

Const TriplesBlock

TriplesBlock: TokenType = createToken({
  name: 'TriplesBlock', // This name is useful for error messages in real-time parsing
  pattern: /[^{}]+/,
  line_breaks: true,
})

Const UNKNOWN

UNKNOWN: TokenType = createToken({ name: 'UNKNOWN', pattern: /\w+/i })

Const VAR1

VAR1: RegExp = regex.and(/\?/, VARNAME)

Const VAR2

VAR2: RegExp = regex.and(/\$/, VARNAME)

Const VARNAME

VARNAME: RegExp = regex.and(regex.or(PN_CHARS_U, /\d/), regex.many(regex.or(PN_CHARS_U, /\d/, /\u00b7/, /[\u0300-\u036f]/, /[\u203f-\u2040]/)))

Const WS

WS: RegExp = /[\u0020\u0009\u000d\u000a]/

Const allInvalidQueries

allInvalidQueries: FileAndContents[] = [...getAllFileContents(join(__dirname, 'fixtures', 'sparql11', 'ebnf', 'badDog')),]

Const allValidQueries

allValidQueries: FileAndContents[] = [...getAllFileContents(join(__dirname, 'fixtures', 'sparql11', 'ebnf', 'goodDog')),]

Const baseTokens

baseTokens: TokenType[] = [
  sparqlTokenMap.NIL, sparqlTokenMap.ANON, sparqlTokenMap.LCurly, sparqlTokenMap.RCurly,
  sparqlTokenMap.LParen, sparqlTokenMap.RParen, sparqlTokenMap.WhiteSpace, sparqlTokenMap.IRIREF,
  sparqlTokenMap.LANGTAG, sparqlTokenMap.DOUBLE, sparqlTokenMap.DECIMAL, sparqlTokenMap.INTEGER,
  sparqlTokenMap.DOUBLE_POSITIVE, sparqlTokenMap.DECIMAL_POSITIVE, sparqlTokenMap.INTEGER_POSITIVE,
  sparqlTokenMap.DOUBLE_NEGATIVE, sparqlTokenMap.DECIMAL_NEGATIVE, sparqlTokenMap.INTEGER_NEGATIVE,
  sparqlTokenMap.STRING_LITERAL1, sparqlTokenMap.STRING_LITERAL2,
  sparqlTokenMap.STRING_LITERAL_LONG1, sparqlTokenMap.STRING_LITERAL_LONG2,
  sparqlTokenMap.PNAME_NS, sparqlTokenMap.PNAME_LN, sparqlTokenMap.BLANK_NODE_LABEL,
  sparqlTokenMap.VAR1, sparqlTokenMap.VAR2, sparqlTokenMap.Comment,
  sparqlTokenMap.SELECT, sparqlTokenMap.CONSTRUCT, sparqlTokenMap.DISTINCT, sparqlTokenMap.Star,
  sparqlTokenMap.WHERE, sparqlTokenMap.GROUP_BY, sparqlTokenMap.ORDER_BY, sparqlTokenMap.BY,
  sparqlTokenMap.Period, sparqlTokenMap.QuestionMark, sparqlTokenMap.Plus, sparqlTokenMap.Minus,
  sparqlTokenMap.LBracket, sparqlTokenMap.RBracket, sparqlTokenMap.PERCENT,
  sparqlTokenMap.BASE, sparqlTokenMap.PREFIX, sparqlTokenMap.DESCRIBE, sparqlTokenMap.ASK,
  sparqlTokenMap.FROM, sparqlTokenMap.REDUCED, sparqlTokenMap.NAMED, sparqlTokenMap.HAVING,
  sparqlTokenMap.ASC, sparqlTokenMap.DESC, sparqlTokenMap.OFFSET, sparqlTokenMap.LIMIT,
  sparqlTokenMap.VALUES, sparqlTokenMap.LOAD, sparqlTokenMap.SILENT, sparqlTokenMap.INTO,
  sparqlTokenMap.AS, sparqlTokenMap.CLEAR, sparqlTokenMap.DROP, sparqlTokenMap.CREATE,
  sparqlTokenMap.ADD, sparqlTokenMap.TO, sparqlTokenMap.MOVE, sparqlTokenMap.COPY,
  sparqlTokenMap.INSERT_DATA, sparqlTokenMap.DELETE_DATA, sparqlTokenMap.DELETE_WHERE,
  sparqlTokenMap.WITH, sparqlTokenMap.DELETE, sparqlTokenMap.INSERT, sparqlTokenMap.USING,
  sparqlTokenMap.DEFAULT, sparqlTokenMap.GRAPH, sparqlTokenMap.ALL, sparqlTokenMap.OPTIONAL,
  sparqlTokenMap.SERVICE, sparqlTokenMap.BIND, sparqlTokenMap.UNDEF, sparqlTokenMap.MINUS,
  sparqlTokenMap.UNION, sparqlTokenMap.FILTER, sparqlTokenMap.LANGMATCHES, sparqlTokenMap.LANG,
  sparqlTokenMap.DATATYPE, sparqlTokenMap.BOUND, sparqlTokenMap.IRI, sparqlTokenMap.URI,
  sparqlTokenMap.BNODE, sparqlTokenMap.RAND, sparqlTokenMap.ABS, sparqlTokenMap.CEIL,
  sparqlTokenMap.FLOOR, sparqlTokenMap.ROUND, sparqlTokenMap.CONCAT, sparqlTokenMap.STRLEN,
  sparqlTokenMap.UCASE, sparqlTokenMap.LCASE, sparqlTokenMap.ENCODE_FOR_URI, sparqlTokenMap.CONTAINS,
  sparqlTokenMap.STRSTARTS, sparqlTokenMap.STRENDS, sparqlTokenMap.STRBEFORE, sparqlTokenMap.STRAFTER,
  sparqlTokenMap.YEAR, sparqlTokenMap.MONTH, sparqlTokenMap.DAY, sparqlTokenMap.HOURS,
  sparqlTokenMap.MINUTES, sparqlTokenMap.SECONDS, sparqlTokenMap.TIMEZONE, sparqlTokenMap.TZ,
  sparqlTokenMap.NOW, sparqlTokenMap.UUID, sparqlTokenMap.STRUUID, sparqlTokenMap.MD5,
  sparqlTokenMap.SHA1, sparqlTokenMap.SHA256, sparqlTokenMap.SHA384, sparqlTokenMap.SHA512,
  sparqlTokenMap.COALESCE, sparqlTokenMap.IF, sparqlTokenMap.STRLANG, sparqlTokenMap.STRDT,
  sparqlTokenMap.STR, sparqlTokenMap.sameTerm, sparqlTokenMap.isIRI, sparqlTokenMap.isURI,
  sparqlTokenMap.isBLANK, sparqlTokenMap.isLITERAL, sparqlTokenMap.isNUMERIC,
  sparqlTokenMap.REGEX, sparqlTokenMap.SUBSTR, sparqlTokenMap.REPLACE,
  sparqlTokenMap.EXISTS, sparqlTokenMap.NOT_EXISTS, sparqlTokenMap.COUNT, sparqlTokenMap.SUM,
  sparqlTokenMap.MIN, sparqlTokenMap.MAX_LENGTH, sparqlTokenMap.MAX, sparqlTokenMap.AVG,
  sparqlTokenMap.SAMPLE, sparqlTokenMap.GROUP_CONCAT, sparqlTokenMap.SEPARATOR,
  sparqlTokenMap.TRUE, sparqlTokenMap.FALSE, sparqlTokenMap.Semicolon, sparqlTokenMap.Comma,
  sparqlTokenMap.ForwardSlash, sparqlTokenMap.DoubleCaret, sparqlTokenMap.Caret,
  sparqlTokenMap.LogicalOr, sparqlTokenMap.Pipe, sparqlTokenMap.LogicalAnd,
  sparqlTokenMap.NotEquals, sparqlTokenMap.Bang, sparqlTokenMap.Equals,
  sparqlTokenMap.LessThanEquals, sparqlTokenMap.GreaterThanEquals,
  sparqlTokenMap.LEmbed, sparqlTokenMap.REmbed, sparqlTokenMap.LessThan, sparqlTokenMap.GreaterThan,
  sparqlTokenMap.IN, sparqlTokenMap.NOT_IN, sparqlTokenMap.A, sparqlTokenMap.UNKNOWN,
]

Const basicFixture

basicFixture: string = `ex:OtherPerson
  a :Thing .
ex:PersonShape
  a sh:NodeShape ;
  sh:targetClass ex:Person ; # Applies to all persons
  sh:property [ # _:b1
    sh:path ex:ssn ; # constrains the values of ex:ssn
    sh:maxCount 1 ;
    sh:datatype xsd:string ;
    sh:pattern "^\\d{3}-\\d{2}-\\d{4}$" ;
  ] ;
  sh:property [ # _:b2
    sh:path ex:worksFor ;
    sh:class ex:Company ;
    sh:nodeKind sh:IRI ;
  ] ;
  sh:closed true ;
  <http://www.w3.org/ns/shacl#ignoredProperties> ( rdf:type ) .`

Const categoryTokens

categoryTokens: any[] = Object.keys(categoryTokenMap).map((key) => categoryTokenMap[key])

Const conditionalDirectiveTokens

conditionalDirectiveTokens: TokenType[] = [SkipDirectiveToken, IncludeDirectiveToken, FilterDirectiveToken]

Const defaultNamespacesMap

defaultNamespacesMap: object = Object.freeze(
  ['', 'rdf', 'rdfs', 'xsd', 'owl', 'stardog'].reduce(
    (namespacesMap, prefix) => ({
      ...namespacesMap,
      [prefix]: true,
    }),
    {}
  )
)

Type declaration

Const disallowedSparqlLiteralTokenNames

disallowedSparqlLiteralTokenNames: string[] = [
  sparqlTokenMap.DOUBLE,
  sparqlTokenMap.DECIMAL,
  sparqlTokenMap.INTEGER,
  sparqlTokenMap.DOUBLE_POSITIVE,
  sparqlTokenMap.DECIMAL_POSITIVE,
  sparqlTokenMap.INTEGER_POSITIVE,
  sparqlTokenMap.DOUBLE_NEGATIVE,
  sparqlTokenMap.DECIMAL_NEGATIVE,
  sparqlTokenMap.INTEGER_NEGATIVE,
  sparqlTokenMap.STRING_LITERAL1,
  sparqlTokenMap.STRING_LITERAL2,
  sparqlTokenMap.STRING_LITERAL_LONG1,
  sparqlTokenMap.STRING_LITERAL_LONG2,
].map((token) => token.tokenName)

Const disallowedSparqlTokenNames

disallowedSparqlTokenNames: string[] = Object.keys(disallowedSparqlTokenNameToRuleMap)

Const escapeSequence

escapeSequence: RegExp = /\\u([a-fA-F0-9]{4})|\\U([a-fA-F0-9]{8})|\\[uU]|\\(.)/g

Const escapedIri

escapedIri: RegExp = /^<((?:[^ <>{}\\]|\\[uU])+)>[ \t]*/

Const finalTokens

finalTokens: TokenType[] = [FragmentName, EnumValueToken, Name, StringValueToken, Punctuator]

Const fixture

fixture: string = `ex:PersonShape
  a sh:NodeShape ;
  sh:targetClass ex:Person ; # Applies to all persons
  sh:property [ # _:b1
    sh:path ex:ssn ; # constrains the values of ex:ssn
    sh:maxCount 1 ;
    sh:datatype xsd:string ;
    sh:pattern "^\\d{3}-\\d{2}-\\d{4}$" ;
  ] ;
  sh:property [ # _:b2
    sh:path ex:worksFor ;
    sh:class ex:Company ;
    sh:nodeKind sh:IRI ;
  ] ;
  sh:closed true ;
  <http://www.w3.org/ns/shacl#ignoredProperties> ( rdf:type ) .`

Const fixtureSuites

fixtureSuites: string[] = [
  'shacl-core/complex',
  'shacl-core/misc',
  'shacl-core/node',
  'shacl-core/path',
  'shacl-core/property',
  'shacl-core/targets',
  'shacl-sparql/component',
  'shacl-sparql/node',
  'shacl-sparql/pre-binding',
  'shacl-sparql/property',
]

Const getShaclTokenMap

getShaclTokenMap: function = memoize((prefixes: { shacl: string; xsd: string }) => {
  const prefixWithShacl = makePrefixer(prefixes.shacl);
  const prefixWithXsd = makePrefixer(prefixes.xsd);
  // Add the prefixed local names to the SHACL token map now that we know the
  // prefixes.
  const shaclTokenMap = localNames.reduce((tokenMap, localName) => {
    const tokenName = `SHACL_${localName}`;
    const prefixedTokenName = `${tokenName}_prefixed`;
    return {
      ...tokenMap,
      [prefixedTokenName]: createToken({
        name: prefixedTokenName,
        pattern: prefixWithShacl(localName),
        categories: [tokenMap[tokenName], turtleTokenMap.PNAME_LN],
      }),
    };
  }, shaclUnprefixedTokenMap);
  // Add the prefixed local names to the XSD token map now that we know the
  // prefixes.
  return xsdLocalNames.reduce((tokenMap, localName) => {
    const tokenName = `SHACL_xsd_${localName}`;
    const prefixedTokenName = `${tokenName}_prefixed`;
    return {
      ...tokenMap,
      [prefixedTokenName]: createToken({
        name: prefixedTokenName,
        pattern: `${prefixWithXsd(localName)}`,
        categories: [tokenMap[tokenName], turtleTokenMap.PNAME_LN],
      }),
    };
  }, shaclTokenMap);
}, isDeepEqual)

Type declaration

    • Parameters

      • prefixes: object
        • shacl: string
        • xsd: string

      Returns TokenMap
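A hypothetical usage sketch: the arguments are prefix labels (for example, the labels bound by a document's @prefix declarations), and because the function is memoized with deep equality, structurally equal arguments should return the same map instance:

import type { TokenType } from 'chevrotain';
declare function getShaclTokenMap(prefixes: { shacl: string; xsd: string }): Record<string, TokenType>; // as documented above

const tokenMap = getShaclTokenMap({ shacl: 'sh', xsd: 'xsd' });
// Memoized with isDeepEqual: structurally equal arguments reuse the result.
const sameMap = getShaclTokenMap({ shacl: 'sh', xsd: 'xsd' });
console.log(tokenMap === sameMap); // true
// Each SHACL local name yields a category token plus _IRI and _prefixed forms:
console.log('SHACL_targetClass' in tokenMap, 'SHACL_targetClass_IRI' in tokenMap, 'SHACL_targetClass_prefixed' in tokenMap);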

Const getShaclTokenTypes

getShaclTokenTypes: function = memoize((prefixes: { shacl: string; xsd: string }) => {
  const tokenMap = getShaclTokenMap(prefixes);
  const { pnameTokens, iriTokens } = Object.keys(tokenMap)
    .sort(reverseSort)
    .reduce(
      (accumulator, key) => {
        if (key.endsWith('_IRI')) {
          if (iriIndex < pnameIndex) {
            accumulator.iriTokens.push(tokenMap[key.slice(0, -4)]);
          }
          accumulator.iriTokens.push(tokenMap[key]);
        } else if (key.endsWith('_prefixed')) {
          if (pnameIndex < iriIndex) {
            accumulator.pnameTokens.push(tokenMap[key.slice(0, -9)]);
          }
          accumulator.pnameTokens.push(tokenMap[key]);
        }
        return accumulator;
      },
      { pnameTokens: [], iriTokens: [] }
    );
  if (pnameIndex < iriIndex) {
    return [
      ...turtleTokenTypes.slice(0, pnameIndex),
      ...categoryTokens,
      ...pnameTokens,
      ...turtleTokenTypes.slice(pnameIndex, iriIndex),
      ...iriTokens,
      ...turtleTokenTypes.slice(iriIndex),
    ];
  } else {
    return [
      ...turtleTokenTypes.slice(0, iriIndex),
      ...categoryTokens,
      ...iriTokens,
      ...turtleTokenTypes.slice(iriIndex, pnameIndex),
      ...pnameTokens,
      ...turtleTokenTypes.slice(pnameIndex),
    ];
  }
}, isDeepEqual)

Type declaration

    • (prefixes: object): TokenType[]
    • Parameters

      • prefixes: object
        • shacl: string
        • xsd: string

      Returns TokenType[]

Const graphQlTokens

graphQlTokens: TokenType[] = []

Const graphqlTokens

graphqlTokens: any = require('./tokens')

Const illegalIriChars

illegalIriChars: RegExp = /[\x00-\x20<>\\"\{\}\|\^\`]/

Const indexOfIriRef

indexOfIriRef: any = turtleTokenTypes.indexOf(turtleTokenMap.IRIREF)

Const indexOfPnCharsBase

indexOfPnCharsBase: any = turtleTokenTypes.indexOf(turtleTokenMap.PN_CHARS_BASE)

Const indexOfSelect

indexOfSelect: number = baseTokens.indexOf(sparqlTokenMap.SELECT)

Const invalidTestsFilenames

invalidTestsFilenames: string[] = [
  'qualifiedValueShape-001.ttl', // references `sh:nodeShape` from old, not current, SHACL spec
  'shacl-shacl-data-shapes.ttl', // has SHACL results (different from SHACL itself) that parser can't handle
  'path-complex-002-shapes.ttl', // has SHACL results (different from SHACL itself) that parser can't handle
  'path-strange-001.ttl', // spec says that a shape has at most one value for `sh:path`, yet this has more
  'path-strange-002.ttl', // spec says that a shape has at most one value for `sh:path`, yet this has more
]

Const iriIndex

iriIndex: any = turtleTokenTypes.indexOf(turtleTokenMap.IRIREF)

Const lexer

lexer: Lexer = new Lexer(turtleTokenTypes)

Const localNameToCategoryMap

localNameToCategoryMap: object = Object.keys(localNamesByCategory).reduce(
  (nameToCategoryMap, category) => {
    const categoryLocalNames = localNamesByCategory[category];
    categoryLocalNames.forEach(
      (localName) => (nameToCategoryMap[localName] = category)
    );
    return nameToCategoryMap;
  },
  {}
)

Type declaration

Const localNames

localNames: string[] = Object.keys(localNameToCategoryMap)

Const manyValuesForSingleValueFixture

manyValuesForSingleValueFixture: string = `ex:OtherPerson
  a :Thing .
ex:PersonShape
  a sh:NodeShape ;
  sh:targetClass ex:Person ; # Applies to all persons
  sh:property [ # _:b1
    sh:path ex:ssn ; # constrains the values of ex:ssn
    sh:maxCount 1 ;
    sh:datatype xsd:string, xsd:boolean ; #invalid
    sh:pattern "^\\d{3}-\\d{2}-\\d{4}$" ;
  ] .`

Const mixedShaclAndTurtleFixture

mixedShaclAndTurtleFixture: string = `ex:OtherPerson
  a :Thing .
ex:PersonShape
  a sh:NodeShape ;
  :loves ex:Somebody ;
  sh:targetClass ex:Person, ex:Human ;
  sh:property [ # _:b1
    sh:path ex:ssn ; # constrains the values of ex:ssn
    sh:maxCount 1 ;
    sh:datatype xsd:string ;
    sh:pattern "^\\d{3}-\\d{2}-\\d{4}$" ;
  ] .`

Const nonStandardTokens

nonStandardTokens: TokenType[] = [
  ...pathsTokens,
  sparqlTokenMap.UNNEST,
  sparqlTokenMap.VALIDATE,
  sparqlTokenMap.SHAPES,
  sparqlTokenMap.SHAPE,
  sparqlTokenMap.PER,
]

Const parser

parser: TurtleParser = new TurtleParser()

Const pathsTokens

pathsTokens: TokenType[] = [
  sparqlTokenMap.START,
  sparqlTokenMap.END,
  sparqlTokenMap.VIA,
  sparqlTokenMap.CYCLIC,
  sparqlTokenMap.PATHS_SHORTEST,
  sparqlTokenMap.PATHS_ALL,
  sparqlTokenMap.PATHS,
]

Const pnameIndex

pnameIndex: any = turtleTokenTypes.indexOf(sparqlTokenMap.PNAME_NS)

Const shaclIriNamespace

shaclIriNamespace: "http://www.w3.org/ns/shacl#" = "http://www.w3.org/ns/shacl#"

Const shaclTokens

shaclTokens: any = require('./tokens')

Const shaclUnprefixedTokenMap

shaclUnprefixedTokenMap: object = localNames.reduce((tokenMap, localName) => {
  const category = localNameToCategoryMap[localName];
  const categoryToken = categoryTokenMap[category];
  const tokenName = `SHACL_${localName}`;
  const iriTokenName = `${tokenName}_IRI`;
  // Category token that will select either a SHACL IRI or a SHACL PN_LOCAL:
  const iriOrPrefixCategoryToken = createToken({
    name: tokenName,
    pattern: Lexer.NA,
    categories: categoryToken ? [categoryToken] : [],
  });
  return {
    ...tokenMap,
    [tokenName]: iriOrPrefixCategoryToken,
    [iriTokenName]: createToken({
      name: iriTokenName,
      pattern: `<${shaclIriNamespace}${localName}>`,
      categories: [iriOrPrefixCategoryToken, turtleTokenMap.IRIREF],
    }),
  };
}, xsdUnprefixedTokenMap)

Type declaration

Const smsOnlyTokens

smsOnlyTokens: TokenType[] = [
  smsTokenMap.Template,
  smsTokenMap.Sql,
  smsTokenMap.GraphQl,
  smsTokenMap.Json,
  smsTokenMap.Csv,
  smsTokenMap.Mapping,
  smsTokenMap.SqlBlock,
  smsTokenMap.JsonBlock,
  smsTokenMap.GraphQlBlock,
]

Const smsTokenTypes

smsTokenTypes: any[] = [...smsOnlyTokens, ...stardogSparqlTokens]

Const smsTokens

smsTokens: any = require('./tokens')

Const sparqlReceivingStardogDirectiveTokens

sparqlReceivingStardogDirectiveTokens: TokenType[] = [BindDirectiveToken, ...conditionalDirectiveTokens]

Const sparqlTokenTypes

sparqlTokenTypes: TokenType[] = [...baseTokens, ...nonStandardTokens]

Const sparqlTokens

sparqlTokens: any = require('./sparql/tokens')

Const srsTokenTypes

srsTokenTypes: TokenType[] = [
  Rule,
  If,
  Then,
  EndThen,
  sparqlTokenMap.LCurly,
  ...turtleTokenTypes,
  GroupGraphPattern,
  TriplesBlock,
]

Const srsTokens

srsTokens: any = require('./tokens')

Const standardParser

standardParser: StandardGraphQlParser = new StandardGraphQlParser()

Const stardogArguments

stardogArguments: StardogArgumentHolder = [
  'orderBy', 'first', 'to', 'if', 'alias', 'graph', 'offset', 'limit', 'iri',
]
  .sort()
  .reduce(
    (accumulator, name) => {
      const key = `${name[0].toUpperCase()}${name.slice(1)}ArgumentToken`;
      const categories = [Name, EnumValueToken, FragmentName, StardogArgument];
      if (['orderBy', 'first', 'limit', 'offset'].includes(name)) {
        categories.push(TopLevel);
      }
      const token = createToken({
        name: key,
        pattern: name,
        categories,
        longer_alt: Name,
      });
      return {
        ...accumulator,
        tokenMap: {
          ...accumulator.tokenMap,
          [key]: token,
        },
        orderedTokens: accumulator.orderedTokens.concat(token),
      };
    },
    { tokenMap: {}, orderedTokens: [] } as StardogArgumentHolder
  )

Const stardogDirectives

stardogDirectives: StardogDirectiveHolder = [
  'optional', 'bind', 'hide', 'skip', 'include', 'filter', 'prefix', 'config',
]
  .sort()
  .reduce(
    (accumulator, name) => {
      const key = `${name[0].toUpperCase()}${name.slice(1)}DirectiveToken`;
      const categories = [Name, EnumValueToken, FragmentName, StardogDirective];
      if (['prefix', 'config'].includes(name)) {
        categories.push(TopLevel);
      }
      if (['bind', 'skip', 'include', 'filter'].includes(name)) {
        categories.push(SparqlReceivingStardogDirective);
      }
      const token = createToken({
        name: key,
        pattern: name,
        categories,
        longer_alt: Name,
      });
      return {
        ...accumulator,
        tokenMap: {
          ...accumulator.tokenMap,
          [key]: token,
        },
        orderedTokens: accumulator.orderedTokens.concat(token),
      };
    },
    { tokenMap: {}, orderedTokens: [] } as StardogDirectiveHolder
  )

Const stardogGraphQlParser

stardogGraphQlParser: StardogGraphQlParser = new StardogGraphQlParser()

Const stardogGraphQlTokens

stardogGraphQlTokens: TokenType[] = [
  ...graphQlTokens,
  ...stardogDirectives.orderedTokens,
  ...stardogArguments.orderedTokens,
  stardogOrderByArgumentFieldPropertyToken,
  stardogOrderByArgumentDescPropertyToken,
  StardogDirective,
  SparqlReceivingStardogDirective,
  StardogArgument,
  TopLevel,
]

Const stardogOrderByArgumentDescPropertyToken

stardogOrderByArgumentDescPropertyToken: TokenType = createToken({name: 'OrderByArgumentDescPropertyToken',pattern: 'desc',categories: [Name, EnumValueToken, FragmentName],longer_alt: Name,})

Const stardogOrderByArgumentFieldPropertyToken

stardogOrderByArgumentFieldPropertyToken: TokenType = createToken({name: 'OrderByArgumentFieldPropertyToken',pattern: 'field',categories: [Name, EnumValueToken, FragmentName],longer_alt: Name,})

Const stardogSparqlTokens

stardogSparqlTokens: any = [...baseTokens.slice(0, indexOfSelect), ...nonStandardTokens, ...baseTokens.slice(indexOfSelect)]

Const stringLiteralLongQuote

stringLiteralLongQuote: RegExp = /^"""([^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*)"""/

Const stringLiteralLongSingleQuote

stringLiteralLongSingleQuote: RegExp = /^'''([^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*)'''/

Const stringLiteralQuote

stringLiteralQuote: RegExp = /^"((?:[^"\\\r\n]|\\.)*)"(?=[^"])/

Const stringLiteralSingleQuote

stringLiteralSingleQuote: RegExp = /^'((?:[^'\\\r\n]|\\.)*)'(?=[^'])/

Const subExpressionMatcher

subExpressionMatcher: RegExp = /(?:[A-Z]+Expression|ValueLogical)$/i

Const trigLexer

trigLexer: Lexer = new Lexer(trigTokenTypes)

Const trigTokenTypes

trigTokenTypes: TokenType[] = [...turtleTokenTypes.slice(0, indexOfPnCharsBase), sparqlTokenMap.GRAPH, ...turtleTokenTypes.slice(indexOfPnCharsBase)]

Const trigTokens

trigTokens: any = require('./tokens')

Const turtleLexer

turtleLexer: Lexer = new Lexer(turtleTokenTypes)

Const turtleTokenTypes

turtleTokenTypes: any = [
  turtleTokenMap.Comment, sparqlTokenMap.ANON, sparqlTokenMap.LBracket, sparqlTokenMap.RBracket,
  sparqlTokenMap.LCurly, sparqlTokenMap.RCurly, sparqlTokenMap.LParen, sparqlTokenMap.RParen,
  sparqlTokenMap.WhiteSpace, turtleTokenMap.TRUE, turtleTokenMap.FALSE, sparqlTokenMap.Comma,
  sparqlTokenMap.Semicolon, sparqlTokenMap.PNAME_NS, sparqlTokenMap.A, sparqlTokenMap.PREFIX,
  sparqlTokenMap.BASE, sparqlTokenMap.PNAME_LN, sparqlTokenMap.BLANK_NODE_LABEL,
  turtleTokenMap.TTL_BASE, turtleTokenMap.TTL_PREFIX, sparqlTokenMap.LANGTAG,
  turtleTokenMap.DOUBLE, turtleTokenMap.DECIMAL, sparqlTokenMap.Period, sparqlTokenMap.DoubleCaret,
  turtleTokenMap.LEmbed, turtleTokenMap.REmbed, turtleTokenMap.IRIREF,
  turtleTokenMap.STRING_LITERAL_LONG_SINGLE_QUOTE, turtleTokenMap.STRING_LITERAL_LONG_QUOTE,
  turtleTokenMap.STRING_LITERAL_QUOTE, turtleTokenMap.STRING_LITERAL_SINGLE_QUOTE,
  turtleTokenMap.INTEGER, turtleTokenMap.EXPONENT, turtleTokenMap.PLX, sparqlTokenMap.PERCENT,
  turtleTokenMap.HEX, turtleTokenMap.PN_CHARS_BASE, turtleTokenMap.PN_CHARS_U, turtleTokenMap.PN_CHARS,
  turtleTokenMap.PN_PREFIX, turtleTokenMap.PN_LOCAL, turtleTokenMap.PN_LOCAL_ESC,
  turtleTokenMap.ECHAR, turtleTokenMap.UCHAR, turtleTokenMap.UNKNOWN,
]

Const turtleTokens

turtleTokens: any = require('./tokens')

Const unescapedIri

unescapedIri: RegExp = /^<([^\x00-\x20<>\\"\{\}\|\^\`]*)>[ \t]*/

Const unescapedStringLiteralQuote

unescapedStringLiteralQuote: RegExp = /^"([^"\\\r\n]+)"/

Const unescapedStringLiteralSingleQuote

unescapedStringLiteralSingleQuote: RegExp = /^'([^'\\\r\n]+)'/

Const unicodeRegexp

unicodeRegexp: RegExp = /[\0-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/

Const upperLowerIfFixtures

upperLowerIfFixtures: object = ['IF', 'if', 'iF', 'If'].reduce(
  (accumulator, val, index) => ({
    ...accumulator,
    ['upperLowerIf' + index]:
      'PREFIX test: <http://test.com/test/0.1/>\n' +
      val +
      ' {\n' + // If
      ' ?X test:hasSibling ?Y . ?Y rdf:type test:Man\n' +
      '}\n' +
      'THEN {\n' +
      ' ?X test:hasBrother ?Y\n' +
      '}\n',
  }),
  {}
)

Type declaration

Const xsdIriNamespace

xsdIriNamespace: "http://www.w3.org/2001/XMLSchema#" = "http://www.w3.org/2001/XMLSchema#"

Const xsdLocalNames

xsdLocalNames: Object = getAsTypedTuple('boolean','integer','string','date','dateTime','anyURI')

Const xsdUnprefixedTokenMap

xsdUnprefixedTokenMap: object = xsdLocalNames.reduce((tokenMap, localName) => {
  const tokenName = `SHACL_xsd_${localName}`; // category token name
  const iriTokenName = `${tokenName}_IRI`; // IRI token name
  // Category token that will ultimately select either an XSD IRI or an XSD PN_LOCAL:
  const iriOrPrefixCategoryToken = createToken({
    name: tokenName,
    pattern: Lexer.NA,
  });
  return {
    ...tokenMap,
    [tokenName]: iriOrPrefixCategoryToken,
    [iriTokenName]: createToken({
      name: iriTokenName,
      pattern: `<${xsdIriNamespace}${localName}>`,
      categories: [iriOrPrefixCategoryToken, turtleTokenMap.IRIREF],
    }),
  };
}, {})

Type declaration

Functions

Const _traverse

  • _traverse(root: CstElement, ctx?: ITraverseContext, visit: function, visitSafely?: boolean): void
  • Parameters

    • root: CstElement
    • Default value ctx: ITraverseContext = new TraverseContext({ node: root })
    • visit: function
        • Parameters

          • ctx: ITraverseContext
          • Optional next: function
              • (nextCtx?: any): void
              • Parameters

                • Optional nextCtx: any

                Returns void

          Returns void

    • Default value visitSafely: boolean = true

    Returns void

addIfClauseErrorsToErrors

  • addIfClauseErrorsToErrors(__namedParameters: object): object
  • Parameters

    • __namedParameters: object
      • cst: IToken | CstNode
      • errors: IRecognitionException[]
      • fullCtx: ITraverseContext
      • namespacesMap: object
      • semanticErrors: IRecognitionException[]

    Returns object

    • errors: IRecognitionException[]
    • semanticErrors: IRecognitionException[]

Const addPredicatesAndTypesToShape

  • addPredicatesAndTypesToShape(shape: ShaclShape, shaclRulePredicateObjectListNodes: CstElement[]): void

addThenClauseErrorsToErrors

  • addThenClauseErrorsToErrors(__namedParameters: object): object
  • Parameters

    • __namedParameters: object
      • cst: IToken | CstNode
      • errors: IRecognitionException[]
      • fullCtx: ITraverseContext
      • namespacesMap: object
      • semanticErrors: IRecognitionException[]

    Returns object

    • errors: IRecognitionException[]
    • semanticErrors: IRecognitionException[]

Const createAndPushPunctuator

  • createAndPushPunctuator(config: ITokenConfig): TokenType

Const createAndPushToken

  • createAndPushToken(config: ITokenConfig): TokenType

Const createAndPushTokenWithNameAlt

  • createAndPushTokenWithNameAlt(config: ITokenConfig): TokenType

Const createKeyword

  • createKeyword(__namedParameters: object): TokenType
  • createKeyword(__namedParameters: object): TokenType
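Judging from the keyword tokens listed under `keywords` below, `createKeyword` defaults the pattern to the token's name matched case-insensitively (SPARQL keywords are case-insensitive), while still accepting an explicit pattern (as DELETE_DATA and GROUP_BY do). A plausible sketch only, not millan's exact implementation:

import { createToken, ITokenConfig, TokenType } from 'chevrotain';

// Plausible default: match the keyword's name case-insensitively unless an
// explicit pattern is supplied.
const createKeywordSketch = ({ name, pattern }: ITokenConfig): TokenType =>
  createToken({
    name,
    pattern: pattern ?? new RegExp(name, 'i'),
  });

const SELECT = createKeywordSketch({ name: 'SELECT' });
const GROUP_BY = createKeywordSketch({ name: 'GROUP_BY', pattern: /GROUP BY/i });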

Const defaultEarlyAbortTest

  • defaultEarlyAbortTest(): boolean

Const explicitEndMatcher

  • explicitEndMatcher(textToMatch: string, endCandidateChar: string, endMatcher: RegExp): RegExpExecArray
  • Parameters

    • textToMatch: string
    • endCandidateChar: string
    • endMatcher: RegExp

    Returns RegExpExecArray

findAndSwapPlaceholders

Const getAllBadStandardFixtures

  • getAllBadStandardFixtures(): Promise<object[]>

Const getAllBadStardogFixtures

  • getAllBadStardogFixtures(): Promise<object[]>

Const getAllFileContents

  • getAllFileContents(): Promise<Object>
  • getAllFileContents(pathToFiles: any): FileAndContents[]

Const getAllGoodFixtures

  • getAllGoodFixtures(): Promise<string[]>
  • getAllGoodFixtures(): Promise<object[]>

getArgumentNodes

  • getArgumentNodes(argumentDictionary: CstChildrenDictionary): CstNode[]
  • Parameters

    • argumentDictionary: CstChildrenDictionary

    Returns CstNode[]

getArgumentTokenTypesForDirectiveNameToken

  • getArgumentTokenTypesForDirectiveNameToken(directiveNameToken: IToken): TokenType[]

Const getAsTypedTuple

  • getAsTypedTuple<T>(...args: T): T

getCustomErrorRuleStack

Const getCustomIRecognitionException

  • getCustomIRecognitionException(__namedParameters: object): object
  • Parameters

    • __namedParameters: object
      • message: string
      • name: string
      • node: IToken
      • ruleStack: string[]

    Returns object

    • message: string
    • name: string
    • resyncedTokens: undefined[]
    • token: IToken
    • context: object
      • ruleOccurrenceStack: undefined[]
      • ruleStack: string[]

Const getDisallowedLiteralError

Const getDisallowedTokenError

  • Parameters

    Returns object

    • message: string
    • name: string
    • resyncedTokens: undefined[]
    • token: IToken
    • context: object
      • ruleOccurrenceStack: undefined[]
      • ruleStack: string[]

getFirstChildCstElementByRuleStack

  • getFirstChildCstElementByRuleStack(ruleStack: string[], rootCstNode: CstNode): CstElement | undefined

Const getLocalName

  • getLocalName(iri: string, matcher: RegExp): string

Const getNoPrefixError

Const getShaclLocalNameMatcher

  • getShaclLocalNameMatcher(shaclPrefix: string): RegExp

Const getShaclShapeFromBlankNodePropertyList

  • getShaclShapeFromBlankNodePropertyList(ctx: any): object
  • Parameters

    • ctx: any

    Returns object

    • predicates: undefined[]
    • types: undefined[]
    • subject: object
      • token: any
      • type: string

Const getShaclVisitor

Const getSnapshotObj

  • getSnapshotObj(input: object): any

Const getSparqlSrsVisitor

Const getStardogGraphQlVisitor

Const getUnderlyingStartToken

  • getUnderlyingStartToken(ctx: CstNode): any

isCstNode

  • isCstNode(object: CstElement): boolean

Const isCstNodeTraverseContext

  • isCstNodeTraverseContext(ctx: any): boolean

isIToken

  • isIToken(object: CstElement): boolean

Const isParentBindOrBoundExpressionOrEmbeddedTriplePattern

  • isParentBindOrBoundExpressionOrEmbeddedTriplePattern(parentCtx: any): boolean

isSparqlReceivingStardogDirective

  • isSparqlReceivingStardogDirective(directiveToken: IToken): boolean

Const jsonStringifyReplacer

  • jsonStringifyReplacer(key: string, value: any): any

log

  • log(...args: any[]): void

Const makeExpectExtensionForParse

  • makeExpectExtensionForParse(parse: parse): object

Const makePrefixer

  • makePrefixer(prefix: string): (Anonymous function)
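Presumed behavior, inferred from its use in getShaclTokenMap above (not a documented contract):

declare function makePrefixer(prefix: string): (localName: string) => string; // assumed shape

const prefixWithShacl = makePrefixer('sh');
console.log(prefixWithShacl('targetClass')); // expected: 'sh:targetClass'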

mapSparqlErrors

  • mapSparqlErrors(sparqlErrors: IRecognitionException[], tokenForOffset: IToken, offsetPadding?: number): object[]
  • Parameters

    • sparqlErrors: IRecognitionException[]
    • tokenForOffset: IToken
    • Default value offsetPadding: number = 0

    Returns object[]

Const parse

  • parse(doc: string, rule: Function): any
  • parse(doc: string, rule: Function): any
  • parse(doc: string, rule: Function): any
  • parse(doc: string, rule: Function): any

parseSparqlExpression

  • parseSparqlExpression(stringValueToken: IToken, stardogSparqlParser: StardogSparqlParser): object

Const readDirAsync

  • readDirAsync(pathName: any): Promise<string[]>

Const readFileAsync

  • readFileAsync(filePath: any): Promise<string>

reduceVisitorItemErrors

  • reduceVisitorItemErrors(acc: IRecognitionException[], item: SparqlSrsVisitorItem): IRecognitionException[]

Const reverseSort

  • reverseSort(a: any, b: any): 0 | 1 | -1

Const testAllFilesInDirectory

  • testAllFilesInDirectory(directoryPath: string, parser: TrigParser, parseMode: ModeString): Promise<void[]>

Const testFilesInDirectory

  • testFilesInDirectory(directoryPath: string, parser: SrsParser, parseMode: ModeString, filenameFilter: function): Promise<void[]>
  • testFilesInDirectory(directoryPath: string, parser: TurtleParser, parseMode: ModeString, filenameFilter: function): Promise<void[]>

Const traverse

  • traverse(root: CstElement, visit: function): void
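A sketch of a typical visit, assuming a CST produced by one of the parsers in this package; the visitor receives an ITraverseContext whose `node` is either a CstNode or an IToken:

import type { CstElement, CstNode } from 'chevrotain';

// Assumed ambient declarations for the sketch; in real code, import these
// from the modules documented on this page.
declare function traverse(root: CstElement, visit: (ctx: { node: CstElement }) => void): void;
declare function isCstNode(object: CstElement): boolean;
declare const cst: CstNode; // e.g. the `cst` from a millan parser's parse result

// Count how many times each grammar rule occurs in the tree.
const ruleCounts: Record<string, number> = {};
traverse(cst, (ctx) => {
  if (isCstNode(ctx.node)) {
    const { name } = ctx.node as CstNode;
    ruleCounts[name] = (ruleCounts[name] || 0) + 1;
  }
});
console.log(ruleCounts);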

Const unescape

  • unescape(item: string): string
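Presumed behavior, based on the `escapeSequence` regex and `escapeReplacements` map documented on this page: it resolves \uXXXX/\UXXXXXXXX escapes and single-character escapes in Turtle-style strings. The outputs below are assumptions, not documented results:

declare function unescape(item: string): string; // as documented above

console.log(unescape('line1\\nline2')); // expected: 'line1\nline2'
console.log(unescape('\\u0041BC')); // expected: 'ABC'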

Const unsafeTraverse

  • unsafeTraverse(root: CstElement, visit: function): void

Const upperLowerThenFixtures

  • upperLowerThenFixtures(): object

validateDirectiveArguments

  • validateDirectiveArguments(__namedParameters: object): void

validateDirectiveArgumentsArity

  • validateDirectiveArgumentsArity(__namedParameters: object): void
  • Parameters

    • __namedParameters: object
      • allowedArgumentTokenTypes: TokenType[]
      • directiveImage: string | RegExp
      • errorAccumulator: ErrorAccumulator
      • numMinimumArguments: number
      • suppliedArgumentNodes: CstNode[]

    Returns void

validateDirectiveArgumentsNameAndValue

  • validateDirectiveArgumentsNameAndValue(__namedParameters: object): void

validateSuppliedArgumentsForDirective

Object literals

Const categoryTokenMap

categoryTokenMap: object

AnyLiteralTakingPredicate

AnyLiteralTakingPredicate: TokenType = createToken({name: 'AnyLiteralTakingPredicate',pattern: Lexer.NA,})

BooleanTakingPredicate

BooleanTakingPredicate: TokenType = createToken({name: 'BooleanTakingPredicate',pattern: Lexer.NA,})

IntTakingPredicate

IntTakingPredicate: TokenType = createToken({name: 'IntTakingPredicate',pattern: Lexer.NA,})

LangStringTakingPredicate

LangStringTakingPredicate: TokenType = createToken({name: 'LangStringTakingPredicate',pattern: Lexer.NA,})

ManyIriTakingPredicate

ManyIriTakingPredicate: TokenType = createToken({name: 'ManyIriTakingPredicate',pattern: Lexer.NA,})

NodeKindIRI

NodeKindIRI: TokenType = createToken({name: 'NodeKindIRI',pattern: Lexer.NA,})

ShapeExpectingPredicate

ShapeExpectingPredicate: TokenType = createToken({name: 'ShapeExpectingPredicate',pattern: Lexer.NA,})

SingleIriTakingPredicate

SingleIriTakingPredicate: TokenType = createToken({name: 'SingleIriTakingPredicate',pattern: Lexer.NA,})

StringLiteralQuoteTakingPredicate

StringLiteralQuoteTakingPredicate: TokenType = createToken({name: 'StringLiteralQuoteTakingPredicate',pattern: Lexer.NA,})
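All of the entries above are chevrotain "category" tokens: `pattern: Lexer.NA` means they never match input themselves. Concrete SHACL tokens declare them under `categories`, and parser rules can then consume any member through the category. A minimal sketch of the mechanism (token names are illustrative):

import { createToken, Lexer } from 'chevrotain';

// Pure category: never matched directly.
const IntTakingPredicateSketch = createToken({
  name: 'IntTakingPredicateSketch',
  pattern: Lexer.NA,
});
// Concrete token that belongs to the category.
const SHACL_minCount_sketch = createToken({
  name: 'SHACL_minCount_sketch',
  pattern: 'sh:minCount',
  categories: [IntTakingPredicateSketch],
});
// Inside a chevrotain parser rule, this.CONSUME(IntTakingPredicateSketch)
// accepts SHACL_minCount_sketch (or any other member of the category).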

Const disallowedSparqlTokenNameToRuleMap

disallowedSparqlTokenNameToRuleMap: object

__computed

__computed: string = "RightEmbed"

Const escapeReplacements

escapeReplacements: object

!

!: string = "!"

"

": string = """

#

#: string = "#"

$

$: string = "$"

%

%: string = "%"

&

&: string = "&"

'

': string = "'"

(

(: string = "("

)

): string = ")"

*

*: string = "*"

+

+: string = "+"

,

,: string = ","

-

-: string = "-"

.

.: string = "."

/

/: string = "/"

;

;: string = ";"

=

=: string = "="

?

?: string = "?"

@

@: string = "@"

\

\: string = "\\"

_

_: string = "_"

b

b: string = "\b"

f

f: string = "\f"

n

n: string = "\n"

r

r: string = "\r"

t

t: string = "\t"

~

~: string = "~"

Const fixtures

fixtures: object

bind

bind: string = `# stardog.java misc.sms
PREFIX : <http://example.com/>
MAPPING <urn:misc>
FROM JSON {
{
  "datasets" : {
    "?datasetName" : { "?datasetKey" : "?datasetVal" },
    "dataset1" : { "x" : "?dataset1x" }
  },
  "bind_chain" : "?bindChainRoot",
  "bind_chain_str" : "?bindChainStr"
}
}
TO {
  ?datasetIri a :Dataset ;
    :formalName ?formalName ;
    ?datasetProp ?datasetVal .
  # Constant in the subject position
  <urn:root> :bindChain ?bindChainFinal ;
    # shouldn't show up
    :missing ?missing ;
    :templateExprArg ?templateExprArg ;
    :dataset1x ?dataset1x .
}
WHERE {
  # chain of binds with multiple variable dependencies
  bind(xsd:integer(?bindChainRoot) as ?bindChainInt)
  bind(?bindChainInt + 1 as ?bindChainAdded)
  bind(str(?bindChainAdded) as ?bindChainAddedStr)
  bind(concat(?bindChainStr, ?bindChainAddedStr) as ?bindChainFinal)
  # template with expr args
  bind(template("http://example.com/something/{bindChainAddedStr}") as ?templateExprArg)
  bind(template("http://example.com/dataset/{datasetName}") as ?datasetIri)
  bind(template("http://example.com/dataset/{datasetKey}") as ?datasetProp)
  # Regex with escaped chars
  bind(replace(?datasetName, "dataset(\\d)", "This is #$1") as ?formalName)
};

# issue 49
MAPPING
FROM SQL {
  SELECT *
  FROM \`cardb\`.\`car\`
}
TO {
  ?subject <http://api.stardog.com/car#brand> ?brand .
  ?subject <http://api.stardog.com/car#color> ?color .
  ?subject <http://api.stardog.com/car#id> ?id_integer .
  ?subject <http://api.stardog.com/car#model> ?model .
  ?subject <http://api.stardog.com/car#owner> ?owner_integer .
  ?subject <http://api.stardog.com/car#price> ?price_integer .
  ?subject <http://api.stardog.com/car#ref-owner> ?ref_owner .
  ?subject <http://api.stardog.com/car#register_number> ?register_number .
  ?subject <http://api.stardog.com/car#year> ?year_integer .
  ?subject rdf:type :car
} WHERE {
  BIND(StrDt(?id, xsd:integer) AS ?id_integer)
  BIND(StrDt(?owner, xsd:integer) AS ?owner_integer)
  BIND(StrDt(?price, xsd:integer) AS ?price_integer)
  BIND(StrDt(?year, xsd:integer) AS ?year_integer)
  BIND(template("http://api.stardog.com/car/id={id}") AS ?subject)
  BIND(template("http://api.stardog.com/owner/owner_id={owner}") AS ?ref_owner)
};

# valid matchings
MAPPING
FROM SQL {
  SELECT *
  FROM sms_bind
}
TO {
  ?subject <http://api.stardog.com/sms_bind#strdt> ?strdt .
  ?subject <http://api.stardog.com/sms_bind#strlang> ?strlang .
  ?subject <http://api.stardog.com/sms_bind#template> ?template .
  ?subject <http://api.stardog.com/sms_bind#iri_func> ?iri_func .
  ?subject <http://api.stardog.com/sms_bind#iri> ?iri .
  ?subject rdf:type :sms_bind
} WHERE {
  BIND(StrDt(?id, xsd:integer) AS ?strdt)
  BIND(StrLang("Hello", "en") AS ?strlang)
  BIND(iri(?url) AS ?iri_func)
  BIND(<http://example.com> as ?iri)
  BIND(template("http://api.stardog.com/sms_bind/template={id}") AS ?template)
}`

comments

comments: string = `# some comment
mapping <spooky>
# some comment
from json {sd}
to { ?s a :Thing }
# some comment
where {}`

edgePropertiesEmbeddedPropertyList

edgePropertiesEmbeddedPropertyList: string = `PREFIX emp: <http://example.com/emp>
MAPPING
FROM SQL {
  select * from employees
}
TO {
  ?emp a { :since 2010 } emp:Employee .
}
WHERE {
  BIND(template("http://employee/{emp_no}") as ?emp)
}`

edgePropertiesEmbeddedPropertyList2

edgePropertiesEmbeddedPropertyList2: string = `# Employees mappings with edge properties using Stardog syntax
PREFIX emp: <http://example.com/emp/>
MAPPING
FROM SQL {
  select * from employees
}
TO {
  ?emp a emp:Employee .
  ?emp emp:firstName {
    emp:since "?birth_date"^^xsd:date ;
    emp:justAnotherFirstNameEdgeProp "?hire_date"^^xsd:date ;
    emp:beginsAt "?birth_date"^^xsd:date
  } ?first_name .
  ?emp emp:lastName { emp:beginsAt "?birth_date"^^xsd:date } ?last_name .
  ?emp emp:worksFor { emp:beginsAt "?hire_date"^^xsd:date } emp:TheCompany .
}
WHERE {
  BIND(template("http://employee/{emp_no}") as ?emp)
}`

edgePropertiesEmbeddedTriples

edgePropertiesEmbeddedTriples: string = `PREFIX emp: <http://example.com/emp>
MAPPING
FROM SQL {
  select * from employees
}
TO {
  ?emp a emp:Employee .
  <<?emp emp:firstName ?first_name>> emp:since "?birth_date"^^xsd:date .
}
WHERE {
  BIND(template("http://employee/{emp_no}") as ?emp)
}`

edgePropertiesEmbeddedTriples2

edgePropertiesEmbeddedTriples2: string = `# Employees mappings with edge properties
PREFIX emp: <http://example.com/emp/>
MAPPING
FROM SQL {
  select * from employees
}
TO {
  ?emp a emp:Employee .
  # TODO: Should we avoid duplication of implicit ordinary scans?
  <<?emp emp:firstName ?first_name>> emp:since "?birth_date"^^xsd:date .
  <<?emp emp:firstName ?first_name>> emp:justAnotherFirstNameEdgeProp "?hire_date"^^xsd:date .
  <<?emp emp:firstName ?first_name>> emp:beginsAt "?birth_date"^^xsd:date .
  <<?emp emp:lastName ?last_name>> emp:beginsAt "?birth_date"^^xsd:date .
  <<?emp emp:worksFor emp:TheCompany>> emp:beginsAt "?hire_date"^^xsd:date .
}
WHERE {
  BIND(template("http://employee/{emp_no}") as ?emp)
}`

emptyWhere

emptyWhere: string = `mapping <spooky>
from json {}
to { ?s a :Thing }
where {}`

graphQlMapping

graphQlMapping: string = `mapping <spooky>
from graphql {
  spooker {
    movieId: _id
    title
    plot
    ratingString: rating
    rt: rottenTomatoes
    date: release_date
    cast @array {
      actorId: id
      actorName: name
      characterName: role
    }
    directors @array {
      directorId: id
      directorName: name
      age
    }
    genre: genres @array
  }
}
to { $s a :Thing }
where {}`

jsonMapping

jsonMapping: string = `mapping <spooky>
from json {
  "spooker": {
    "ghoul": "?ghastly",
    "goblin": "?macabre"
  }
}
to { $s a :Thing }
where {}`

multipleJsonWithComments

multipleJsonWithComments: string = `# SMS2 file with multiple mappings
# first, movies only
PREFIX tt: <http://www.imdb.com/title/>
PREFIX nm: <http://www.imdb.com/name/>
PREFIX : <http://example.com/>
MAPPING <urn:moviesOnly>
FROM JSON {
  "movies":{
    "_id": "?movieId",
    "title": "?title"
  }
}
TO {
  ?movie a :Movie ;
    rdfs:label ?title ;
}
WHERE {
  BIND (template("http://www.imdb.com/title/{movieId}") AS ?movie)
};

# directors
MAPPING <urn:directors>
FROM JSON {
  "movies":{
    "_id": "?movieId",
    "directors": [{ "id": "?directorId", "name": "?directorName" , "age": "?age" }]
  }
}
TO {
  ?director a :Director ;
    :directed ?movie ;
    rdfs:label ?directorName ;
}
WHERE {
  BIND (template("http://www.imdb.com/title/{movieId}") AS ?movie)
  BIND (template("http://www.imdb.com/name/{directorId}") AS ?director)
};

# actors, no mapping name
MAPPING <someIri>
FROM JSON {
  "movies":{
    "_id": "?movieId",
    "cast": [{ "id": "?actorId", "name": "?actorName", "role": "?characterName" }]
  }
}
TO {
  ?actor a :Actor ;
    :starredIn ?movie ;
    :actorName ?actorName ;
}
WHERE {
  BIND (template("http://www.imdb.com/title/{movieId}") AS ?movie)
  BIND (template("http://www.imdb.com/name/{actorId}") AS ?actor)
}`

nonTerminatedSqlBlock

nonTerminatedSqlBlock: string = `MAPPING
FROM SQL { SELECT *
FROM review
TO {
  ?subject <http://api.stardog.com/review#language> ?language .
  ?subject <http://api.stardog.com/review#nr> ?nr .
  ?subject <http://api.stardog.com/review#person> ?person .
  ?subject <http://api.stardog.com/review#producer> ?producer .
  ?subject <http://api.stardog.com/review#product> ?product .
  ?subject <http://api.stardog.com/review#publishDate> ?publishDate .
  ?subject <http://api.stardog.com/review#publisher> ?publisher .
  ?subject <http://api.stardog.com/review#rating1> ?rating1 .
  ?subject <http://api.stardog.com/review#rating2> ?rating2 .
  ?subject <http://api.stardog.com/review#rating3> ?rating3 .
  ?subject <http://api.stardog.com/review#rating4> ?rating4 .
  ?subject <http://api.stardog.com/review#reviewDate> ?reviewDate .
  ?subject <http://api.stardog.com/review#text> ?text .
  ?subject <http://api.stardog.com/review#title> ?title .
  ?subject rdf:type :review
} WHERE {
  BIND(template("http://api.stardog.com/review/nr={nr}") AS ?subject)
  BIND(xsd:date(?publishDate) AS ?publishDate)
  BIND(xsd:dateTime(?reviewDate) AS ?reviewDate)
  BIND(xsd:integer(?nr) AS ?nr)
  BIND(xsd:integer(?person) AS ?person)
  BIND(xsd:integer(?producer) AS ?producer)
  BIND(xsd:integer(?product) AS ?product)
  BIND(xsd:integer(?publisher) AS ?publisher)
  BIND(xsd:integer(?rating1) AS ?rating1)
  BIND(xsd:integer(?rating2) AS ?rating2)
  BIND(xsd:integer(?rating3) AS ?rating3)
  BIND(xsd:integer(?rating4) AS ?rating4)
}`

plainStringInTemplate

plainStringInTemplate: string = `mapping <spooky>
from json {}
to { ?s a :Thing }
where { template('spooky') }`

prefixDecls

prefixDecls: string = `PREFIX tt: <http://www.imdb.com/title/>
PREFIX nm: <http://www.imdb.com/name/>
PREFIX : <http://example.com/>
mapping <spooky>
from json {sd}
to { ?s a :Thing }
where {}`

sqlMapping

sqlMapping: string = `mapping <spooky>
from sql {
  select * from tableName
}
to { $s a :Thing }
where {}`

var2

var2: string = `mapping <spooky>
from json {}
to { $s a :Thing }
where {}`

csvMapping

csvMapping: object

brace

brace: string = `prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
prefix xsd: <http://www.w3.org/2001/XMLSchema#>
prefix gr: <http://purl.org/goodrelations/v1#>
prefix foaf: <http://xmlns.com/foaf/0.1/>
prefix dbpedia: <http://dbpedia.org/resource/>
prefix vso: <http://purl.org/vso/ns#>
prefix : <http://example.org/cars#>
mapping <urn:mapping>
from csv {}
to {
  ?manufacturer a gr:BusinessEntity ;
    rdfs:label ?Make .
  ?model_iri a gr:ProductOrServiceModel ;
    rdfs:label ?model_string ;
    gr:hasManufacturer ?make_iri .
  ?car_iri a vso:Automobile, gr:ActualProductOrServiceInstance ;
    rdfs:label ?car_label ;
    gr:hasManufacturer ?make_hash_iri ;
    gr:hasMakeAndModel ?model_iri ;
    vso:modelDate ?model_date .
  ?offer_iri a gr:Offering ;
    rdfs:comment ?Description ;
    gr:includes ?car_iri ;
    gr:hasBusinessFunction gr:Sell ;
    gr:hasPriceSpecification ?price_bnode .
  ?price_bnode a gr:UnitPriceSpecification ;
    gr:hasCurrency "USD"^^xsd:string ;
    gr:hasCurrencyValue ?price_float .
}
where {
  bind(template("http://example.org/cars#Manufacturer-{Make}") as ?manufacturer)
  bind(sha1(?Model) as ?model_hash)
  bind(template("http://example.org/cars#Model-{model_hash}") as ?model_iri)
  bind(concat(?Make, " ", ?Model) as ?model_string)
  bind(template("http://example.org/cars#Manufacturer-{Make}") as ?make_iri)
  bind(template("http://example.org/cars#Car-{_ROW_NUMBER_}") as ?car_iri)
  bind(concat(?Make, " ", ?Model, " (", ?Year, ")") as ?car_label)
  bind(sha1(?Make) as ?make_hash)
  bind(template("http://example.org/cars#Manufacturer-{make_hash}") as ?make_hash_iri)
  bind(xsd:date(concat(?Year, "-01-01")) as ?model_date)
  bind(template("http://example.org/cars#Offer-{_ROW_NUMBER_}") as ?offer_iri)
  bind(bnode() as ?price_bnode)
  bind(xsd:float(?Price) AS ?price_float)
}`

no_brace

no_brace: string = `# x,y,name,unused
mapping
from csv
to {
  ?subj a :Thing ;
    :name ?name ;
    :expr2 ?expr2 ;
  .
}
where {
  bind(coalesce(?x, ?y) as ?id)
  bind(if(bound(?x), concat("x + 1 = ", str(integer(?x) + 1)), "x is not bound") as ?expr2)
  bind(template("http://example.com/{id}") as ?subj)
}`

valid

valid: object

Const graphQlTokenMap

graphQlTokenMap: object

Name

Name: TokenType

Const graphQlUtils

graphQlUtils: object

getArgumentNodes

getArgumentNodes: getArgumentNodes

getArgumentTokenTypesForDirectiveNameToken

getArgumentTokenTypesForDirectiveNameToken: getArgumentTokenTypesForDirectiveNameToken

isSparqlReceivingStardogDirective

isSparqlReceivingStardogDirective: isSparqlReceivingStardogDirective

Const ignoredTokens

ignoredTokens: object

Comma

Comma: TokenType = createAndPushToken({name: 'Comma',pattern: ',',group: Lexer.SKIPPED,})

Comment

Comment: TokenType = createAndPushToken({name: 'Comment',pattern: /#[^\n\r]*/,group: Lexer.SKIPPED,})

LineTerminator

LineTerminator: TokenType = createAndPushToken({name: 'LineTerminator',pattern: /\n\r|\r|\n/,group: Lexer.SKIPPED,})

UnicodeBOM

UnicodeBOM: TokenType = createAndPushToken({name: 'UnicodeBOM',pattern: '\\uFFFE',group: Lexer.SKIPPED,})

WhiteSpace

WhiteSpace: TokenType = createAndPushToken({name: 'WhiteSpace',pattern: /[ \t]+/,group: Lexer.SKIPPED,})
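All of these use chevrotain's Lexer.SKIPPED group: the lexer recognizes such tokens and then discards them, which is how GraphQL's "ignored tokens" (commas, comments, line terminators, the BOM, and whitespace) are kept out of the token stream the parser sees. A quick sketch:

import { createToken, Lexer } from 'chevrotain';

const WhiteSpaceSketch = createToken({
  name: 'WhiteSpaceSketch',
  pattern: /[ \t]+/,
  group: Lexer.SKIPPED, // matched, then dropped from the output token stream
});
const NameSketch = createToken({
  name: 'NameSketch',
  pattern: /[_A-Za-z][_0-9A-Za-z]*/,
});

const lexer = new Lexer([WhiteSpaceSketch, NameSketch]);
console.log(lexer.tokenize('foo bar').tokens.map((t) => t.image)); // ['foo', 'bar']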

Const keywords

keywords: object

A

A: TokenType = createKeyword({ name: 'A', pattern: /a/ })

ABS

ABS: TokenType = createKeyword({ name: 'ABS' })

ADD

ADD: TokenType = createKeyword({ name: 'ADD' })

ALL

ALL: TokenType = createKeyword({ name: 'ALL' })

ARGUMENT_DEFINITION

ARGUMENT_DEFINITION: TokenType = createAndPushTokenWithNameAlt({name: 'ARGUMENT_DEFINITION',pattern: 'ARGUMENT_DEFINITION',})

AS

AS: TokenType = createKeyword({ name: 'AS' })

ASC

ASC: TokenType = createKeyword({ name: 'ASC' })

ASK

ASK: TokenType = createKeyword({ name: 'ASK' })

AVG

AVG: TokenType = createKeyword({ name: 'AVG' })

BASE

BASE: TokenType = createKeyword({ name: 'BASE' })

BIND

BIND: TokenType = createKeyword({ name: 'BIND' })

BNODE

BNODE: TokenType = createKeyword({ name: 'BNODE' })

BOUND

BOUND: TokenType = createKeyword({ name: 'BOUND' })

BY

BY: TokenType = createKeyword({ name: 'BY' })

CEIL

CEIL: TokenType = createKeyword({ name: 'CEIL' })

CLEAR

CLEAR: TokenType = createKeyword({ name: 'CLEAR' })

COALESCE

COALESCE: TokenType = createKeyword({ name: 'COALESCE' })

CONCAT

CONCAT: TokenType = createKeyword({ name: 'CONCAT' })

CONSTRUCT

CONSTRUCT: TokenType = createKeyword({ name: 'CONSTRUCT' })

CONTAINS

CONTAINS: TokenType = createKeyword({ name: 'CONTAINS' })

COPY

COPY: TokenType = createKeyword({ name: 'COPY' })

COUNT

COUNT: TokenType = createKeyword({ name: 'COUNT' })

CREATE

CREATE: TokenType = createKeyword({ name: 'CREATE' })

CYCLIC

CYCLIC: TokenType = createKeyword({ name: 'CYCLIC' })

DATATYPE

DATATYPE: TokenType = createKeyword({ name: 'DATATYPE' })

DAY

DAY: TokenType = createKeyword({ name: 'DAY' })

DEFAULT

DEFAULT: TokenType = createKeyword({ name: 'DEFAULT' })

DELETE

DELETE: TokenType = createKeyword({ name: 'DELETE' })

DELETE_DATA

DELETE_DATA: TokenType = createKeyword({name: 'DELETE_DATA',pattern: /DELETE +DATA/i,})

DELETE_WHERE

DELETE_WHERE: TokenType = createKeyword({name: 'DELETE_WHERE',pattern: /DELETE +WHERE/i,})

DESC

DESC: TokenType = createKeyword({ name: 'DESC' })

DESCRIBE

DESCRIBE: TokenType = createKeyword({ name: 'DESCRIBE' })

DISTINCT

DISTINCT: TokenType = createKeyword({ name: 'DISTINCT' })

DROP

DROP: TokenType = createKeyword({ name: 'DROP' })

DirectiveToken

DirectiveToken: TokenType = createAndPushTokenWithNameAlt({name: 'DirectiveToken',pattern: 'directive',})

ENCODE_FOR_URI

ENCODE_FOR_URI: TokenType = createKeyword({ name: 'ENCODE_FOR_URI' })

END

END: TokenType = createKeyword({ name: 'END' })

ENUM

ENUM: TokenType = createAndPushTokenWithNameAlt({ name: 'ENUM', pattern: 'ENUM' })

ENUM_VALUE

ENUM_VALUE: TokenType = createAndPushTokenWithNameAlt({name: 'ENUM_VALUE',pattern: 'ENUM_VALUE',})

EXISTS

EXISTS: TokenType = createKeyword({ name: 'EXISTS' })

Enum

Enum: TokenType = createAndPushTokenWithNameAlt({name: 'Enum',pattern: 'enum',})

Extend

Extend: TokenType = createAndPushTokenWithNameAlt({name: 'Extend',pattern: 'extend',})

FALSE

FALSE: TokenType = createKeyword({ name: 'FALSE' })

FIELD

FIELD: TokenType = createAndPushTokenWithNameAlt({ name: 'FIELD', pattern: 'FIELD' })

FIELD_DEFINITION

FIELD_DEFINITION: TokenType = createAndPushTokenWithNameAlt({name: 'FIELD_DEFINITION',pattern: 'FIELD_DEFINITION',})

FILTER

FILTER: TokenType = createKeyword({ name: 'FILTER' })

FLOOR

FLOOR: TokenType = createKeyword({ name: 'FLOOR' })

FRAGMENT_DEFINITION

FRAGMENT_DEFINITION: TokenType = createAndPushTokenWithNameAlt({name: 'FRAGMENT_DEFINITION',pattern: 'FRAGMENT_DEFINITION',})

FRAGMENT_SPREAD

FRAGMENT_SPREAD: TokenType = createAndPushTokenWithNameAlt({name: 'FRAGMENT_SPREAD',pattern: 'FRAGMENT_SPREAD',})

FROM

FROM: TokenType = createKeyword({ name: 'FROM' })

Fragment

Fragment: TokenType = createAndPushTokenWithNameAlt({name: 'Fragment',pattern: 'fragment',})

GRAPH

GRAPH: TokenType = createKeyword({ name: 'GRAPH' })

GROUP_BY

GROUP_BY: TokenType = createKeyword({name: 'GROUP_BY',pattern: /GROUP BY/i,})

GROUP_CONCAT

GROUP_CONCAT: TokenType = createKeyword({ name: 'GROUP_CONCAT' })

HAVING

HAVING: TokenType = createKeyword({ name: 'HAVING' })

HOURS

HOURS: TokenType = createKeyword({ name: 'HOURS' })

IF

IF: TokenType = createKeyword({ name: 'IF' })

IN

IN: TokenType = createKeyword({ name: 'IN' })

INLINE_FRAGMENT

INLINE_FRAGMENT: TokenType = createAndPushTokenWithNameAlt({name: 'INLINE_FRAGMENT',pattern: 'INLINE_FRAGMENT',})

INPUT_FIELD_DEFINITION

INPUT_FIELD_DEFINITION: TokenType = createAndPushTokenWithNameAlt({name: 'INPUT_FIELD_DEFINITION',pattern: 'INPUT_FIELD_DEFINITION',})

INPUT_OBJECT

INPUT_OBJECT: TokenType = createAndPushTokenWithNameAlt({name: 'INPUT_OBJECT',pattern: 'INPUT_OBJECT',})

INSERT

INSERT: TokenType = createKeyword({ name: 'INSERT' })

INSERT_DATA

INSERT_DATA: TokenType = createKeyword({name: 'INSERT_DATA',pattern: /INSERT +DATA/i,})

INTERFACE

INTERFACE: TokenType = createAndPushTokenWithNameAlt({name: 'INTERFACE',pattern: 'INTERFACE',})

INTO

INTO: TokenType = createKeyword({ name: 'INTO' })

IRI

IRI: TokenType = createKeyword({ name: 'IRI' })

Implements

Implements: TokenType = createAndPushTokenWithNameAlt({name: 'Implements',pattern: 'implements',})

Input

Input: TokenType = createAndPushTokenWithNameAlt({name: 'Input',pattern: 'input',})

Interface

Interface: TokenType = createAndPushTokenWithNameAlt({name: 'Interface',pattern: 'interface',})

LANG

LANG: TokenType = createKeyword({ name: 'LANG' })

LANGMATCHES

LANGMATCHES: TokenType = createKeyword({ name: 'LANGMATCHES' })

LCASE

LCASE: TokenType = createKeyword({ name: 'LCASE' })

LIMIT

LIMIT: TokenType = createKeyword({ name: 'LIMIT' })

LOAD

LOAD: TokenType = createKeyword({ name: 'LOAD' })

MAX

MAX: TokenType = createKeyword({ name: 'MAX' })

MAX_LENGTH

MAX_LENGTH: TokenType = createKeyword({name: 'MAX_LENGTH',pattern: /MAX LENGTH/i,})

MD5

MD5: TokenType = createKeyword({ name: 'MD5' })

MIN

MIN: TokenType = createKeyword({ name: 'MIN' })

MINUS

MINUS: TokenType = createKeyword({ name: 'MINUS' })

MINUTES

MINUTES: TokenType = createKeyword({ name: 'MINUTES' })

MONTH

MONTH: TokenType = createKeyword({ name: 'MONTH' })

MOVE

MOVE: TokenType = createKeyword({ name: 'MOVE' })

MUTATION

MUTATION: TokenType = createAndPushTokenWithNameAlt({name: 'MUTATION',pattern: 'MUTATION',})

Mutation

Mutation: TokenType = createAndPushTokenWithNameAlt({name: 'Mutation',pattern: 'mutation',})

NAMED

NAMED: TokenType = createKeyword({ name: 'NAMED' })

NOT_EXISTS

NOT_EXISTS: TokenType = createKeyword({name: 'NOT_EXISTS',pattern: /NOT EXISTS/i,})

NOT_IN

NOT_IN: TokenType = createKeyword({name: 'NOT_IN',pattern: /NOT IN/i,})

NOW

NOW: TokenType = createKeyword({ name: 'NOW' })

OBJECT

OBJECT: TokenType = createAndPushTokenWithNameAlt({ name: 'OBJECT', pattern: 'OBJECT' })

OFFSET

OFFSET: TokenType = createKeyword({ name: 'OFFSET' })

OPTIONAL

OPTIONAL: TokenType = createKeyword({ name: 'OPTIONAL' })

ORDER_BY

ORDER_BY: TokenType = createKeyword({name: 'ORDER_BY',pattern: /ORDER BY/i,})

On

On: TokenType = createAndPushTokenWithNameAlt({name: 'On',pattern: ON_PATTERN,})

PATHS

PATHS: TokenType = createKeyword({ name: 'PATHS' })

PATHS_ALL

PATHS_ALL: TokenType = createKeyword({name: 'PATHS_ALL',pattern: /PATHS ALL/i,})

PATHS_SHORTEST

PATHS_SHORTEST: TokenType = createKeyword({name: 'PATHS_SHORTEST',pattern: /PATHS SHORTEST/i,})

PER

PER: TokenType = createKeyword({ name: 'PER' })

PREFIX

PREFIX: TokenType = createKeyword({ name: 'PREFIX' })

QUERY

QUERY: TokenType = createAndPushTokenWithNameAlt({ name: 'QUERY', pattern: 'QUERY' })

Query

Query: TokenType = createAndPushTokenWithNameAlt({name: 'Query',pattern: 'query',})

RAND

RAND: TokenType = createKeyword({ name: 'RAND' })

REDUCED

REDUCED: TokenType = createKeyword({ name: 'REDUCED' })

REGEX

REGEX: TokenType = createKeyword({ name: 'REGEX' })

REPLACE

REPLACE: TokenType = createKeyword({ name: 'REPLACE' })

ROUND

ROUND: TokenType = createKeyword({ name: 'ROUND' })

SAMPLE

SAMPLE: TokenType = createKeyword({ name: 'SAMPLE' })

SCALAR

SCALAR: TokenType = createAndPushTokenWithNameAlt({ name: 'SCALAR', pattern: 'SCALAR' })

SCHEMA

SCHEMA: TokenType = createAndPushTokenWithNameAlt({ name: 'SCHEMA', pattern: 'SCHEMA' })

SECONDS

SECONDS: TokenType = createKeyword({ name: 'SECONDS' })

SELECT

SELECT: TokenType = createKeyword({ name: 'SELECT' })

SEPARATOR

SEPARATOR: TokenType = createKeyword({ name: 'SEPARATOR' })

SERVICE

SERVICE: TokenType = createKeyword({ name: 'SERVICE' })

SHA1

SHA1: TokenType = createKeyword({ name: 'SHA1' })

SHA256

SHA256: TokenType = createKeyword({ name: 'SHA256' })

SHA384

SHA384: TokenType = createKeyword({ name: 'SHA384' })

SHA512

SHA512: TokenType = createKeyword({ name: 'SHA512' })

SHAPE

SHAPE: TokenType = createKeyword({ name: 'SHAPE' })

SHAPES

SHAPES: TokenType = createKeyword({ name: 'SHAPES' })

SILENT

SILENT: TokenType = createKeyword({ name: 'SILENT' })

START

START: TokenType = createKeyword({ name: 'START' })

STR

STR: TokenType = createKeyword({ name: 'STR' })

STRAFTER

STRAFTER: TokenType = createKeyword({ name: 'STRAFTER' })

STRBEFORE

STRBEFORE: TokenType = createKeyword({ name: 'STRBEFORE' })

STRDT

STRDT: TokenType = createKeyword({ name: 'STRDT' })

STRENDS

STRENDS: TokenType = createKeyword({ name: 'STRENDS' })

STRLANG

STRLANG: TokenType = createKeyword({ name: 'STRLANG' })

STRLEN

STRLEN: TokenType = createKeyword({ name: 'STRLEN' })

STRSTARTS

STRSTARTS: TokenType = createKeyword({ name: 'STRSTARTS' })

STRUUID

STRUUID: TokenType = createKeyword({ name: 'STRUUID' })

SUBSCRIPTION

SUBSCRIPTION: TokenType = createAndPushTokenWithNameAlt({name: 'SUBSCRIPTION',pattern: 'SUBSCRIPTION',})

SUBSTR

SUBSTR: TokenType = createKeyword({ name: 'SUBSTR' })

SUM

SUM: TokenType = createKeyword({ name: 'SUM' })

Scalar

Scalar: TokenType = createAndPushTokenWithNameAlt({name: 'Scalar',pattern: 'scalar',})

Schema

Schema: TokenType = createAndPushTokenWithNameAlt({name: 'Schema',pattern: 'schema',})

Subscription

Subscription: TokenType = createAndPushTokenWithNameAlt({name: 'Subscription',pattern: 'subscription',})

TIMEZONE

TIMEZONE: TokenType = createKeyword({ name: 'TIMEZONE' })

TO

TO: TokenType = createKeyword({ name: 'TO' })

TRUE

TRUE: TokenType = createKeyword({ name: 'TRUE' })

TZ

TZ: TokenType = createKeyword({ name: 'TZ' })

TypeToken

TypeToken: TokenType = createAndPushTokenWithNameAlt({name: 'TypeToken',pattern: 'type',})

UCASE

UCASE: TokenType = createKeyword({ name: 'UCASE' })

UNDEF

UNDEF: TokenType = createKeyword({ name: 'UNDEF' })

UNION

UNION: TokenType = createAndPushTokenWithNameAlt({ name: 'UNION', pattern: 'UNION' })

UNKNOWN

UNKNOWN: TokenType

UNNEST

UNNEST: TokenType = createKeyword({ name: 'UNNEST' })

URI

URI: TokenType = createKeyword({ name: 'URI' })

USING

USING: TokenType = createKeyword({ name: 'USING' })

UUID

UUID: TokenType = createKeyword({ name: 'UUID' })

Union

Union: TokenType = createAndPushTokenWithNameAlt({name: 'Union',pattern: 'union',})

VALIDATE

VALIDATE: TokenType = createKeyword({ name: 'VALIDATE' })

VALUES

VALUES: TokenType = createKeyword({ name: 'VALUES' })

VIA

VIA: TokenType = createKeyword({ name: 'VIA' })

WHERE

WHERE: TokenType = createKeyword({ name: 'WHERE' })

WITH

WITH: TokenType = createKeyword({ name: 'WITH' })

YEAR

YEAR: TokenType = createKeyword({ name: 'YEAR' })

isBLANK

isBLANK: TokenType = createKeyword({ name: 'isBLANK' })

isIRI

isIRI: TokenType = createKeyword({ name: 'isIRI' })

isLITERAL

isLITERAL: TokenType = createKeyword({ name: 'isLITERAL' })

isNUMERIC

isNUMERIC: TokenType = createKeyword({ name: 'isNUMERIC' })

isURI

isURI: TokenType = createKeyword({ name: 'isURI' })

sameTerm

sameTerm: TokenType = createKeyword({ name: 'sameTerm' })
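
Every keyword entry above follows the same shape: createKeyword({ name }) for keywords that match their own name case-insensitively, with an explicit pattern override where the lexeme differs from the name (e.g. NOT_IN matching /NOT IN/i). A minimal sketch of what such a factory could look like; this is an assumed reconstruction, not millan's internal helper, which may also wire up longer_alt and token categories:

import { createToken, TokenType } from 'chevrotain';

// Hypothetical reconstruction of the createKeyword factory used above.
// When no pattern is supplied, the keyword matches its name, ignoring case.
function createKeyword(config: { name: string; pattern?: RegExp }): TokenType {
  return createToken({
    name: config.name,
    pattern: config.pattern || new RegExp(config.name, 'i'),
  });
}

const SELECT = createKeyword({ name: 'SELECT' }); // matches "select", "SELECT", ...
const NOT_IN = createKeyword({ name: 'NOT_IN', pattern: /NOT IN/i });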

Const localNamesByCategory

localNamesByCategory: object

AnyLiteralTakingPredicate

AnyLiteralTakingPredicate: Object = getAsTypedTuple('minExclusive','minInclusive','maxExclusive','maxInclusive')

BooleanTakingPredicate

BooleanTakingPredicate: Object = getAsTypedTuple('uniqueLang','qualifiedValueShapesDisjoint','closed','deactivated','optional')

IntTakingPredicate

IntTakingPredicate: Object = getAsTypedTuple('minCount','maxCount','minLength','maxLength','qualifiedMinCount','qualifiedMaxCount')

LangStringTakingPredicate

LangStringTakingPredicate: Object = getAsTypedTuple('message', 'labelTemplate')

ManyIriTakingPredicate

ManyIriTakingPredicate: Object = getAsTypedTuple('equals','disjoint','lessThan','lessThanOrEquals','targetClass','targetSubjectsOf','targetObjectsOf')

NodeKindIRI

NodeKindIRI: Object = getAsTypedTuple('IRI','BlankNode','Literal','BlankNodeOrIRI','BlankNodeOrLiteral','IRIOrLiteral')

ShapeExpectingPredicate

ShapeExpectingPredicate: Object = getAsTypedTuple('not','node','property','qualifiedValueShape','sparql','declare','prefixes','parameter','nodeValidator','propertyValidator','validator')

SingleIriTakingPredicate

SingleIriTakingPredicate: Object = getAsTypedTuple('class', 'datatype', 'severity')

StringLiteralQuoteTakingPredicate

StringLiteralQuoteTakingPredicate: Object = getAsTypedTuple('pattern','flags','prefix','namespace')

other

other: Object = getAsTypedTuple('Shape','NodeShape','PropertyShape','targetNode','message','path','alternativePath','inversePath','zeroOrMorePath','oneOrMorePath','zeroOrOnePath','nodeKind','languageIn','and','or','xone','ignoredProperties','hasValue','in','select','ask')
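
getAsTypedTuple is presumably an identity helper whose return type preserves each string literal, which is what keeps these category lists usable as subsets of the LocalName union at the type level. A plausible sketch (assumed, not millan's actual source):

// Returns its arguments unchanged, but typed as a tuple of literal types
// rather than string[].
function getAsTypedTuple<T extends string[]>(...args: T): T {
  return args;
}

const IntTakingPredicate = getAsTypedTuple('minCount', 'maxCount');
// inferred type: ["minCount", "maxCount"]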

Const multiModeLexerDefinition

multiModeLexerDefinition: object

defaultMode

defaultMode: LexerMode = LexerMode.TURTLE

modes

modes: object

__computed

__computed: any[] = [turtleTokenMap.WhiteSpace,sparqlTokenMap.LCurly,EndThen,TriplesBlock,]
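
For context, a Chevrotain multi-mode lexer definition of this shape pairs a defaultMode with named arrays of tokens; switching modes is driven by push_mode/pop_mode on individual tokens (EndThen above pops its mode when '}' is seen). A self-contained sketch with illustrative token and mode names:

import { createToken, Lexer } from 'chevrotain';

const WhiteSpace = createToken({
  name: 'WhiteSpace',
  pattern: /\s+/,
  group: Lexer.SKIPPED,
});
// '{' switches the lexer into the inner mode...
const LCurly = createToken({ name: 'LCurly', pattern: '{', push_mode: 'inner' });
// ...and '}' pops back out, as EndThen does in the definition above.
const RCurly = createToken({ name: 'RCurly', pattern: '}', pop_mode: true });
const Word = createToken({ name: 'Word', pattern: /\w+/ });

const lexer = new Lexer({
  defaultMode: 'outer',
  modes: {
    outer: [WhiteSpace, LCurly, Word],
    inner: [WhiteSpace, RCurly, Word],
  },
});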

Const nonKeywordTerminals

nonKeywordTerminals: object

BlockStringToken

BlockStringToken: TokenType = createAndPushToken({name: 'BlockStringToken',pattern: STRING_LITERAL_LONG2,categories: [StringValueToken],})

BooleanValueToken

BooleanValueToken: TokenType = createAndPushToken({name: 'BooleanValueToken',pattern: BOOLEAN_PATTERN,longer_alt: Name,})

EnumValueToken

EnumValueToken: TokenType

FloatValueToken

FloatValueToken: TokenType = createAndPushToken({name: 'FloatValueToken',pattern: regex.and(INTEGER_PART_PATTERN,regex.or(regex.and(/\.[0-9]+/, regex.option(EXPONENT_PART_PATTERN)),EXPONENT_PART_PATTERN)),})

FragmentName

FragmentName: TokenType

IntValueToken

IntValueToken: TokenType = createAndPushToken({name: 'IntValueToken',pattern: INTEGER_PART_PATTERN,})

Name

Name: TokenType

NullValueToken

NullValueToken: TokenType = createAndPushToken({name: 'NullValueToken',pattern: NULL_PATTERN,longer_alt: Name,})

StringToken

StringToken: TokenType = createAndPushToken({name: 'StringToken',pattern: STRING_CHARACTER_PATTERN,categories: [StringValueToken],})

StringValueToken

StringValueToken: TokenType
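
StringValueToken above is a pure category token (its pattern would be Lexer.NA): it never matches text itself, but StringToken and BlockStringToken list it in categories, so a grammar rule can accept either concrete token by consuming the category. A minimal sketch of the same technique:

import { createToken, Lexer } from 'chevrotain';

// Category token: never matched directly.
const StringValueToken = createToken({
  name: 'StringValueToken',
  pattern: Lexer.NA,
});
// Concrete token that is also a StringValueToken for parsing purposes.
const StringToken = createToken({
  name: 'StringToken',
  pattern: /"[^"\n]*"/,
  categories: [StringValueToken],
});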

Const punctuators

punctuators: object

Amp

Amp: TokenType = createAndPushPunctuator({ name: 'Amp', pattern: '&' })

At

At: TokenType = createAndPushPunctuator({ name: 'At', pattern: '@' })

Bang

Bang: TokenType = createAndPushPunctuator({ name: 'Bang', pattern: '!' })

Colon

Colon: TokenType = createAndPushPunctuator({ name: 'Colon', pattern: ':' })

Dollar

Dollar: TokenType = createAndPushPunctuator({ name: 'Dollar', pattern: '$' })

Equals

Equals: TokenType = createAndPushPunctuator({ name: 'Equals', pattern: '=' })

LBracket

LBracket: TokenType = createAndPushPunctuator({ name: 'LBracket', pattern: '[' })

LCurly

LCurly: TokenType = createAndPushPunctuator({ name: 'LCurly', pattern: '{' })

LParen

LParen: TokenType = createAndPushPunctuator({ name: 'LParen', pattern: '(' })

Pipe

Pipe: TokenType = createAndPushPunctuator({ name: 'Pipe', pattern: '|' })

Punctuator

Punctuator: TokenType

RBracket

RBracket: TokenType = createAndPushPunctuator({ name: 'RBracket', pattern: ']' })

RCurly

RCurly: TokenType = createAndPushPunctuator({ name: 'RCurly', pattern: '}' })

RParen

RParen: TokenType = createAndPushPunctuator({ name: 'RParen', pattern: ')' })

Spread

Spread: TokenType = createAndPushPunctuator({ name: 'Spread', pattern: '...' })
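
The createAndPushPunctuator entries above imply a small factory that creates the token, tags it with the shared Punctuator category, and appends it to the ordered token list the lexer consumes. A sketch of that assumed shape (names here are reconstructions, not millan's internals):

import { createToken, Lexer, TokenType } from 'chevrotain';

const Punctuator = createToken({ name: 'Punctuator', pattern: Lexer.NA });
const tokenList: TokenType[] = [];

// Create a punctuator token, categorize it, and register it in match order.
function createAndPushPunctuator(config: {
  name: string;
  pattern: string;
}): TokenType {
  const token = createToken({ ...config, categories: [Punctuator] });
  tokenList.push(token);
  return token;
}

const Amp = createAndPushPunctuator({ name: 'Amp', pattern: '&' });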

Const regex

regex: object

and

  • and(...r: RegExp[]): RegExp

many

  • many(r: RegExp): RegExp

option

  • option(r: RegExp): RegExp

or

  • or(...r: RegExp[]): RegExp
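
These four combinators compose RegExp values by concatenating, grouping, and alternating their source text. A plausible implementation matching the documented signatures (internals assumed):

const regex = {
  // and(/a/, /b/) -> /ab/ (concatenation)
  and: (...r: RegExp[]): RegExp => new RegExp(r.map((x) => x.source).join('')),
  // many(/a/) -> /(?:a)*/ (zero or more)
  many: (r: RegExp): RegExp => new RegExp(`(?:${r.source})*`),
  // option(/a/) -> /(?:a)?/ (zero or one)
  option: (r: RegExp): RegExp => new RegExp(`(?:${r.source})?`),
  // or(/a/, /b/) -> /(?:a)|(?:b)/ (alternation)
  or: (...r: RegExp[]): RegExp =>
    new RegExp(r.map((x) => `(?:${x.source})`).join('|')),
};

// e.g. a signed integer: regex.and(regex.option(/[+-]/), /\d+/) -> /(?:[+-])?\d+/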

Const smsTokenMap

smsTokenMap: object

Csv

Csv: TokenType = createKeyword({ name: 'Csv' })

GraphQl

GraphQl: TokenType = createKeyword({ name: 'GraphQl' })

GraphQlBlock

GraphQlBlock: TokenType = createToken({
  name: 'GraphQlBlock',
  pattern: (
    text: string,
    startOffset: number = 0,
    matchedTokensSoFar: IToken[]
  ) => {
    const [secondToLastToken, lastToken] = matchedTokensSoFar.slice(-2);
    if (
      !secondToLastToken ||
      !lastToken ||
      secondToLastToken.tokenType.tokenName !== smsTokenMap.GraphQl.tokenName ||
      lastToken.tokenType.tokenName !== smsTokenMap.LCurly.tokenName
    ) {
      return null;
    }
    const textToMatch = text.slice(startOffset);
    return explicitEndMatcher(textToMatch, '}', FROM_BLOCK_END_MATCHER);
  },
  line_breaks: true,
})

Json

Json: TokenType = createKeyword({ name: 'Json' })

JsonBlock

JsonBlock: TokenType = createToken({
  name: 'JsonBlock',
  pattern: (
    text: string,
    startOffset: number = 0,
    matchedTokensSoFar: IToken[]
  ) => {
    const [lastToken] = matchedTokensSoFar.slice(-1);
    if (!lastToken || lastToken.tokenType.tokenName !== smsTokenMap.Json.tokenName) {
      return null;
    }
    const textToMatch = text.slice(startOffset);
    const match = FROM_JSON_BLOCK_END_MATCHER.exec(textToMatch);
    if (!match) {
      return null;
    }
    const capturedMatch = match.slice(1) as RegExpExecArray;
    return capturedMatch;
  },
  line_breaks: true,
})

Mapping

Mapping: TokenType = createKeyword({ name: 'Mapping' })

Sql

Sql: TokenType = createKeyword({ name: 'Sql' })

SqlBlock

SqlBlock: TokenType = createToken({
  name: 'SqlBlock',
  pattern: (
    text: string,
    startOffset: number = 0,
    matchedTokensSoFar: IToken[]
  ) => {
    const [secondToLastToken, lastToken] = matchedTokensSoFar.slice(-2);
    if (
      !secondToLastToken ||
      !lastToken ||
      secondToLastToken.tokenType.tokenName !== smsTokenMap.Sql.tokenName ||
      lastToken.tokenType.tokenName !== smsTokenMap.LCurly.tokenName
    ) {
      return null;
    }
    const textToMatch = text.slice(startOffset);
    return explicitEndMatcher(textToMatch, '}', FROM_BLOCK_END_MATCHER);
  },
  line_breaks: true,
})

Template

Template: TokenType = createKeyword({ name: 'Template' })
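
GraphQlBlock, JsonBlock, and SqlBlock above all rely on Chevrotain custom pattern functions that look backward at the tokens matched so far, so a raw block body only lexes when preceded by its keyword and an opening brace. A simplified sketch of the technique; the real versions delegate to explicitEndMatcher to find the balancing brace, and older Chevrotain exposed the token name as tokenName rather than name:

import { createToken, IToken } from 'chevrotain';

// Build a pattern function that only matches inside "<keyword> { ... }".
function blockAfter(keywordName: string) {
  return (
    text: string,
    startOffset: number = 0,
    matchedTokensSoFar: IToken[]
  ): RegExpExecArray | null => {
    const [secondToLast, last] = matchedTokensSoFar.slice(-2);
    if (
      !secondToLast ||
      !last ||
      secondToLast.tokenType.name !== keywordName ||
      last.tokenType.name !== 'LCurly'
    ) {
      return null; // wrong context: refuse to match
    }
    // Naive stand-in: consume everything up to the next '}'.
    return /^[^}]*/.exec(text.slice(startOffset));
  };
}

const SqlBlockSketch = createToken({
  name: 'SqlBlock',
  pattern: blockAfter('Sql'),
  line_breaks: true,
});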

Const sparqlTokenMap

sparqlTokenMap: object

A

A: TokenType = keywords.A

ABS

ABS: TokenType = keywords.ABS

ADD

ADD: TokenType = keywords.ADD

ALL

ALL: TokenType = keywords.ALL

ANON

ANON: TokenType = terminals.ANON

AS

AS: TokenType = keywords.AS

ASC

ASC: TokenType = keywords.ASC

ASK

ASK: TokenType = keywords.ASK

AVG

AVG: TokenType = keywords.AVG

BASE

BASE: TokenType = keywords.BASE

BIND

BIND: TokenType = keywords.BIND

BLANK_NODE_LABEL

BLANK_NODE_LABEL: TokenType = terminals.BLANK_NODE_LABEL

BNODE

BNODE: TokenType = keywords.BNODE

BOUND

BOUND: TokenType = keywords.BOUND

BY

BY: TokenType = keywords.BY

Bang

Bang: TokenType = createToken({name: 'Bang',pattern: '!',})

CEIL

CEIL: TokenType = keywords.CEIL

CLEAR

CLEAR: TokenType = keywords.CLEAR

COALESCE

COALESCE: TokenType = keywords.COALESCE

CONCAT

CONCAT: TokenType = keywords.CONCAT

CONSTRUCT

CONSTRUCT: TokenType = keywords.CONSTRUCT

CONTAINS

CONTAINS: TokenType = keywords.CONTAINS

COPY

COPY: TokenType = keywords.COPY

COUNT

COUNT: TokenType = keywords.COUNT

CREATE

CREATE: TokenType = keywords.CREATE

CYCLIC

CYCLIC: TokenType = keywords.CYCLIC

Caret

Caret: TokenType = createToken({name: 'Caret',pattern: '^',})

Comma

Comma: TokenType = createToken({name: 'Comma',pattern: ',',})

Comment

Comment: TokenType = createToken({name: 'Comment',pattern: /#[^\n]*/,group: 'comments',})

DATATYPE

DATATYPE: TokenType = keywords.DATATYPE

DAY

DAY: TokenType = keywords.DAY

DECIMAL

DECIMAL: TokenType = terminals.DECIMAL

DECIMAL_NEGATIVE

DECIMAL_NEGATIVE: TokenType = terminals.DECIMAL_NEGATIVE

DECIMAL_POSITIVE

DECIMAL_POSITIVE: TokenType = terminals.DECIMAL_POSITIVE

DEFAULT

DEFAULT: TokenType = keywords.DEFAULT

DELETE

DELETE: TokenType = keywords.DELETE

DELETE_DATA

DELETE_DATA: TokenType = keywords.DELETE_DATA

DELETE_WHERE

DELETE_WHERE: TokenType = keywords.DELETE_WHERE

DESC

DESC: TokenType = keywords.DESC

DESCRIBE

DESCRIBE: TokenType = keywords.DESCRIBE

DISTINCT

DISTINCT: TokenType = keywords.DISTINCT

DOUBLE

DOUBLE: TokenType = terminals.DOUBLE

DOUBLE_NEGATIVE

DOUBLE_NEGATIVE: TokenType = terminals.DOUBLE_NEGATIVE

DOUBLE_POSITIVE

DOUBLE_POSITIVE: TokenType = terminals.DOUBLE_POSITIVE

DROP

DROP: TokenType = keywords.DROP

DoubleCaret

DoubleCaret: TokenType = createToken({name: 'DoubleCaret',pattern: '^^',})

ENCODE_FOR_URI

ENCODE_FOR_URI: TokenType = keywords.ENCODE_FOR_URI

END

END: TokenType = keywords.END

EXISTS

EXISTS: TokenType = keywords.EXISTS

Equals

Equals: TokenType = createToken({name: 'Equals',pattern: '=',})

FALSE

FALSE: TokenType = keywords.FALSE

FILTER

FILTER: TokenType = keywords.FILTER

FLOOR

FLOOR: TokenType = keywords.FLOOR

FROM

FROM: TokenType = keywords.FROM

ForwardSlash

ForwardSlash: TokenType = createToken({name: 'ForwardSlash',pattern: '/',})

GRAPH

GRAPH: TokenType = keywords.GRAPH

GROUP_BY

GROUP_BY: TokenType = keywords.GROUP_BY

GROUP_CONCAT

GROUP_CONCAT: TokenType = keywords.GROUP_CONCAT

GreaterThan

GreaterThan: TokenType = createToken({name: 'GreaterThan',pattern: '>',})

GreaterThanEquals

GreaterThanEquals: TokenType = createToken({name: 'GreaterThanEquals',pattern: '>=',})

HAVING

HAVING: TokenType = keywords.HAVING

HOURS

HOURS: TokenType = keywords.HOURS

IF

IF: TokenType = keywords.IF

IN

IN: TokenType = keywords.IN

INSERT

INSERT: TokenType = keywords.INSERT

INSERT_DATA

INSERT_DATA: TokenType = keywords.INSERT_DATA

INTEGER

INTEGER: TokenType = terminals.INTEGER

INTEGER_NEGATIVE

INTEGER_NEGATIVE: TokenType = terminals.INTEGER_NEGATIVE

INTEGER_POSITIVE

INTEGER_POSITIVE: TokenType = terminals.INTEGER_POSITIVE

INTO

INTO: TokenType = keywords.INTO

IRI

IRI: TokenType = keywords.IRI

IRIREF

IRIREF: TokenType = terminals.IRIREF

LANG

LANG: TokenType = keywords.LANG

LANGMATCHES

LANGMATCHES: TokenType = keywords.LANGMATCHES

LANGTAG

LANGTAG: TokenType = terminals.LANGTAG

LBracket

LBracket: TokenType = createToken({name: 'LBracket',pattern: '[',})

LCASE

LCASE: TokenType = keywords.LCASE

LCurly

LCurly: TokenType = createToken({ name: 'LCurly', pattern: '{' })

LEmbed

LEmbed: TokenType = createToken({name: 'LEmbed',pattern: '<<',})

LIMIT

LIMIT: TokenType = keywords.LIMIT

LOAD

LOAD: TokenType = keywords.LOAD

LParen

LParen: TokenType = createToken({ name: 'LParen', pattern: '(' })

LessThan

LessThan: TokenType = createToken({name: 'LessThan',pattern: '<',})

LessThanEquals

LessThanEquals: TokenType = createToken({name: 'LessThanEquals',pattern: '<=',})

LogicalAnd

LogicalAnd: TokenType = createToken({name: 'LogicalAnd',pattern: '&&',})

LogicalOr

LogicalOr: TokenType = createToken({name: 'LogicalOr',pattern: '||',})

MAX

MAX: TokenType = keywords.MAX

MAX_LENGTH

MAX_LENGTH: TokenType = keywords.MAX_LENGTH

MD5

MD5: TokenType = keywords.MD5

MIN

MIN: TokenType = keywords.MIN

MINUS

MINUS: TokenType = keywords.MINUS

MINUTES

MINUTES: TokenType = keywords.MINUTES

MONTH

MONTH: TokenType = keywords.MONTH

MOVE

MOVE: TokenType = keywords.MOVE

Minus

Minus: TokenType = createToken({name: 'Minus',pattern: '-',})

NAMED

NAMED: TokenType = keywords.NAMED

NIL

NIL: TokenType = terminals.NIL

NOT_EXISTS

NOT_EXISTS: TokenType = keywords.NOT_EXISTS

NOT_IN

NOT_IN: TokenType = keywords.NOT_IN

NOW

NOW: TokenType = keywords.NOW

NotEquals

NotEquals: TokenType = createToken({name: 'NotEquals',pattern: '!=',})

OFFSET

OFFSET: TokenType = keywords.OFFSET

OPTIONAL

OPTIONAL: TokenType = keywords.OPTIONAL

ORDER_BY

ORDER_BY: TokenType = keywords.ORDER_BY

PATHS

PATHS: TokenType = keywords.PATHS

PATHS_ALL

PATHS_ALL: TokenType = keywords.PATHS_ALL

PATHS_SHORTEST

PATHS_SHORTEST: TokenType = keywords.PATHS_SHORTEST

PER

PER: TokenType = keywords.PER

PERCENT

PERCENT: TokenType = terminals.PERCENT

PNAME_LN

PNAME_LN: TokenType = terminals.PNAME_LN

PNAME_NS

PNAME_NS: TokenType = terminals.PNAME_NS

PREFIX

PREFIX: TokenType = keywords.PREFIX

Period

Period: TokenType = createToken({name: 'Period',pattern: '.',})

Pipe

Pipe: TokenType = createToken({name: 'Pipe',pattern: '|',})

Plus

Plus: TokenType = createToken({name: 'Plus',pattern: '+',})

QuestionMark

QuestionMark: TokenType = createToken({name: 'QuestionMark',pattern: '?',})

RAND

RAND: TokenType = keywords.RAND

RBracket

RBracket: TokenType = createToken({name: 'RBracket',pattern: ']',})

RCurly

RCurly: TokenType = createToken({ name: 'RCurly', pattern: '}' })

REDUCED

REDUCED: TokenType = keywords.REDUCED

REGEX

REGEX: TokenType = keywords.REGEX

REPLACE

REPLACE: TokenType = keywords.REPLACE

REmbed

REmbed: TokenType = createToken({name: 'REmbed',pattern: '>>',})

ROUND

ROUND: TokenType = keywords.ROUND

RParen

RParen: TokenType = createToken({ name: 'RParen', pattern: ')' })

SAMPLE

SAMPLE: TokenType = keywords.SAMPLE

SECONDS

SECONDS: TokenType = keywords.SECONDS

SELECT

SELECT: TokenType = keywords.SELECT

SEPARATOR

SEPARATOR: TokenType = keywords.SEPARATOR

SERVICE

SERVICE: TokenType = keywords.SERVICE

SHA1

SHA1: TokenType = keywords.SHA1

SHA256

SHA256: TokenType = keywords.SHA256

SHA384

SHA384: TokenType = keywords.SHA384

SHA512

SHA512: TokenType = keywords.SHA512

SHAPE

SHAPE: TokenType = keywords.SHAPE

SHAPES

SHAPES: TokenType = keywords.SHAPES

SILENT

SILENT: TokenType = keywords.SILENT

START

START: TokenType = keywords.START

STR

STR: TokenType = keywords.STR

STRAFTER

STRAFTER: TokenType = keywords.STRAFTER

STRBEFORE

STRBEFORE: TokenType = keywords.STRBEFORE

STRDT

STRDT: TokenType = keywords.STRDT

STRENDS

STRENDS: TokenType = keywords.STRENDS

STRING_LITERAL1

STRING_LITERAL1: TokenType = terminals.STRING_LITERAL1

STRING_LITERAL2

STRING_LITERAL2: TokenType = terminals.STRING_LITERAL2

STRING_LITERAL_LONG1

STRING_LITERAL_LONG1: TokenType = terminals.STRING_LITERAL_LONG1

STRING_LITERAL_LONG2

STRING_LITERAL_LONG2: TokenType = terminals.STRING_LITERAL_LONG2

STRLANG

STRLANG: TokenType = keywords.STRLANG

STRLEN

STRLEN: TokenType = keywords.STRLEN

STRSTARTS

STRSTARTS: TokenType = keywords.STRSTARTS

STRUUID

STRUUID: TokenType = keywords.STRUUID

SUBSTR

SUBSTR: TokenType = keywords.SUBSTR

SUM

SUM: TokenType = keywords.SUM

Semicolon

Semicolon: TokenType = createToken({name: 'Semicolon',pattern: ';',})

Star

Star: TokenType = createToken({name: 'Star',pattern: '*',})

TIMEZONE

TIMEZONE: TokenType = keywords.TIMEZONE

TO

TO: TokenType = keywords.TO

TRUE

TRUE: TokenType = keywords.TRUE

TZ

TZ: TokenType = keywords.TZ

UCASE

UCASE: TokenType = keywords.UCASE

UNDEF

UNDEF: TokenType = keywords.UNDEF

UNION

UNION: TokenType = keywords.UNION

UNKNOWN

UNKNOWN: TokenType = keywords.UNKNOWN

UNNEST

UNNEST: TokenType = keywords.UNNEST

URI

URI: TokenType = keywords.URI

USING

USING: TokenType = keywords.USING

UUID

UUID: TokenType = keywords.UUID

VALIDATE

VALIDATE: TokenType = keywords.VALIDATE

VALUES

VALUES: TokenType = keywords.VALUES

VAR1

VAR1: TokenType = terminals.VAR1

VAR2

VAR2: TokenType = terminals.VAR2

VIA

VIA: TokenType = keywords.VIA

WHERE

WHERE: TokenType = keywords.WHERE

WITH

WITH: TokenType = keywords.WITH

WhiteSpace

WhiteSpace: TokenType = createToken({name: 'WhiteSpace',pattern: /\s+/,group: Lexer.SKIPPED,line_breaks: true,})

YEAR

YEAR: TokenType = keywords.YEAR

isBLANK

isBLANK: TokenType = keywords.isBLANK

isIRI

isIRI: TokenType = keywords.isIRI

isLITERAL

isLITERAL: TokenType = keywords.isLITERAL

isNUMERIC

isNUMERIC: TokenType = keywords.isNUMERIC

isURI

isURI: TokenType = keywords.isURI

sameTerm

sameTerm: TokenType = keywords.sameTerm

Const srsTokenMap

srsTokenMap: object

EndThen

EndThen: TokenType

GroupGraphPattern

GroupGraphPattern: TokenType

If

If: TokenType

Rule

Rule: TokenType

Then

Then: TokenType

TriplesBlock

TriplesBlock: TokenType

Const stardogGraphQlTokenMap

stardogGraphQlTokenMap: object

OrderByArgumentDescPropertyToken

OrderByArgumentDescPropertyToken: TokenType = stardogOrderByArgumentDescPropertyToken

OrderByArgumentFieldPropertyToken

OrderByArgumentFieldPropertyToken: TokenType = stardogOrderByArgumentFieldPropertyToken

SparqlReceivingStardogDirective

SparqlReceivingStardogDirective: TokenType

StardogArgument

StardogArgument: TokenType

StardogDirective

StardogDirective: TokenType

TopLevel

TopLevel: TokenType

Const terminals

terminals: object

ANON

ANON: TokenType = createToken({name: 'ANON',pattern: ANON,label: '[]',})

BLANK_NODE_LABEL

BLANK_NODE_LABEL: TokenType = createToken({name: 'BLANK_NODE_LABEL',pattern: BLANK_NODE_LABEL,})

DECIMAL

DECIMAL: TokenType = createToken({name: 'DECIMAL',pattern: DECIMAL,})

DECIMAL_NEGATIVE

DECIMAL_NEGATIVE: TokenType = createToken({name: 'DECIMAL_NEGATIVE',pattern: DECIMAL_NEGATIVE,})

DECIMAL_POSITIVE

DECIMAL_POSITIVE: TokenType = createToken({name: 'DECIMAL_POSITIVE',pattern: DECIMAL_POSITIVE,})

DOUBLE

DOUBLE: TokenType = createToken({name: 'DOUBLE',pattern: DOUBLE,})

DOUBLE_NEGATIVE

DOUBLE_NEGATIVE: TokenType = createToken({name: 'DOUBLE_NEGATIVE',pattern: DOUBLE_NEGATIVE,})

DOUBLE_POSITIVE

DOUBLE_POSITIVE: TokenType = createToken({name: 'DOUBLE_POSITIVE',pattern: DOUBLE_POSITIVE,})

INTEGER

INTEGER: TokenType = createToken({name: 'INTEGER',pattern: INTEGER,})

INTEGER_NEGATIVE

INTEGER_NEGATIVE: TokenType = createToken({name: 'INTEGER_NEGATIVE',pattern: INTEGER_NEGATIVE,})

INTEGER_POSITIVE

INTEGER_POSITIVE: TokenType = createToken({name: 'INTEGER_POSITIVE',pattern: INTEGER_POSITIVE,})

IRIREF

IRIREF: TokenType = createToken({name: 'IRIREF',pattern: IRIREF,label: '<http://example.com>',})

LANGTAG

LANGTAG: TokenType = createToken({name: 'LANGTAG',pattern: LANGTAG,})

NIL

NIL: TokenType = createToken({name: 'NIL',pattern: NIL,label: '()',})

PERCENT

PERCENT: TokenType = createToken({name: 'PERCENT',pattern: PERCENT,})

PNAME_LN

PNAME_LN: TokenType = PNAME_LN_TOKEN

PNAME_NS

PNAME_NS: TokenType = createToken({name: 'PNAME_NS',pattern: PNAME_NS,longer_alt: PNAME_LN_TOKEN,})

STRING_LITERAL1

STRING_LITERAL1: TokenType = createToken({name: 'STRING_LITERAL1',pattern: STRING_LITERAL1,longer_alt: STRING_LITERAL_LONG1_TOKEN,})

STRING_LITERAL2

STRING_LITERAL2: TokenType = createToken({name: 'STRING_LITERAL2',pattern: STRING_LITERAL2,longer_alt: STRING_LITERAL_LONG2_TOKEN,})

STRING_LITERAL_LONG1

STRING_LITERAL_LONG1: TokenType = STRING_LITERAL_LONG1_TOKEN

STRING_LITERAL_LONG2

STRING_LITERAL_LONG2: TokenType = STRING_LITERAL_LONG2_TOKEN

VAR1

VAR1: TokenType = createToken({name: 'VAR1',pattern: VAR1,label: '?foo',})

VAR2

VAR2: TokenType = createToken({name: 'VAR2',pattern: VAR2,label: '?bar',})
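
PNAME_NS, STRING_LITERAL1, and STRING_LITERAL2 above all use longer_alt: when the shorter token's match is a prefix of the longer token's match, the lexer prefers the longer alternative. A small demonstration with simplified stand-in patterns:

import { createToken, Lexer } from 'chevrotain';

const PNAME_LN = createToken({ name: 'PNAME_LN', pattern: /\w*:\w+/ });
const PNAME_NS = createToken({
  name: 'PNAME_NS',
  pattern: /\w*:/,
  longer_alt: PNAME_LN, // try the longer token before committing
});

const lexer = new Lexer([PNAME_NS, PNAME_LN]);
// 'ex:' lexes as PNAME_NS, while 'ex:foo' lexes as the longer PNAME_LN.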

Const trigTokenMap

trigTokenMap: object

GRAPH

GRAPH: TokenType = sparqlTokenMap.GRAPH

Const turtleTokenMap

turtleTokenMap: object

A

A: any = sparqlTokenMap.A

ANON

ANON: any = sparqlTokenMap.ANON

BASE

BASE: any = sparqlTokenMap.BASE

BLANK_NODE_LABEL

BLANK_NODE_LABEL: any = sparqlTokenMap.BLANK_NODE_LABEL

Comma

Comma: any = sparqlTokenMap.Comma

Comment

Comment: TokenType = createToken({name: 'Comment',pattern: /#[^\n]*/,group: 'comments',})

DECIMAL

DECIMAL: TokenType = createToken({name: 'DECIMAL',pattern: regex.and(regex.option(/[+-]/), /(\d*\.\d+)/),})

DOUBLE

DOUBLE: TokenType = createToken({name: 'DOUBLE',pattern: regex.and(regex.option(/[+-]/),regex.or(regex.and(/\d+\.\d*/, EXPONENT),regex.and(/\.\d+/, EXPONENT),regex.and(/\d+/, EXPONENT))),})

DoubleCaret

DoubleCaret: any = sparqlTokenMap.DoubleCaret

ECHAR

ECHAR: TokenType = createToken({ name: 'ECHAR', pattern: ECHAR })

EXPONENT

EXPONENT: TokenType = createToken({ name: 'EXPONENT', pattern: EXPONENT })

FALSE

FALSE: TokenType = createToken({name: 'FALSE',pattern: /false/,})

HEX

HEX: TokenType = createToken({ name: 'HEX', pattern: HEX })

INTEGER

INTEGER: TokenType = createToken({name: 'INTEGER',pattern: regex.and(regex.option(/[+-]/), /\d+/),})

IRIREF

IRIREF: TokenType = createToken({
  name: 'IRIREF',
  pattern: (text: string, startOffset: number = 0) => {
    const textToMatch = text.slice(startOffset);
    let match = unescapedIri.exec(textToMatch);
    if (match) {
      return match;
    }
    match = escapedIri.exec(textToMatch);
    if (!match) {
      return null;
    }
    const value = unescape(match[1]);
    if (value === null || illegalIriChars.test(value)) {
      return null;
    }
    return match;
  },
  line_breaks: false,
})

LANGTAG

LANGTAG: any = sparqlTokenMap.LANGTAG

LBracket

LBracket: any = sparqlTokenMap.LBracket

LCurly

LCurly: any = sparqlTokenMap.LCurly

LEmbed

LEmbed: any = sparqlTokenMap.LEmbed

LParen

LParen: any = sparqlTokenMap.LParen

PERCENT

PERCENT: any = sparqlTokenMap.PERCENT

PLX

PLX: TokenType = createToken({ name: 'PLX', pattern: PLX })

PNAME_LN

PNAME_LN: any = sparqlTokenMap.PNAME_LN

PNAME_NS

PNAME_NS: any = sparqlTokenMap.PNAME_NS

PN_CHARS

PN_CHARS: TokenType = createToken({ name: 'PN_CHARS', pattern: PN_CHARS })

PN_CHARS_BASE

PN_CHARS_BASE: TokenType = createToken({ name: 'PN_CHARS_BASE', pattern: PN_CHARS_BASE })

PN_CHARS_U

PN_CHARS_U: TokenType = createToken({ name: 'PN_CHARS_U', pattern: PN_CHARS_U })

PN_LOCAL

PN_LOCAL: TokenType = createToken({ name: 'PN_LOCAL', pattern: PN_LOCAL })

PN_LOCAL_ESC

PN_LOCAL_ESC: TokenType = createToken({ name: 'PN_LOCAL_ESC', pattern: PN_LOCAL_ESC })

PN_PREFIX

PN_PREFIX: TokenType = createToken({ name: 'PN_PREFIX', pattern: PN_PREFIX })

PREFIX

PREFIX: any = sparqlTokenMap.PREFIX

Period

Period: any = sparqlTokenMap.Period

RBracket

RBracket: any = sparqlTokenMap.RBracket

RCurly

RCurly: any = sparqlTokenMap.RCurly

REmbed

REmbed: any = sparqlTokenMap.REmbed

RParen

RParen: any = sparqlTokenMap.RParen

STRING_LITERAL_LONG_QUOTE

STRING_LITERAL_LONG_QUOTE: TokenType = createToken({
  name: 'STRING_LITERAL_LONG_QUOTE',
  pattern: (text: string, startOffset: number = 0) => {
    const match = stringLiteralLongQuote.exec(text.slice(startOffset));
    if (!match || unescape(match[1]) === null) {
      // Bad characters
      return null;
    }
    return match;
  },
  line_breaks: true,
})

STRING_LITERAL_LONG_SINGLE_QUOTE

STRING_LITERAL_LONG_SINGLE_QUOTE: TokenType = createToken({
  name: 'STRING_LITERAL_LONG_SINGLE_QUOTE',
  pattern: (text: string, startOffset: number = 0) => {
    const match = stringLiteralLongSingleQuote.exec(text.slice(startOffset));
    if (!match || unescape(match[1]) === null) {
      // Bad characters
      return null;
    }
    return match;
  },
  line_breaks: true,
})

STRING_LITERAL_QUOTE

STRING_LITERAL_QUOTE: TokenType = createToken({
  name: 'STRING_LITERAL_QUOTE',
  pattern: (text: string, startOffset: number = 0) => {
    const textToMatch = text.slice(startOffset);
    let match = unescapedStringLiteralQuote.exec(textToMatch);
    if (match) {
      return match;
    }
    match = stringLiteralQuote.exec(textToMatch);
    if (!match) {
      return null;
    }
    if (unescape(match[1]) === null) {
      // Bad characters
      return null;
    }
    return match;
  },
  line_breaks: false,
})

STRING_LITERAL_SINGLE_QUOTE

STRING_LITERAL_SINGLE_QUOTE: TokenType = createToken({
  name: 'STRING_LITERAL_SINGLE_QUOTE',
  pattern: (text: string, startOffset: number = 0) => {
    const textToMatch = text.slice(startOffset);
    let match = unescapedStringLiteralSingleQuote.exec(textToMatch);
    if (match) {
      return match;
    }
    match = stringLiteralSingleQuote.exec(textToMatch);
    if (!match) {
      return null;
    }
    if (unescape(match[1]) === null) {
      // Bad characters
      return null;
    }
    return match;
  },
  line_breaks: false,
})

Semicolon

Semicolon: any = sparqlTokenMap.Semicolon

TRUE

TRUE: TokenType = createToken({name: 'TRUE',pattern: /true/,})

TTL_BASE

TTL_BASE: TokenType = createToken({ name: 'TTL_BASE', pattern: /@base/ })

TTL_PREFIX

TTL_PREFIX: TokenType = createToken({ name: 'TTL_PREFIX', pattern: /@prefix/ })

UCHAR

UCHAR: TokenType = createToken({
  name: 'UCHAR',
  pattern: (text, startOffset: number = 0) =>
    unicodeRegexp.exec(text.slice(startOffset)),
  line_breaks: false,
})

UNKNOWN

UNKNOWN: any = sparqlTokenMap.UNKNOWN

WhiteSpace

WhiteSpace: any = sparqlTokenMap.WhiteSpace
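
A hedged usage sketch: the entries in this map are plain Chevrotain TokenTypes, so a subset can be assembled into a standalone lexer. The token ordering below is illustrative only; millan defines its own canonical ordering internally, and a real lexer needs the full token set:

import { Lexer } from 'chevrotain';

const miniTurtleLexer = new Lexer([
  turtleTokenMap.WhiteSpace, // skipped
  turtleTokenMap.TTL_PREFIX, // '@prefix'
  turtleTokenMap.IRIREF,
  turtleTokenMap.PNAME_NS, // defers to PNAME_LN via longer_alt
  turtleTokenMap.Period,
]);

const { tokens, errors } = miniTurtleLexer.tokenize(
  '@prefix ex: <http://example.org/> .'
);
// expected token types: TTL_PREFIX, PNAME_NS, IRIREF, Period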

Const ungeneratedFixtures

ungeneratedFixtures: object

invalid

invalid: object

lex

lex: object

wrongBraceMatch

wrongBraceMatch: string = 'IF {{\n' +' ?X ?P ?Y. ?Y ?P ?Z\n' +'}\n' +'THEN {\n' +' ?X ?P ?Z\n' +'}\n'

wrongBraceMatch3

wrongBraceMatch3: string =
  'IF {\n' +
  ' ?X ?P ?Y. ?Y ?P ?Z\n' +
  '\n' +
  'THEN {\n' + // one less }
  ' ?X ?P ?Z\n' +
  '}\n'

parse

parse: object

noEmbeddedTripleIf

noEmbeddedTripleIf: string = '@prefix : <http://example.org/> .\n' +'@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n' +'RULE :FatherRule\n' +'IF {\n' +' << :s :p :o >> a <http://example.org/Male> , <http://example.org/Parent> .\n' +'}\n' +'THEN {\n' +' ?x a <http://example.org/Father> .\n' +'}\n'

noEmbeddedTripleIfObject

noEmbeddedTripleIfObject: string = '@prefix : <http://example.org/> .\n' +'@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n' +'RULE :FatherRule\n' +'IF {\n' +' ?x a <http://example.org/Male> , << :s :p :o >> .\n' +'}\n' +'THEN {\n' +' ?x a <http://example.org/Father> .\n' +'}\n'

noEmbeddedTripleThen

noEmbeddedTripleThen: string = '@prefix : <http://example.org/> .\n' +'@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n' +'RULE :FatherRule\n' +'IF {\n' +' ?x a <http://example.org/Male> , <http://example.org/Parent> .\n' +'}\n' +'THEN {\n' +' << :s :p :o >> a <http://example.org/Father> .\n' +'}\n'

noEmbeddedTripleThenObject

noEmbeddedTripleThenObject: string = '@prefix : <http://example.org/> .\n' +'@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n' +'RULE :FatherRule\n' +'IF {\n' +' ?x a <http://example.org/Male> , <http://example.org/Parent> .\n' +'}\n' +'THEN {\n' +' ?x a << :s :p :o >> .\n' +'}\n'

noLiteralRuleSubjects

noLiteralRuleSubjects: string = 'IF {\n' +' BIND ("literal" AS ?x)\n' +'}\n' +'THEN {\n' +' ?x a owl:Thing .\n' +'}'

noLiteralRuleSubjects2

noLiteralRuleSubjects2: string = 'IF {\n' +' BIND ("literal" AS ?x)\n' +' BIND ("literal" AS ?y)\n' +'}\n' +'THEN {\n' +' ?x a owl:Thing .\n' +' ?y a owl:Thing .\n' +'}'

noLiteralRuleSubjects3

noLiteralRuleSubjects3: string = 'IF {\n' +' "sLit" ?p "oLit"' +'}\n' +'THEN {\n' +' ?s a owl:Thing .\n' +'}'

noLiteralRuleSubjects4

noLiteralRuleSubjects4: string = 'IF {\n' +' ?s ?p ?o' +'}\n' +'THEN {\n' +' "literal" a owl:Thing .\n' +'}'

noLiteralRuleSubjects5

noLiteralRuleSubjects5: string = 'IF {\n' +' ?s ?p ?o' +'}\n' +'THEN {\n' +' ?s a owl:Thing . "literal" a owl:Thing .\n' +'}'

unsupportedSPARQLInIfClause

unsupportedSPARQLInIfClause: string = 'IF { ?x a <http://example.org/Male> . FILTER EXISTS {}}\n' +'THEN { ?x a <http://example.org/Father> . }'

wrongBraceMatch2

wrongBraceMatch2: string = 'IF {\n' +' ?X ?P ?Y. ?Y ?P ?Z\n' +'}}\n' +'THEN {\n' +' ?X ?P ?Z\n' +'}\n'

wrongBraceMatch4

wrongBraceMatch4: string =
  'IF {\n' +
  ' ?X ?P ?Y. ?Y ?P ?Z\n' +
  '\n' +
  ' THEN \n' + // No } {
  ' ?X ?P ?Z\n' +
  '}\n'

wrongIfContent

wrongIfContent: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' test:hasSibling ?Y. ?Y rdf:type test:Man\n' + // No subject
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongIfContent2

wrongIfContent2: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X ?Y. ?Y rdf:type test:Man\n' + // No predicate
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongIfContent3

wrongIfContent3: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling . ?Y rdf:type test:Man\n' + // No object
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongIfContent4

wrongIfContent4: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling ?Y ?Y rdf:type test:Man\n' + // No .
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongIfContent5

wrongIfContent5: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test2:hasSibling ?Y . ?Y rdf:type test:Man\n' + // Undefined prefix
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongKeyword

wrongKeyword: string =
  'IT {\n' + // IT
  ' ?X ?P ?Y. ?Y ?P ?Z\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X ?P ?Z\n' +
  '}\n'

wrongKeyword2

wrongKeyword2: string =
  'IF {\n' +
  ' ?X ?P ?Y. ?Y ?P ?Z\n' +
  '}\n' +
  'THAN {\n' + // THAN
  ' ?X ?P ?Z\n' +
  '}\n'

wrongPrefix

wrongPrefix: string =
  'PREFAX test: <http://test.com/test/0.1/>\n' + // PREFAX
  'IF {\n' +
  ' ?X test:hasSibling ?Y. ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongPrefix2

wrongPrefix2: string =
  'PREFIX test <http://test.com/test/0.1/>\n' + // Omit :
  'IF {\n' +
  ' ?X test:hasSibling ?Y. ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongPrefix3

wrongPrefix3: string =
  'PREFIX test: http://test.com/test/0.1/>\n' + // Omit <
  'IF {\n' +
  ' ?X test:hasSibling ?Y. ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongPrefix4

wrongPrefix4: string =
  'PREFIX test: <http://test.com/test/0.1/\n' + // Omit >
  'IF {\n' +
  ' ?X test:hasSibling ?Y. ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y\n' +
  '}\n'

wrongThenContent

wrongThenContent: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling ?Y. ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' test:hasBrother ?Y\n' + // No subject
  '}\n'

wrongThenContent2

wrongThenContent2: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling ?Y. ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X ?Y\n' + // No predicate
  '}\n'

wrongThenContent3

wrongThenContent3: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling ?Y . ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother \n' + // No object
  '}\n'

wrongThenContent4

wrongThenContent4: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling ?Y ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:hasBrother ?Y ?Y test:hasSibling ?X\n' + // No .
  '}\n'

wrongThenContent5

wrongThenContent5: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X test:hasSibling ?Y . ?Y rdf:type test:Man\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test22:hasBrother ?Y\n' + // Undefined prefix
  '}\n'

wrongThenContent6

wrongThenContent6: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  '?X a test:OldMan\n' +
  '}\n' +
  'THEN {\n' +
  '\n' + // Empty
  '}\n'

wrongThenContent7

wrongThenContent7: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  '?X a test:OldMan\n' +
  '}\n' +
  'THEN {\n' +
  '\t\n' + // Empty (Tab)
  '}\n'

wrongThenContent8

wrongThenContent8: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X a test:OldMan\n' +
  '}\n' +
  'THEN {\n' +
  ' ?X test:age ?Y. FILTER (?Y > 50) .\n' + // Includes Filter
  '}\n'

wrongThenContent9

wrongThenContent9: string =
  'PREFIX test: <http://test.com/test/0.1/>\n' +
  'IF {\n' +
  ' ?X rdf:type test:GoodDeal\n' +
  '}\n' +
  'THEN {\n' +
  ' ?x test:price ?p .\n' +
  ' ?x test:discount ?discount \n' +
  ' BIND (?p - ?realprice AS ?discount) .\n' + // Includes Bind
  '}\n'

valid

valid: object

basic

basic: string = '@prefix : <http://example.org/> .\n' +'@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n' +'RULE :FatherRule\n' +'IF {\n' +' ?x a <http://example.org/Male> , <http://example.org/Parent> .\n' +'}\n' +'THEN {\n' +' ?x a <http://example.org/Father> .\n' +'}\n' +':FatherRule rdfs:comment "This rule defines fathers" ;\n' +' a :MyRule .'

basicIsolated

basicIsolated: string = 'IF { ?x a <http://example.org/Male> . FILTER(?x NOT IN (<urn:a>, <urn:b>))}' +'THEN { ?x a <http://example.org/Father> . }'

bindWithoutLiteralSubject

bindWithoutLiteralSubject: string =
  // Was previously failing: https://github.com/stardog-union/millan/issues/22
  'IF {\n' +
  ' ?t a :Triangle ;\n' +
  ' :base ?b ;\n' +
  ' :height ?h\n' +
  ' BIND(?b * ?h / 2 AS ?area)\n' +
  '}\n' +
  'THEN {' +
  ' ?t :area ?area\n' +
  '}'

embedded_basic

embedded_basic: string = '@prefix : <http://example.org/> .\n' +'@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n' +'RULE :FatherRule\n' +'IF {\n' +' ?x a <http://example.org/Male> , <http://example.org/Parent> .\n' +'}\n' +'THEN {\n' +' ?x a <http://example.org/Father> .\n' +'}\n' +':FatherRule rdfs:comment "This rule defines fathers" ;\n' +' a :MyRule .\n' +'<< :FatherRule a :MyRule >> :createdAt "2019-10-26" .'

filterIn

filterIn: string = 'IF {\n' +' ?x <urn:p> ?y . \n' +' FILTER (?x IN (<urn:a>, <urn:b>, <urn:c>))\n' +'}\n' +'THEN {\n' +' ?x a <urn:A> .\n' +'}'

filterNotIn

filterNotIn: string = 'IF {\n' +' ?x <urn:p> ?y . \n' +' FILTER (?x NOT IN (<urn:a>, <urn:b>))\n' +'}\n' +'THEN {\n' +' ?x a <urn:A> .\n' +'}'

propertyPath

propertyPath: string = 'IF {\n' + ' ?a :b+ :c .\n' + '} THEN {\n' + '?a :c :d }'

stardogOrCustomFunctions

stardogOrCustomFunctions: string = 'PREFIX rule: <tag:stardog:api:rule:>\n\n' +'RULE rules:LeaseTerminatedEarlyRule\n' +'IF {\n' +' ?l a ro:ActiveLease .\n' +' ?lease_event ro:lease ?l ;\n' +' a ro:LeaseEvent ;\n' +' ops:EventDate ?event_dt ;\n' +' rdfs:label ?event_name .\n' +' BIND (IF(strstarts(?event_name, "Early Termination"), "true"^^xsd:boolean, "false"^^xsd:boolean) as ?terminated_early)\n' +' # Stardog-supported function:\n' +' BIND (date(?event_dt) as ?event_date)\n' +'}\n' +'THEN {\n' +' ?l ro:terminatedEarly ?terminated_early ;\n' +' ro:earlyTerminationDate ?event_date .\n' +'}\n'

unionOptional

unionOptional: string = 'PREFIX : <http://test.com/test/0.1/>\n' +'IF {\n' +' ?x a :Product ; :price ?p .\n' +' OPTIONAL {\n' +' ?x :contains ?y \n' +' { ?y a :OnSale }\n' +' UNION \n' +' { ?y a :Discontinued }\n' +' }\n' +' OPTIONAL { ?x :producer ?producer } ' +'}\n' +'THEN {\n' +' ?x :specialPrice ?p ;\n' +' :specialItem ?y ;\n' +'}\n' +':FatherRule rdfs:comment "This rule defines fathers" ;\n' +' a :MyRule .\n' +'IF {\n' +' ?x a <http://example.org/Male> , <http://example.org/Parent> .\n' +'}\n' +'THEN {\n' +' ?x a <http://example.org/Father> .\n' +'}\n'
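
These fixtures read like inputs for millan's SRS parser tests: the invalid entries should produce lexing or parsing errors, and the valid entries should not. A hedged usage sketch, assuming millan's SrsParser exposes a parse method that returns collected errors (see the Classes section for the exact API):

import { SrsParser } from 'millan';

const parser = new SrsParser();

// Invalid fixtures are expected to surface errors...
const bad = parser.parse(ungeneratedFixtures.invalid.parse.wrongKeyword);
console.log(bad.errors.length > 0); // expected: true

// ...while valid fixtures should parse cleanly.
const good = parser.parse(ungeneratedFixtures.valid.basic);
console.log(good.errors.length === 0); // expected: true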

Generated using TypeDoc