diff --git a/3.10/administration-cluster.md b/3.10/administration-cluster.md index 1be997f0f3..c558f45c22 100644 --- a/3.10/administration-cluster.md +++ b/3.10/administration-cluster.md @@ -39,7 +39,7 @@ requested _shard_ (_numberOfShards_) within the Cluster. Example: -``` +```js 127.0.0.1:8530@_system> db._create("test", {"replicationFactor": 3}) ``` @@ -100,20 +100,20 @@ key attributes are present in the documents you send, or in case of AQL, that you use a document reference or an object for the UPDATE, REPLACE or REMOVE operation which includes the shard key attributes: -```js +```aql FOR doc IN sharded_collection FILTER doc._key == "123" UPDATE doc WITH { … } IN sharded_collection ``` -```js +```aql UPDATE { _key: "123", country: "…" } WITH { … } IN sharded_collection ``` Using a string with just the document key as key expression instead will be processed without shard hints and thus perform slower: -```js +```aql UPDATE "123" WITH { … } IN sharded_collection ``` @@ -277,13 +277,13 @@ do { This script has to be executed in the [`arangosh`](programs-arangosh.html) by issuing the following command: -``` +```bash arangosh --server.username --server.password --javascript.execute -- DBServer ``` The output should be similar to the one below: -``` +```bash arangosh --server.username root --server.password pass --javascript.execute ~./serverCleanMonitor.js -- DBServer0002 [7836] INFO Checking shard distribution every 10 seconds... 
[7836] INFO Shards to be moved away from node DBServer0002: 9 diff --git a/3.10/administration-configuration.md b/3.10/administration-configuration.md index 2577954448..26b81d4768 100644 --- a/3.10/administration-configuration.md +++ b/3.10/administration-configuration.md @@ -116,7 +116,7 @@ authentication = true So you see, a command line option `‑‑section.param value` can be easily translated to an option in a configuration file: -```js +```conf [section] param = value ``` diff --git a/3.10/administration-license.md b/3.10/administration-license.md index 68ac7cb237..8c3080484a 100644 --- a/3.10/administration-license.md +++ b/3.10/administration-license.md @@ -43,7 +43,7 @@ At any point you may check the current state of your license in _arangosh_: 127.0.0.1:8529@_system> db._getLicense(); ``` -```js +```json { "features": { "expires": 1632411828 @@ -51,7 +51,7 @@ At any point you may check the current state of your license in _arangosh_: "license": "JD4E ... dnDw==", "version": 1, "status": "good" - "hash" : "...." + "hash" : "..." } ``` diff --git a/3.10/administration-managing-users-in-arangosh.md b/3.10/administration-managing-users-in-arangosh.md index ff4a049eaa..c7df127cbf 100644 --- a/3.10/administration-managing-users-in-arangosh.md +++ b/3.10/administration-managing-users-in-arangosh.md @@ -23,7 +23,7 @@ This is again for backward compatibility. Fire up *arangosh* and require the users module. Use it to create a new user: -``` +```js arangosh --server.endpoint tcp://127.0.0.1:8529 ... ... > const users = require('@arangodb/users'); @@ -37,7 +37,7 @@ Note that running the command like this may store the password literally in ArangoShell's history. 
To avoid that, either disable the history (`--console.history false`) or use a dynamically created password, e.g.: -``` +```js > passwd = require('internal').genRandomAlphaNumbers(20); > users.save('JohnSmith', passwd); ``` @@ -49,7 +49,7 @@ While there, you probably want to change the password of the default `root` user too. Otherwise one will be able to connect with the default `root` user and its empty password. The following commands change the `root` user's password: -``` +```js > passwd = require('internal').genRandomAlphaNumbers(20); > require('@arangodb/users').update('root', passwd); ``` @@ -57,7 +57,7 @@ and its empty password. The following commands change the `root` user's password Back to our user account *JohnSmith*. Let us create a new database and grant him access to it with `grantDatabase()`: -``` +```js > db._createDatabase('testdb'); > users.grantDatabase('JohnSmith', 'testdb', 'rw'); ``` @@ -79,7 +79,7 @@ Before we can grant *JohnSmith* access to a collection, we first have to connect to the new database and create a collection. Disconnect `arangosh` by pressing Ctrl+C twice. Then reconnect, but to the database we created: -``` +```js arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database testdb ... ... > db._create('testcoll'); diff --git a/3.10/analyzers.md b/3.10/analyzers.md index 979f8df513..c0cd3311e0 100644 --- a/3.10/analyzers.md +++ b/3.10/analyzers.md @@ -860,7 +860,7 @@ attributes: removing tokens that contain non-printable characters. To encode UTF-8 strings to hex strings you can use e.g. - AQL: - ```js + ```aql FOR token IN ["and","the"] RETURN TO_HEX(token) ``` - arangosh / Node.js: @@ -1005,16 +1005,17 @@ The *properties* allowed for this Analyzer are an object with the following attr **Examples** -Create and use a `classification` Analyzer with a stored "cooking" classifier to classify items. +Create and use a `classification` Analyzer with a stored "cooking" classifier +to classify items. 
```js var analyzers = require("@arangodb/analyzers"); var classifier_single = analyzers.save("classifier_single", "classification", { "model_location": "/path_to_local_fasttext_model_directory/model_cooking.bin" }, ["frequency", "norm", "position"]); var classifier_top_two = analyzers.save("classifier_double", "classification", { "model_location": "/path_to_local_fasttext_model_directory/model_cooking.bin", "top_k": 2 }, ["frequency", "norm", "position"]); -db._query(`LET str = 'Which baking dish is best to bake a banana bread ?' +db._query(`LET str = "Which baking dish is best to bake a banana bread ?" RETURN { - "all": TOKENS(str, 'classifier_single'), - "double": TOKENS(str, 'classifier_double') + "all": TOKENS(str, "classifier_single"), + "double": TOKENS(str, "classifier_double") } `); ``` @@ -1061,16 +1062,17 @@ The *properties* allowed for this Analyzer are an object with the following attr **Examples** -Create and use a `nearest_neighbors` Analyzer with a stored "cooking" classifier to find similar terms. +Create and use a `nearest_neighbors` Analyzer with a stored "cooking" classifier +to find similar terms. 
```js var analyzers = require("@arangodb/analyzers"); var nn_single = analyzers.save("nn_single", "nearest_neighbors", { "model_location": "/path_to_local_fasttext_model_directory/model_cooking.bin" }, ["frequency", "norm", "position"]); var nn_top_two = analyzers.save("nn_double", "nearest_neighbors", { "model_location": "/path_to_local_fasttext_model_directory/model_cooking.bin", "top_k": 2 }, ["frequency", "norm", "position"]); -db._query(`LET str = 'salt, oil' +db._query(`LET str = "salt, oil" RETURN { - "all": TOKENS(str, 'nn_single'), - "double": TOKENS(str, 'nn_double') + "all": TOKENS(str, "nn_single"), + "double": TOKENS(str, "nn_double") } `); ``` diff --git a/3.10/appendix-glossary.md b/3.10/appendix-glossary.md index f9ccba1b8a..11bc4d4f4c 100644 --- a/3.10/appendix-glossary.md +++ b/3.10/appendix-glossary.md @@ -77,10 +77,10 @@ the application path. The filesystem layout could look like this: apps/ # the instance's application directory system/ # system applications (can be ignored) _db/ # sub-directory containing database-specific applications - / # sub-directory for a single database + / # sub-directory for a single database /APP # sub-directory for a single application /APP # sub-directory for a single application - / # sub-directory for another database + / # sub-directory for another database /APP # sub-directory for a single application ``` diff --git a/3.10/aql-tutorial-crud.md b/3.10/aql-tutorial-crud.md index b680a98868..178944e92d 100644 --- a/3.10/aql-tutorial-crud.md +++ b/3.10/aql-tutorial-crud.md @@ -33,7 +33,7 @@ run by clicking **Execute**: ![Insert query in query editor](images/Query_Insert.png) -```js +```aql INSERT { "name": "Ned", "surname": "Stark", @@ -62,7 +62,7 @@ of strings. The entire document is an object. 
Let's add a bunch of other characters in a single query: -```js +```aql LET data = [ { "name": "Robert", "surname": "Baratheon", "alive": false, "traits": ["A","H","C"] }, { "name": "Jaime", "surname": "Lannister", "alive": true, "age": 36, "traits": ["A","F","B"] }, @@ -121,7 +121,7 @@ literal array definition like `[ {...}, {...}, ... ]`. This variable is then used in the `INSERT` statement instead of a literal object definition. What it does is basically: -```js +```aql INSERT { "name": "Robert", "surname": "Baratheon", @@ -154,7 +154,7 @@ There are a couple of documents in the *Characters* collection by now. We can retrieve them all using a `FOR` loop again. This time however we use it to go through all documents in the collection instead of an array: -```js +```aql FOR c IN Characters RETURN c ``` @@ -191,7 +191,7 @@ attributes starting with an underscore `_` are read-only. We can use either the document key or the document ID to retrieve a specific document with the help of an AQL function `DOCUMENT()`: -```js +```aql RETURN DOCUMENT("Characters", "2861650") // --- or --- RETURN DOCUMENT("Characters/2861650") @@ -219,7 +219,7 @@ Here, `"2861650"` is the key for the *Ned Stark* document, and `"2861653"` for The `DOCUMENT()` function also allows you to fetch multiple documents at once: -```js +```aql RETURN DOCUMENT("Characters", ["2861650", "2861653"]) // --- or --- RETURN DOCUMENT(["Characters/2861650", "Characters/2861653"]) @@ -261,7 +261,7 @@ Update documents According to our *Ned Stark* document, he is alive. When we get to know that he died, we need to change the `alive` attribute. Let us modify the existing document: -```js +```aql UPDATE "2861650" WITH { alive: false } IN Characters ``` @@ -270,7 +270,7 @@ specified document with the attributes listed (or adds them if they don't exist) but leaves the rest untouched. 
To replace the entire document content, you may use `REPLACE` instead of `UPDATE`: -```js +```aql REPLACE "2861650" WITH { name: "Ned", surname: "Stark", @@ -282,7 +282,7 @@ REPLACE "2861650" WITH { This also works in a loop. For example, the following adds a new attribute to all documents: -```js +```aql FOR c IN Characters UPDATE c WITH { season: 1 } IN Characters ``` @@ -292,7 +292,7 @@ The query adds the `season` attribute to the documents' top level. You can inspect the result by re-running the query that returns all documents in a collection: -```js +```aql FOR c IN Characters RETURN c ``` @@ -335,13 +335,13 @@ Delete documents To fully remove documents from a collection, there is the `REMOVE` operation. It works similar to the other modification operations, yet without a `WITH` clause: -```js +```aql REMOVE "2861650" IN Characters ``` It can also be used in a loop body to effectively truncate a collection: -```js +```aql FOR c IN Characters REMOVE c IN Characters ``` diff --git a/3.10/aql-tutorial-filter.md b/3.10/aql-tutorial-filter.md index ffb10f7c05..efa8af5518 100644 --- a/3.10/aql-tutorial-filter.md +++ b/3.10/aql-tutorial-filter.md @@ -18,7 +18,7 @@ conditions for documents to match. Equality condition ------------------ -```js +```aql FOR c IN Characters FILTER c.name == "Ned" RETURN c @@ -28,7 +28,7 @@ The filter condition reads like: "the attribute *name* of a character document must be equal to the string *Ned*". If the condition applies, character document gets returned. This works with any attribute likewise: -```js +```aql FOR c IN Characters FILTER c.surname == "Stark" RETURN c @@ -41,7 +41,7 @@ Strict equality is one possible condition we can state. There are plenty of other conditions we can formulate however. 
For example, we could ask for all adult characters: -```js +```aql FOR c IN Characters FILTER c.age >= 13 RETURN c.name @@ -71,7 +71,7 @@ and age of all characters younger than 13 by changing the operator to *less-than* and using the object syntax to define a subset of attributes to return: -```js +```aql FOR c IN Characters FILTER c.age < 13 RETURN { name: c.name, age: c.age } @@ -99,7 +99,7 @@ Multiple conditions To not let documents pass the filter without an age attribute, we can add a second criterion: -```js +```aql FOR c IN Characters FILTER c.age < 13 FILTER c.age != null @@ -115,7 +115,7 @@ FOR c IN Characters This could equally be written with a boolean `AND` operator as: -```js +```aql FOR c IN Characters FILTER c.age < 13 AND c.age != null RETURN { name: c.name, age: c.age } @@ -129,7 +129,7 @@ Alternative conditions If you want documents to fulfill one or another condition, possibly for different attributes as well, use `OR`: -```js +```aql FOR c IN Characters FILTER c.name == "Jon" OR c.name == "Joffrey" RETURN { name: c.name, surname: c.surname } diff --git a/3.10/aql-tutorial-geospatial.md b/3.10/aql-tutorial-geospatial.md index d8240c8902..f08cb130e2 100644 --- a/3.10/aql-tutorial-geospatial.md +++ b/3.10/aql-tutorial-geospatial.md @@ -21,7 +21,7 @@ which you need to create first, then run the AQL query below: ![Create Locations collection](images/Locations_Collection_Creation.png) -```js +```aql LET places = [ { "name": "Dragonstone", "coordinate": [ 55.167801, -6.815096 ] }, { "name": "King's Landing", "coordinate": [ 42.639752, 18.110189 ] }, @@ -75,7 +75,7 @@ to restrict the number of results to at most *n* matches. In the example below, the limit is set to 3. 
The origin (the reference point) is a coordinate somewhere in downtown Dublin, Ireland: -```js +```aql FOR loc IN Locations LET distance = DISTANCE(loc.coordinate[0], loc.coordinate[1], 53.35, -6.25) SORT distance @@ -124,7 +124,7 @@ Find locations within radius locations within a given radius from a reference point. Remember that the unit is meters. The example uses a radius of 200,000 meters (200 kilometers): -```js +```aql FOR loc IN Locations LET distance = DISTANCE(loc.coordinate[0], loc.coordinate[1], 53.35, -6.25) SORT distance diff --git a/3.10/aql-tutorial-join.md b/3.10/aql-tutorial-join.md index efb954b6e0..b76890870a 100644 --- a/3.10/aql-tutorial-join.md +++ b/3.10/aql-tutorial-join.md @@ -112,7 +112,7 @@ Resolving traits Let's start by returning only the `traits` attribute of each character: -```js +```aql FOR c IN Characters RETURN c.traits ``` @@ -131,7 +131,7 @@ about attribute access. We can use the `traits` array together with the `DOCUMENT()` function to use the elements as document keys and look them up in the `Traits` collection: -```js +```aql FOR c IN Characters RETURN DOCUMENT("Traits", c.traits) ``` @@ -212,7 +212,7 @@ for each character. This is a bit too much information, so let's only return English labels using the [array expansion](aql/advanced-array-operators.html#array-expansion) notation: -```js +```aql FOR c IN Characters RETURN DOCUMENT("Traits", c.traits)[*].en ``` @@ -242,7 +242,7 @@ Great, we resolved the letters to meaningful traits! But we also need to know to which character they belong. Thus, we need to merge both the character document and the data from the traits documents: -```js +```aql FOR c IN Characters RETURN MERGE(c, { traits: DOCUMENT("Traits", c.traits)[*].en } ) ``` @@ -299,7 +299,7 @@ multiple collections, with a `FILTER` condition to match up attributes. 
In case of the traits key array, there needs to be a third loop to iterate over the keys: -```js +```aql FOR c IN Characters RETURN MERGE(c, { traits: ( diff --git a/3.10/aql-tutorial-sort-limit.md b/3.10/aql-tutorial-sort-limit.md index 79f306f068..57966e8dce 100644 --- a/3.10/aql-tutorial-sort-limit.md +++ b/3.10/aql-tutorial-sort-limit.md @@ -14,7 +14,7 @@ It may not always be necessary to return all documents, that a `FOR` loop normally returns. In those cases, we can limit the amount of documents with a `LIMIT()` operation: -```js +```aql FOR c IN Characters LIMIT 5 RETURN c.name @@ -34,7 +34,7 @@ FOR c IN Characters second syntax however, which allows you to skip a certain amount of records and return the next *n* documents: -```js +```aql FOR c IN Characters LIMIT 2, 5 RETURN c.name @@ -61,7 +61,7 @@ here was basically random. To return them in a defined order, we can add a `SORT()` operation. It can have a big impact on the result if combined with a `LIMIT()`, because the result becomes predictable if you sort first. -```js +```aql FOR c IN Characters SORT c.name LIMIT 10 @@ -86,7 +86,7 @@ FOR c IN Characters See how it sorted by name, then returned the first ten names in an alphabetical order. We can reverse the sort order with `DESC` like descending: -```js +```aql FOR c IN Characters SORT c.name DESC LIMIT 10 @@ -118,7 +118,7 @@ Assume we want to sort by surname. Many of the characters share a surname. The result order among characters with the same surname is undefined. 
We can first sort by surname, then name to determine the order: -```js +```aql FOR c IN Characters FILTER c.surname SORT c.surname, c.name @@ -156,7 +156,7 @@ Sort by age The order can also be determined by a numeric value, such as the age: -```js +```aql FOR c IN Characters FILTER c.age SORT c.age diff --git a/3.10/aql-tutorial-traversal.md b/3.10/aql-tutorial-traversal.md index 4d444b706c..008c70f4da 100644 --- a/3.10/aql-tutorial-traversal.md +++ b/3.10/aql-tutorial-traversal.md @@ -56,7 +56,7 @@ change the collection type to **Edge**. Then run the following query: -```js +```aql LET data = [ { "parent": { "name": "Ned", "surname": "Stark" }, @@ -126,7 +126,7 @@ FOR rel in data The character documents don't have user-defined keys. If they had, it would allow us to create the edges more easily like: -```js +```aql INSERT { _from: "Characters/robb", _to: "Characters/ned" } INTO ChildOf ``` @@ -168,7 +168,7 @@ query to find parents of a character – or in graph terms, we want to start at a vertex and follow the edges to other vertices in an [AQL graph traversal](aql/graphs-traversals.html): -```js +```aql FOR v IN 1..1 OUTBOUND "Characters/2901776" ChildOf RETURN v.name ``` @@ -187,7 +187,7 @@ child we start at. `"Characters/2901776"` is that start vertex. Note that the document ID is different for you, so please adjust it to your document ID of e.g. 
the Bran Stark document: -```js +```aql FOR c IN Characters FILTER c.name == "Bran" RETURN c._id @@ -200,7 +200,7 @@ FOR c IN Characters You may also combine this query with the traversal directly to easily change the start vertex by adjusting the filter condition(s): -```js +```aql FOR c IN Characters FILTER c.name == "Bran" FOR v IN 1..1 OUTBOUND c ChildOf @@ -226,7 +226,7 @@ Traverse to the children We can also walk from a parent in the reverse edge direction (`INBOUND`) to the children: -```js +```aql FOR c IN Characters FILTER c.name == "Ned" FOR v IN 1..1 INBOUND c ChildOf @@ -250,7 +250,7 @@ For the Lannister family, we have relations that span from a parent to a grandchild. Let's change the traversal depth to return grandchildren, which means to go exactly two steps: -```js +```aql FOR c IN Characters FILTER c.name == "Tywin" FOR v IN 2..2 INBOUND c ChildOf @@ -292,7 +292,7 @@ To return the parents and grandparents of Joffrey, we can walk edges in the `OUTBOUND` direction and adjust the traversal depth to go at least 1 step, and 2 at most: -```js +```aql FOR c IN Characters FILTER c.name == "Joffrey" FOR v IN 1..2 OUTBOUND c ChildOf diff --git a/3.10/aql/advanced-array-operators.md b/3.10/aql/advanced-array-operators.md index 105dcef9ee..0f4dafcb73 100644 --- a/3.10/aql/advanced-array-operators.md +++ b/3.10/aql/advanced-array-operators.md @@ -53,7 +53,7 @@ example *users* documents: With the `[*]` operator it becomes easy to query just the names of the friends for each user: -``` +```aql FOR u IN users RETURN { name: u.name, friends: u.friends[*].name } ``` @@ -70,7 +70,7 @@ This will produce: This is a shortcut for the longer, semantically equivalent query: -```js +```aql FOR u IN users RETURN { name: u.name, friends: (FOR f IN u.friends RETURN f.name) } ``` @@ -90,7 +90,7 @@ so on. Let's compare the array expansion operator with an array contraction operator. 
For example, the following query produces an array of friend names per user: -```js +```aql FOR u IN users RETURN u.friends[*].name ``` @@ -122,7 +122,7 @@ the `[**]` can be used if it has access to a multi-dimensional nested result. We can extend above query as follows and still create the same nested result: -```js +```aql RETURN ( FOR u IN users RETURN u.friends[*].name ) @@ -130,7 +130,7 @@ RETURN ( By now appending the `[**]` operator at the end of the query... -```js +```aql RETURN ( FOR u IN users RETURN u.friends[*].name )[**] @@ -167,11 +167,11 @@ These inline expressions can follow array expansion and contraction operators `[* ...]`, `[** ...]` etc. The keywords `FILTER`, `LIMIT` and `RETURN` must occur in this order if they are used in combination, and can only occur once: -`anyArray[* FILTER conditions LIMIT skip,limit RETURN projection]` +anyArray[* FILTER conditions LIMIT skip,limit RETURN projection] Example with nested numbers and array contraction: -```js +```aql LET arr = [ [ 1, 2 ], 3, [ 4, 5 ], 6 ] RETURN arr[** FILTER CURRENT % 2 == 0] ``` @@ -186,7 +186,7 @@ All even numbers are returned in a flat array: Complex example with multiple conditions, limit and projection: -```js +```aql FOR u IN users RETURN { name: u.name, @@ -227,7 +227,7 @@ older than 40 years are returned per user: To return only the names of friends that have an *age* value higher than the user herself, an inline `FILTER` can be used: -```js +```aql FOR u IN users RETURN { name: u.name, friends: u.friends[* FILTER CURRENT.age > u.age].name } ``` @@ -242,7 +242,7 @@ The number of elements returned can be restricted with `LIMIT`. It works the sam as the [limit operation](operations-limit.html). `LIMIT` must come after `FILTER` and before `RETURN`, if they are present. 
-```js +```aql FOR u IN users RETURN { name: u.name, friends: u.friends[* LIMIT 1].name } ``` @@ -259,7 +259,7 @@ Above example returns one friend each: A number of elements can also be skipped and up to *n* returned: -```js +```aql FOR u IN users RETURN { name: u.name, friends: u.friends[* LIMIT 1,2].name } ``` @@ -280,7 +280,7 @@ per user: To return a projection of the current element, use `RETURN`. If a `FILTER` is also present, `RETURN` must come later. -```js +```aql FOR u IN users RETURN u.friends[* RETURN CONCAT(CURRENT.name, " is a friend of ", u.name)] ``` diff --git a/3.10/aql/common-errors.md b/3.10/aql/common-errors.md index 0999db48f1..4625ae06c2 100644 --- a/3.10/aql/common-errors.md +++ b/3.10/aql/common-errors.md @@ -25,7 +25,7 @@ In AQL, strings must be concatenated using the [CONCAT()](functions-string.html# function. Joining them together with the `+` operator is not supported. Especially as JavaScript programmer it is easy to walk into this trap: -```js +```aql RETURN "foo" + "bar" // [ 0 ] RETURN "foo" + 123 // [ 123 ] RETURN "123" + 200 // [ 323 ] @@ -40,7 +40,7 @@ valid string representation of a number, then it is casted to a number. Thus, ad To concatenate elements (with implicit casting to string for non-string values), do: -```js +```aql RETURN CONCAT("foo", "bar") // [ "foobar" ] RETURN CONCAT("foo", 123) // [ "foo123" ] RETURN CONCAT("123", 200) // [ "123200" ] @@ -128,7 +128,7 @@ If an attacker inserted `\` for parameter `value` and ` || true REMOVE doc IN collection //` for parameter `type`, then the effective query would become: -```js +```aql FOR doc IN collection FILTER doc.value == '\' && doc.type == ' || true REMOVE doc IN collection //' RETURN doc @@ -156,7 +156,7 @@ shouldn't be used. They were simply omitted here for the sake of simplicity. Bind parameters in AQL queries are special tokens that act as placeholders for actual values. 
Here's an example: -```js +```aql FOR doc IN collection FILTER doc.value == @what RETURN doc @@ -191,7 +191,7 @@ If a malicious user would set `@what` to a value of `1 || true`, this wouldn't d any harm. AQL would treat the contents of `@what` as a single string token, and the meaning of the query would remain unchanged. The actually executed query would be: -``` +```aql FOR doc IN collection FILTER doc.value == "1 || true" RETURN doc @@ -366,13 +366,13 @@ it is intended. You should also see a warning if you execute such a query: For example, instead of: -```js +```aql RETURN coll[* LIMIT 1] ``` ... with the execution plan ... -``` +```aql Execution plan: Id NodeType Est. Comment 1 SingletonNode 1 * ROOT @@ -382,7 +382,7 @@ Execution plan: ... you can use the following equivalent query: -```js +```aql FOR doc IN coll LIMIT 1 RETURN doc @@ -390,7 +390,7 @@ FOR doc IN coll ... with the (better) execution plan: -``` +```aql Execution plan: Id NodeType Est. Comment 1 SingletonNode 1 * ROOT @@ -402,7 +402,7 @@ Execution plan: Similarly, make sure you have not confused any variable names with collection names by accident: -```js +```aql LET names = ["John", "Mary", ...] // supposed to refer to variable "names", not collection "Names" FOR name IN Names diff --git a/3.10/aql/data-queries.md b/3.10/aql/data-queries.md index 4075e66464..899004e696 100644 --- a/3.10/aql/data-queries.md +++ b/3.10/aql/data-queries.md @@ -16,7 +16,7 @@ Data Access Queries Retrieving data from the database with AQL does always include a **RETURN** operation. It can be used to return a static value, such as a string: -```js +```aql RETURN "Hello ArangoDB!" ``` @@ -26,7 +26,7 @@ returned and contains a single element in that case: `["Hello ArangoDB!"]` The function `DOCUMENT()` can be called to retrieve a single document via its document handle, for instance: -```js +```aql RETURN DOCUMENT("users/phil") ``` @@ -35,14 +35,14 @@ documents of a collection. 
The following query executes the loop body for all documents of a collection called *users*. Each document is returned unchanged in this example: -```js +```aql FOR doc IN users RETURN doc ``` Instead of returning the raw `doc`, one can easily create a projection: -```js +```aql FOR doc IN users RETURN { user: doc, newAttribute: true } ``` @@ -55,7 +55,7 @@ Operations like **FILTER**, **SORT** and **LIMIT** can be added to the loop body to narrow and order the result. Instead of above shown call to `DOCUMENT()`, one can also retrieve the document that describes user *phil* like so: -```js +```aql FOR doc IN users FILTER doc._key == "phil" RETURN doc @@ -67,7 +67,7 @@ more than a single document will match this filter. For other attributes this may not be the case. To return a subset of active users (determined by an attribute called *status*), sorted by name in ascending order, you can do: -```js +```aql FOR doc IN users FILTER doc.status == "active" SORT doc.name @@ -103,7 +103,7 @@ The operations are detailed in the chapter [High Level Operations](operations.ht Let's start with the basics: `INSERT`, `UPDATE` and `REMOVE` operations on single documents. Here is an example that insert a document in an existing collection *users*: -```js +```aql INSERT { firstName: "Anna", name: "Pavlova", @@ -118,7 +118,7 @@ result, the above query still creates a new user document. You may provide a key for the new document; if not provided, ArangoDB creates one for you. -```js +```aql INSERT { _key: "GilbertoGil", firstName: "Gilberto", @@ -129,7 +129,7 @@ INSERT { As ArangoDB is schema-free, attributes of the documents may vary: -```js +```aql INSERT { _key: "PhilCarpenter", firstName: "Phil", @@ -139,7 +139,7 @@ INSERT { } IN users ``` -```js +```aql INSERT { _key: "NatachaDeclerck", firstName: "Natacha", @@ -150,7 +150,7 @@ INSERT { Update is quite simple. 
The following AQL statement will add or change the attributes status and location -```js +```aql UPDATE "PhilCarpenter" WITH { status: "active", location: "Beijing" @@ -159,7 +159,7 @@ UPDATE "PhilCarpenter" WITH { Replace is an alternative to update where all attributes of the document are replaced. -```js +```aql REPLACE { _key: "NatachaDeclerck", firstName: "Natacha", @@ -171,13 +171,13 @@ REPLACE { Removing a document if you know its key is simple as well : -```js +```aql REMOVE "GilbertoGil" IN users ``` or -```js +```aql REMOVE { _key: "GilbertoGil" } IN users ``` @@ -190,7 +190,7 @@ iterate over a given list of documents. They can optionally be combined with Let's start with an example that modifies existing documents in a collection *users* that match some condition: -```js +```aql FOR u IN users FILTER u.status == "not active" UPDATE u WITH { status: "inactive" } IN users @@ -200,7 +200,7 @@ FOR u IN users Now, let's copy the contents of the collection *users* into the collection *backup*: -```js +```aql FOR u IN users INSERT u IN backup ``` @@ -209,7 +209,7 @@ Subsequently, let's find some documents in collection *users* and remove them from collection *backup*. The link between the documents in both collections is established via the documents' keys: -```js +```aql FOR u IN users FILTER u.status == "deleted" REMOVE u IN backup @@ -217,7 +217,7 @@ FOR u IN users The following example will remove all documents from both *users* and *backup*: -```js +```aql LET r1 = (FOR u IN users REMOVE u IN users) LET r2 = (FOR u IN backup REMOVE u IN backup) RETURN true @@ -229,20 +229,20 @@ Data-modification queries can optionally return documents. 
In order to reference the inserted, removed or modified documents in a `RETURN` statement, data-modification statements introduce the `OLD` and/or `NEW` pseudo-values: -```js +```aql FOR i IN 1..100 INSERT { value: i } IN test RETURN NEW ``` -```js +```aql FOR u IN users FILTER u.status == "deleted" REMOVE u IN users RETURN OLD ``` -```js +```aql FOR u IN users FILTER u.status == "not active" UPDATE u WITH { status: "inactive" } IN users @@ -266,7 +266,7 @@ by queries. For example, the following query will return only the keys of the inserted documents: -```js +```aql FOR i IN 1..100 INSERT { value: i } IN test RETURN NEW._key @@ -277,7 +277,7 @@ FOR i IN 1..100 For `UPDATE`, `REPLACE` and `UPSERT` statements, both `OLD` and `NEW` can be used to return the previous revision of a document together with the updated revision: -```js +```aql FOR u IN users FILTER u.status == "not active" UPDATE u WITH { status: "inactive" } IN users @@ -293,7 +293,7 @@ updated, or a new document was inserted. It does so by checking the `OLD` variab after the `UPSERT` and using a `LET` statement to store a temporary string for the operation type: -```js +```aql UPSERT { name: "test" } INSERT { name: "test" } UPDATE { } IN users diff --git a/3.10/aql/examples-combining-queries.md b/3.10/aql/examples-combining-queries.md index 7ec832522a..4998525a10 100644 --- a/3.10/aql/examples-combining-queries.md +++ b/3.10/aql/examples-combining-queries.md @@ -13,7 +13,7 @@ variables and values in its outer scope(s). It is required that subqueries be put inside parentheses `(` and `)` to explicitly mark their start and end points: -```js +```aql FOR p IN persons LET recommendations = ( // subquery start FOR r IN recommendations @@ -33,7 +33,7 @@ Function calls also use parentheses and AQL allows you to omit an extra pair if you want to use a subquery as sole argument for a function, e.g. 
`MAX()` instead of `MAX(())`: -```js +```aql FOR p IN persons COLLECT city = p.city INTO g RETURN { @@ -56,7 +56,7 @@ Subqueries may also include other subqueries. Subqueries always return a result **array**, even if there is only a single return value: -```js +```aql RETURN ( RETURN 1 ) ``` @@ -67,7 +67,7 @@ RETURN ( RETURN 1 ) To avoid such a nested data structure, [FIRST()](functions-array.html#first) can be used for example: -```js +```aql RETURN FIRST( RETURN 1 ) ``` @@ -78,7 +78,7 @@ RETURN FIRST( RETURN 1 ) To unwind the result array of a subquery so that each element is returned as top-level element in the overall query result, you can use a `FOR` loop: -```js +```aql FOR elem IN (RETURN 1..3) // [1,2,3] RETURN elem ``` @@ -103,14 +103,14 @@ participate in lazy evaluation of operands, for example in the Consider the following query: -```js +```aql RETURN RAND() > 0.5 ? (RETURN 1) : 0 ``` It get transformed into something more like this, with the calculation of the subquery happening before the evaluation of the condition: -```js +```aql LET temp1 = (RETURN 1) LET temp2 = RAND() > 0.5 ? temp1 : 0 RETURN temp2 @@ -124,7 +124,7 @@ avoid query errors like > Query: AQL: collection or array expected as operand to FOR loop; you provided > a value of type 'null' (while executing) -```js +```aql LET maybe = DOCUMENT("coll/does_not_exist") LET dependent = maybe ? ( FOR attr IN ATTRIBUTES(maybe) @@ -139,7 +139,7 @@ account that `maybe` can be `null`, which cannot be iterated over with `FOR`. A possible solution is to fall back to an empty array in the subquery to effectively prevent the loop body from being run: -```js +```aql LET maybe = DOCUMENT("coll/does_not_exist") LET dependent = maybe ? 
( FOR attr IN NOT_NULL(ATTRIBUTES(maybe || {}), []) diff --git a/3.10/aql/examples-counting.md b/3.10/aql/examples-counting.md index 980a1b5919..266768b67a 100644 --- a/3.10/aql/examples-counting.md +++ b/3.10/aql/examples-counting.md @@ -11,7 +11,7 @@ Amount of documents in a collection To return the count of documents that currently exist in a collection, you can call the [LENGTH() function](functions-array.html#length): -``` +```aql RETURN LENGTH(collection) ``` @@ -22,7 +22,7 @@ Internally, [COLLECTION_COUNT()](functions-miscellaneous.html#collection_count) In earlier versions with `COLLECT ... WITH COUNT INTO` available (since 2.4), you may use the following code instead of *LENGTH()* for better performance: -``` +```aql FOR doc IN collection COLLECT WITH COUNT INTO length RETURN length diff --git a/3.10/aql/examples-create-test-data.md b/3.10/aql/examples-create-test-data.md index a05165e54c..39fa41d83f 100644 --- a/3.10/aql/examples-create-test-data.md +++ b/3.10/aql/examples-create-test-data.md @@ -17,7 +17,7 @@ query that iterates over a range. Run the following AQL query e.g. from the _AQL Editor_ in the web interface to insert 1,000 documents into the collection: -```js +```aql FOR i IN 1..1000 INSERT { name: CONCAT("test", i) } IN myCollection ``` @@ -33,7 +33,7 @@ a `status` attribute, and fill it with integer values between `1` to `5` (inclusive), with equal distribution. A good way to achieve this is to use the modulo operator (`%`): -```js +```aql FOR i IN 1..1000 INSERT { name: CONCAT("test", i), @@ -48,7 +48,7 @@ numbers, and `FLOOR()` to convert the scaled number back to an integer. For example, the following query populates the `value` attribute with numbers between 100 and 150 (inclusive): -```js +```aql FOR i IN 1..1000 INSERT { name: CONCAT("test", i), @@ -60,7 +60,7 @@ After the test data has been created, it is often helpful to verify it. 
The `RAND()` function is also a good candidate for retrieving a random sample of the documents in the collection. This query will retrieve 10 random documents: -```js +```aql FOR doc IN myCollection SORT RAND() LIMIT 10 @@ -71,7 +71,7 @@ The `COLLECT` clause is an easy mechanism to run an aggregate analysis on some attribute. Let us say we wanted to verify the data distribution inside the `status` attribute. In this case we could run: -```js +```aql FOR doc IN myCollection COLLECT value = doc.value WITH COUNT INTO count RETURN { @@ -87,7 +87,7 @@ key, the count as attribute value and merge everything into a single result object. Note that attribute keys can only be strings, but for our purposes here it is acceptable. -```js +```aql RETURN MERGE( FOR doc IN myCollection COLLECT value = doc.value WITH COUNT INTO count diff --git a/3.10/aql/examples-data-modification-queries.md b/3.10/aql/examples-data-modification-queries.md index a159ffb94f..72f56382b4 100644 --- a/3.10/aql/examples-data-modification-queries.md +++ b/3.10/aql/examples-data-modification-queries.md @@ -21,7 +21,7 @@ and `REPLACE` completely replaces the found documents with the specified values. We'll start with an `UPDATE` query that rewrites the gender attribute in all documents: -```js +```aql FOR u IN users UPDATE u WITH { gender: TRANSLATE(u.gender, { m: 'male', f: 'female' }) } IN users ``` @@ -30,7 +30,7 @@ To add new attributes to existing documents, we can also use an `UPDATE` query. 
The following query adds an attribute *numberOfLogins* for all users with status active: -```js +```aql FOR u IN users FILTER u.active == true UPDATE u WITH { numberOfLogins: 0 } IN users @@ -38,7 +38,7 @@ FOR u IN users Existing attributes can also be updated based on their previous value: -```js +```aql FOR u IN users FILTER u.active == true UPDATE u WITH { numberOfLogins: u.numberOfLogins + 1 } IN users @@ -48,7 +48,7 @@ The above query will only work if there was already a *numberOfLogins* attribute present in the document. If it is unsure whether there is a *numberOfLogins* attribute in the document, the increase must be made conditional: -```js +```aql FOR u IN users FILTER u.active == true UPDATE u WITH { @@ -58,7 +58,7 @@ FOR u IN users Updates of multiple attributes can be combined in a single query: -```js +```aql FOR u IN users FILTER u.active == true UPDATE u WITH { @@ -83,7 +83,7 @@ the documents found in collection users. Documents common to both collections will be replaced. All other documents will remain unchanged. Documents are compared using their *_key* attributes: -```js +```aql FOR u IN users REPLACE u IN backup ``` @@ -96,7 +96,7 @@ also be rolled back. To make the query succeed for such case, use the *ignoreErrors* query option: -```js +```aql FOR u IN users REPLACE u IN backup OPTIONS { ignoreErrors: true } ``` @@ -108,7 +108,7 @@ Removing documents Deleting documents can be achieved with the `REMOVE` operation. To remove all users within a certain age range, we can use the following query: -```js +```aql FOR u IN users FILTER u.active == true && u.age >= 35 && u.age <= 37 REMOVE u IN users @@ -123,7 +123,7 @@ It can also be used to generate copies of existing documents from other collecti or to create synthetic documents (e.g. for testing purposes). 
The following query creates 1000 test users in collection users with some attributes set: -```js +```aql FOR i IN 1..1000 INSERT { id: 100000 + i, @@ -141,7 +141,7 @@ Copying data from one collection into another To copy data from one collection into another, an `INSERT` operation can be used: -```js +```aql FOR u IN users INSERT u IN backup ``` @@ -167,7 +167,7 @@ query in case of errors, there is the *ignoreErrors* option. To use it, place an *OPTIONS* keyword directly after the data modification part of the query, e.g. -```js +```aql FOR u IN users REPLACE u IN backup OPTIONS { ignoreErrors: true } ``` @@ -208,7 +208,7 @@ database.save({ Heres the Query which keeps the *subList* on *alteredList* to update it later: -```js +```aql FOR document in complexCollection LET alteredList = ( FOR element IN document.subList @@ -252,7 +252,7 @@ all documents in the collection **regardless whether the values change or not**. Therefore we want to only `UPDATE` the documents if we really change their value. Hence we employ a second `FOR` to test whether *subList* will be altered or not: -```js +```aql FOR document in complexCollection LET willUpdateDocument = ( FOR element IN docToAlter.subList diff --git a/3.10/aql/examples-diffing-documents.md b/3.10/aql/examples-diffing-documents.md index 4326c417bc..60b34bea94 100644 --- a/3.10/aql/examples-diffing-documents.md +++ b/3.10/aql/examples-diffing-documents.md @@ -11,7 +11,7 @@ Diffing Two Documents in AQL There is no built-in AQL function to compare the attributes of two documents, but it is easily possible to build a query that does: -```js +```aql // input document 1 LET doc1 = { "foo": "bar", diff --git a/3.10/aql/examples-dynamic-attribute-names.md b/3.10/aql/examples-dynamic-attribute-names.md index 5572bda60c..3ecf0bb6fa 100644 --- a/3.10/aql/examples-dynamic-attribute-names.md +++ b/3.10/aql/examples-dynamic-attribute-names.md @@ -38,7 +38,7 @@ For this we also need attribute name expressions. 
Here is a query showing how to do this. The attribute name expressions all required to be enclosed in `[` and `]` in order to make this work: -```js +```aql LET documents = [ { "_key" : "3231748397810", "gender" : "f", "status" : "active", "type" : "user" }, { "_key" : "3231754427122", "gender" : "m", "status" : "inactive", "type" : "unknown" } @@ -95,7 +95,7 @@ values. To extract the attribute names and values from the original documents, we can use a subquery as follows: -```js +```aql LET documents = [ { "name": "test"," gender": "f", "status": "active", "type": "user" }, { "name": "dummy", "gender": "m", "status": "inactive", "type": "unknown", "magicFlag": 23 } @@ -127,7 +127,7 @@ Instead of directly returning the subquery result, we first capture it in a variable, and pass the variable's `name` and `value` components into `ZIP()` like this: -```js +```aql LET documents = [ { "name" : "test"," gender" : "f", "status" : "active", "type" : "user" }, { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 } @@ -169,7 +169,7 @@ As can be seen, the two results have a different amount of result attributes. We can also make the result a bit more dynamic by prefixing each attribute with the value of the `name` attribute: -```js +```aql LET documents = [ { "name": "test"," gender": "f", "status": "active", "type": "user" }, { "name": "dummy", "gender": "m", "status": "inactive", "type": "unknown", "magicFlag": 23 } diff --git a/3.10/aql/examples-grouping.md b/3.10/aql/examples-grouping.md index ad08ad8d73..1fbeea6f31 100644 --- a/3.10/aql/examples-grouping.md +++ b/3.10/aql/examples-grouping.md @@ -15,7 +15,7 @@ Ensuring uniqueness `COLLECT` can be used to make a result set unique. 
The following query will return each distinct `age` attribute value only once: -```js +```aql FOR u IN users COLLECT age = u.age RETURN age @@ -25,7 +25,7 @@ This is grouping without tracking the group values, but just the group criterion Grouping can also be done on multiple levels using `COLLECT`: -```js +```aql FOR u IN users COLLECT status = u.status, age = u.age RETURN { status, age } @@ -34,7 +34,7 @@ FOR u IN users Alternatively `RETURN DISTINCT` can be used to make a result set unique. `RETURN DISTINCT` supports a single criterion only: -```js +```aql FOR u IN users RETURN DISTINCT u.age ``` @@ -49,7 +49,7 @@ Fetching group values To group users by age, and return the names of the users with the highest ages, we'll issue a query like this: -```js +```aql FOR u IN users FILTER u.active == true COLLECT age = u.age INTO usersByAge @@ -82,14 +82,14 @@ The *usersByAge* variable contains the full documents found, and as we're only interested in user names, we'll use the expansion operator `[*]` to extract just the *name* attribute of all user documents in each group: -```js +```aql usersByAge[*].u.name ``` The `[*]` expansion operator is just a handy short-cut. We could also write a subquery: -```js +```aql ( FOR temp IN usersByAge RETURN temp.u.name ) ``` @@ -100,7 +100,7 @@ To group by multiple criteria, we'll use multiple arguments in the `COLLECT` cla For example, to group users by *ageGroup* (a derived value we need to calculate first) and then by *gender*, we'll do: -```js +```aql FOR u IN users FILTER u.active == true COLLECT ageGroup = FLOOR(u.age / 5) * 5, @@ -130,7 +130,7 @@ If the goal is to count the number of values in each group, AQL provides the spe *COLLECT WITH COUNT INTO* syntax. 
This is a simple variant for grouping with an additional group length calculation: -```js +```aql FOR u IN users FILTER u.active == true COLLECT ageGroup = FLOOR(u.age / 5) * 5, @@ -160,7 +160,7 @@ Aggregation Adding further aggregation is also simple in AQL by using an `AGGREGATE` clause in the `COLLECT`: -```js +```aql FOR u IN users FILTER u.active == true COLLECT ageGroup = FLOOR(u.age / 5) * 5, @@ -222,7 +222,7 @@ The same query as before can be turned into a post-aggregation query as shown be that this query will build and pass on all group values for all groups inside the variable *g*, and perform the aggregation at the latest possible stage: -```js +```aql FOR u IN users FILTER u.active == true COLLECT ageGroup = FLOOR(u.age / 5) * 5, @@ -269,7 +269,7 @@ statement. For example, to get the 3 *ageGroup*s with the most users in them: -```js +```aql FOR u IN users FILTER u.active == true COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO group diff --git a/3.10/aql/examples-join.md b/3.10/aql/examples-join.md index 54ec9982b6..18fe6c579e 100644 --- a/3.10/aql/examples-join.md +++ b/3.10/aql/examples-join.md @@ -709,7 +709,7 @@ can be returned in a horizontal list. This will return each user at most once. The AQL query for doing so is: -```js +```aql FOR u IN users FILTER u.active == true LIMIT 0, 4 RETURN { @@ -764,7 +764,7 @@ list of related users. To not only return friend ids but also the names of friends, we could "join" the *users* collection once more (something like a "self join"): -```js +```aql FOR u IN users FILTER u.active == true LIMIT 0, 4 @@ -822,7 +822,7 @@ users collection. Lets find the lonely people in our database - those without friends. -```js +```aql FOR user IN users LET friendList = ( @@ -866,7 +866,7 @@ Since we're free of schemata, there is by default no way to tell the format of t documents. So, if your documents don't contain an attribute, it defaults to null. 
We can however check our data for accuracy like this: -```js +```aql RETURN LENGTH(FOR u IN users FILTER u.userId == null RETURN 1) ``` @@ -876,7 +876,7 @@ RETURN LENGTH(FOR u IN users FILTER u.userId == null RETURN 1) ] ``` -```js +```aql RETURN LENGTH(FOR f IN relations FILTER f.friendOf == null RETURN 1) ``` diff --git a/3.10/aql/examples-projections-and-filters.md b/3.10/aql/examples-projections-and-filters.md index 0884b234e1..8b299e9915 100644 --- a/3.10/aql/examples-projections-and-filters.md +++ b/3.10/aql/examples-projections-and-filters.md @@ -10,7 +10,7 @@ Returning unaltered documents To return three complete documents from collection *users*, the following query can be used: -```js +```aql FOR u IN users LIMIT 0, 3 RETURN u @@ -60,7 +60,7 @@ Projections To return a projection from the collection *users* use a modified `RETURN` instruction: -```js +```aql FOR u IN users LIMIT 0, 3 RETURN { @@ -101,7 +101,7 @@ To return a filtered projection from collection *users*, you can use the `FILTER` keyword. Additionally, a `SORT` clause is used to have the result returned in a specific order: -```js +```aql FOR u IN users FILTER u.active == true && u.age >= 30 SORT u.age DESC diff --git a/3.10/aql/examples-upsert-repsert.md b/3.10/aql/examples-upsert-repsert.md index caa96f4867..983b3ab0fb 100644 --- a/3.10/aql/examples-upsert-repsert.md +++ b/3.10/aql/examples-upsert-repsert.md @@ -50,7 +50,7 @@ replace it (`REPLACE`). To recap, the syntaxes of AQL `UPSERT` are, depending on whether you want to update replace a document: -```js +```aql UPSERT INSERT UPDATE @@ -59,7 +59,7 @@ IN OPTIONS or -```js +```aql UPSERT INSERT REPLACE @@ -70,7 +70,7 @@ The `OPTIONS` part is optional. An example `UPSERT` operation looks like this: -```js +```aql UPSERT { page: "index.html" } INSERT { page: "index.html", status: "inserted" } UPDATE { status: "updated" } @@ -90,7 +90,7 @@ The `UPSERT` AQL operation is sometimes used in combination with date/time-keeping. 
For example, the following query keeps track of when a document was first created, and when it was last updated: -```js +```aql UPSERT { page: "index.html" } INSERT { page: "index.html", created: DATE_NOW() } UPDATE { updated: DATE_NOW() } @@ -104,7 +104,7 @@ to the existing document and its values in the `UPDATE`/`REPLACE` part. Following is an example that increments a counter on a document whenever the `UPSERT` operation is executed: -```js +```aql UPSERT { page: "index.html" } INSERT { page: "index.html", hits: 1 } UPDATE { hits: OLD.value + 1 } @@ -122,7 +122,7 @@ First of all, the `INSERT` part of an `UPSERT` operation should contain all attributes that are used in the search expression. Consider the following counter-example: -```js +```aql UPSERT { page: "index.html" } INSERT { status: "inserted" } /* page attribute missing here! */ UPDATE { status: "updated" } @@ -142,7 +142,7 @@ unintentional. The problem can easily be avoided by adding the search attributes to the `INSERT` part: -```js +```aql UPSERT { page: "index.html" } INSERT { page: "index.html", status: "inserted" } UPDATE { status: "updated" } @@ -158,7 +158,7 @@ what is specified in the `REPLACE` part. That means when using the `REPLACE` operation, the query should look like: -```js +```aql UPSERT { page: "index.html" } INSERT { page: "index.html", status: "inserted" } REPLACE { page: "index.html", status: "updated" } @@ -220,7 +220,7 @@ when the operation is executed, and none of the old values need to be referenced The general syntax of the `INSERT` AQL operation is: -```js +```aql INSERT IN OPTIONS ``` @@ -228,7 +228,7 @@ IN OPTIONS As we will deal with the `overwriteMode` option here, we are focussing on `INSERT` operations with this option set, for example: -```js +```aql INSERT { _key: "index.html", status: "created" } IN pages OPTIONS { overwriteMode: "ignore" } ``` @@ -282,7 +282,7 @@ previous version of the document in case the document is already present. 
This can be achieved by appending a `RETURN OLD` to the `INSERT` operation, e.g. -```js +```aql INSERT { _key: "index.html", status: "created" } IN pages OPTIONS { overwriteMode: "replace" } RETURN OLD @@ -292,7 +292,7 @@ It is also possible to return the new version of the document (the inserted document if no previous document existed, or the updated/replaced version in case a document already existed) by using `RETURN NEW`: -```js +```aql INSERT { _key: "index.html", status: "created" } IN pages OPTIONS { overwriteMode: "replace" } RETURN NEW diff --git a/3.10/aql/execution-and-performance-optimizer.md b/3.10/aql/execution-and-performance-optimizer.md index 5a80b027f3..bd0663c1d2 100644 --- a/3.10/aql/execution-and-performance-optimizer.md +++ b/3.10/aql/execution-and-performance-optimizer.md @@ -338,7 +338,7 @@ Using a cluster, there is a *Site* column if you explain a query. Snippets marked with **DBS** are executed on DB-Servers, **COOR** ones are executed on the respective Coordinator. -``` +```aql Query String (57 chars, cacheable: false): FOR doc IN test UPDATE doc WITH { updated: true } IN test diff --git a/3.10/aql/execution-and-performance-query-cache.md b/3.10/aql/execution-and-performance-query-cache.md index d33956e064..e3518df571 100644 --- a/3.10/aql/execution-and-performance-query-cache.md +++ b/3.10/aql/execution-and-performance-query-cache.md @@ -96,7 +96,7 @@ If the result of the following query is present in the query results cache, then either modifying data in collection `users` or in collection `organizations` will remove the already computed result from the cache: -``` +```aql FOR user IN users FOR organization IN organizations FILTER user.organization == organization._key @@ -144,7 +144,7 @@ above. 
After the server is started, the cache mode can be changed at runtime as follows: -``` +```js require("@arangodb/aql/cache").properties({ mode: "on" }); ``` @@ -161,7 +161,7 @@ results in each database's query cache and thus restrict the cache's memory cons These value can also be adjusted at runtime as follows: -``` +```js require("@arangodb/aql/cache").properties({ maxResults: 200, maxResultsSize: 8 * 1024 * 1024, @@ -188,7 +188,7 @@ When the query cache mode is `off`, the executor will not look for the query in The `cache` attribute can be set as follows via the `db._createStatement()` function: -``` +```js var stmt = db._createStatement({ query: "FOR doc IN users LIMIT 5 RETURN doc", cache: true /* cache attribute set here */ @@ -199,7 +199,7 @@ stmt.execute(); When using the `db._query()` function, the `cache` attribute can be set as follows: -``` +```js db._query({ query: "FOR doc IN users LIMIT 5 RETURN doc", cache: true /* cache attribute set here */ @@ -219,7 +219,7 @@ Query results cache inspection The contents of the query results cache can be checked at runtime using the cache's `toArray()` function: -``` +```js require("@arangodb/aql/cache").toArray(); ``` @@ -229,7 +229,7 @@ results cache. The query results cache for the current database can be cleared at runtime using the cache's `clear` function: -``` +```js require("@arangodb/aql/cache").clear(); ``` diff --git a/3.10/aql/execution-and-performance-query-profiler.md b/3.10/aql/execution-and-performance-query-profiler.md index 74d052fa44..17dcce1b15 100644 --- a/3.10/aql/execution-and-performance-query-profiler.md +++ b/3.10/aql/execution-and-performance-query-profiler.md @@ -191,7 +191,7 @@ mistakes that we see quite often: Bad example: -```js +```aql LET vertices = ( FOR v IN 1..2 ANY @startVertex GRAPH 'my_graph' // <-- add a LIMIT 1 here @@ -217,7 +217,7 @@ edge in _pruchased_ to zero or more _products_. 
If we want to know all users that have purchased the product _playstation_ as well as produts of `type` _legwarmer_ we could use this query: -```js +```aql FOR prod IN products FILTER prod.type == 'legwarmer' FOR v,e,p IN 2..2 OUTBOUND prod purchased @@ -230,7 +230,7 @@ for each of them. But we could also inverse the traversal by starting of with the known _playstation_ product. This way we only need a single traversal to achieve the same result: -```js +```aql FOR v,e,p IN 2..2 OUTBOUND 'product/playstation' purchased FILTER v.type == 'legwarmer' // <-- last vertex of the path RETURN p.vertices[1] // <-- the user diff --git a/3.10/aql/extending-conventions.md b/3.10/aql/extending-conventions.md index 1843d058a2..b1fa6f894d 100644 --- a/3.10/aql/extending-conventions.md +++ b/3.10/aql/extending-conventions.md @@ -18,7 +18,7 @@ qualified to also include the user-defined namespace. The `::` symbol is used as the namespace separator. Users can create a multi-level hierarchy of function groups if required: -```js +```aql MYGROUP::MYFUNC() MYFUNCTIONS::MATH::RANDOM() ``` diff --git a/3.10/aql/extending-functions.md b/3.10/aql/extending-functions.md index 1051c3397d..4267a29075 100644 --- a/3.10/aql/extending-functions.md +++ b/3.10/aql/extending-functions.md @@ -49,7 +49,7 @@ module.exports = greeting; Then require it in the shell in order to register a user-defined function: -``` +```js arangosh> var func = require("path/to/file.js"); arangosh> aqlfunctions.register("HUMAN::GREETING", func, true); ``` @@ -134,7 +134,7 @@ In the example above, `%s` is replaced by `this.name` (the AQL function name), and both `%d` placeholders by `1` (number of expected arguments). 
If you call the function without an argument, you will see this: -``` +```js arangosh> db._query("RETURN MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT()") [object ArangoQueryCursor, count: 1, hasMore: false, warning: 1541 - invalid number of arguments for function 'MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT()', diff --git a/3.10/aql/functions-arangosearch.md b/3.10/aql/functions-arangosearch.md index 525597c5e1..4e2079e228 100644 --- a/3.10/aql/functions-arangosearch.md +++ b/3.10/aql/functions-arangosearch.md @@ -73,7 +73,7 @@ Assuming a View definition with an Analyzer whose name and type is `delimiter`: `{ "text": "foo|bar|baz" }` in the collection `coll`, the following query would return the document: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(doc.text == "bar", "delimiter") RETURN doc @@ -86,7 +86,7 @@ but the View does not even process the indexed fields with the `identity` Analyzer. The following query would also return an empty result because of the Analyzer mismatch: -```js +```aql FOR doc IN viewName SEARCH doc.text == "foo|bar|baz" //SEARCH ANALYZER(doc.text == "foo|bar|baz", "identity") @@ -98,7 +98,7 @@ FOR doc IN viewName In below query, the search expression is swapped by `ANALYZER()` to set the `text_en` Analyzer for both `PHRASE()` functions: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(PHRASE(doc.text, "foo") OR PHRASE(doc.text, "bar"), "text_en") RETURN doc @@ -106,7 +106,7 @@ FOR doc IN viewName Without the usage of `ANALYZER()`: -```js +```aql FOR doc IN viewName SEARCH PHRASE(doc.text, "foo", "text_en") OR PHRASE(doc.text, "bar", "text_en") RETURN doc @@ -119,7 +119,7 @@ but in the second call to `PHRASE()` a different Analyzer is set (`identity`) which overrules `ANALYZER()`. 
Therefore, the `text_en` Analyzer is used to find the phrase *foo* and the `identity` Analyzer to find *bar*: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(PHRASE(doc.text, "foo") OR PHRASE(doc.text, "bar", "identity"), "text_en") RETURN doc @@ -132,7 +132,7 @@ for the `TOKENS()` function itself. This is because the `TOKENS()` function is a regular string function that does not take the Analyzer context into account: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(doc.text IN TOKENS("foo", "text_en"), "text_en") RETURN doc @@ -152,7 +152,7 @@ value equal to `1.0`. #### Example: Boosting a search sub-expression -```js +```aql FOR doc IN viewName SEARCH ANALYZER(BOOST(doc.text == "foo", 2.5) OR doc.text == "bar", "text_en") LET score = BM25(doc) @@ -216,7 +216,7 @@ Match documents where the attribute at **path** is present. returned. The function can only be called in a search expression. It throws an error if used outside of a [SEARCH operation](operations-search.html). -```js +```aql FOR doc IN viewName SEARCH EXISTS(doc.text) RETURN doc @@ -240,7 +240,7 @@ specified data type. returned. The function can only be called in a search expression. It throws an error if used outside of a [SEARCH operation](operations-search.html). -```js +```aql FOR doc IN viewName SEARCH EXISTS(doc.text, "string") RETURN doc @@ -262,7 +262,7 @@ by the specified **analyzer**. returned. The function can only be called in a search expression. It throws an error if used outside of a [SEARCH operation](operations-search.html). -```js +```aql FOR doc IN viewName SEARCH EXISTS(doc.text, "analyzer", "text_en") RETURN doc @@ -316,7 +316,7 @@ match either. To match documents with the attribute `value >= 3` and `value <= 5` using the default `"identity"` Analyzer you would write the following query: -```js +```aql FOR doc IN viewName SEARCH IN_RANGE(doc.value, 3, 5, true, true) RETURN doc.value @@ -330,7 +330,7 @@ attribute where at least one of the numbers is in the specified boundaries. 
Using string boundaries and a text Analyzer allows to match documents which have at least one token within the specified character range: -```js +```aql FOR doc IN valView SEARCH ANALYZER(IN_RANGE(doc.value, "a","f", true, false), "text_en") RETURN doc @@ -361,7 +361,7 @@ that is used outside of `SEARCH` operations. Assuming a View with a text Analyzer, you may use it to match documents where the attribute contains at least two out of three tokens: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(MIN_MATCH(doc.text == 'quick', doc.text == 'brown', doc.text == 'fox', 2), "text_en") RETURN doc.text @@ -418,7 +418,7 @@ Given a View indexing an attribute `text`, a custom _n_-gram Analyzer `"bigram"` `{ "text": "quick red fox" }`, the following query would match it (with a threshold of `1.0`): -```js +```aql FOR doc IN viewName SEARCH NGRAM_MATCH(doc.text, "quick fox", "bigram") RETURN doc.text @@ -426,7 +426,7 @@ FOR doc IN viewName The following will also match (note the low threshold value): -```js +```aql FOR doc IN viewName SEARCH NGRAM_MATCH(doc.text, "quick blue fox", 0.4, "bigram") RETURN doc.text @@ -434,7 +434,7 @@ FOR doc IN viewName The following will not match (note the high threshold value): -```js +```aql FOR doc IN viewName SEARCH NGRAM_MATCH(doc.text, "quick blue fox", 0.9, "bigram") RETURN doc.text @@ -445,13 +445,13 @@ FOR doc IN viewName `NGRAM_MATCH()` can be called with constant arguments, but for such calls the *analyzer* argument is mandatory (even for calls inside of a `SEARCH` clause): -```js +```aql FOR doc IN viewName SEARCH NGRAM_MATCH("quick fox", "quick blue fox", 0.9, "bigram") RETURN doc.text ``` -```js +```aql RETURN NGRAM_MATCH("quick fox", "quick blue fox", "bigram") ``` @@ -531,7 +531,7 @@ Given a View indexing an attribute *text* with the `"text_en"` Analyzer and a document `{ "text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit" }`, the following query would match it: -```js +```aql FOR doc IN viewName SEARCH 
PHRASE(doc.text, "lorem ipsum", "text_en") RETURN doc.text @@ -540,7 +540,7 @@ FOR doc IN viewName However, this search expression does not because the tokens `"ipsum"` and `"lorem"` do not appear in this order: -```js +```aql PHRASE(doc.text, "ipsum lorem", "text_en") ``` @@ -549,7 +549,7 @@ PHRASE(doc.text, "ipsum lorem", "text_en") To match `"ipsum"` and `"amet"` with any two tokens in between, you can use the following search expression: -```js +```aql PHRASE(doc.text, "ipsum", 2, "amet", "text_en") ``` @@ -558,7 +558,7 @@ between *ipsum* and *amet*. A *skipTokens* value of `0` means that the tokens must be adjacent. Negative values are allowed, but not very useful. These three search expressions are equivalent: -```js +```aql PHRASE(doc.text, "lorem ipsum", "text_en") PHRASE(doc.text, "lorem", 0, "ipsum", "text_en") PHRASE(doc.text, "ipsum", -1, "lorem", "text_en") @@ -569,41 +569,41 @@ PHRASE(doc.text, "ipsum", -1, "lorem", "text_en") The `PHRASE()` function also accepts an array as second argument with *phrasePart* and *skipTokens* parameters as elements. 
-```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, ["quick brown fox"], "text_en") RETURN doc FOR doc IN myView SEARCH PHRASE(doc.title, ["quick", "brown", "fox"], "text_en") RETURN doc ``` This syntax variation enables the usage of computed expressions: -```js +```aql LET proximityCondition = [ "foo", ROUND(RAND()*10), "bar" ] FOR doc IN viewName SEARCH PHRASE(doc.text, proximityCondition, "text_en") RETURN doc ``` -```js +```aql LET tokens = TOKENS("quick brown fox", "text_en") // ["quick", "brown", "fox"] FOR doc IN myView SEARCH PHRASE(doc.title, tokens, "text_en") RETURN doc ``` Above example is equivalent to the more cumbersome and static form: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, "quick", 0, "brown", 0, "fox", "text_en") RETURN doc ``` You can optionally specify the number of skipTokens in the array form before every string element: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, ["quick", 1, "fox", "jumps"], "text_en") RETURN doc ``` It is the same as the following: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, "quick", 1, "fox", 0, "jumps", "text_en") RETURN doc ``` @@ -611,13 +611,13 @@ FOR doc IN myView SEARCH PHRASE(doc.title, "quick", 1, "fox", 0, "jumps", "text_ Empty arrays are skipped: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, "quick", 1, [], 1, "jumps", "text_en") RETURN doc ``` The query is equivalent to: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, "quick", 2 "jumps", "text_en") RETURN doc ``` @@ -628,7 +628,7 @@ Providing only empty arrays is valid, but will yield no results. Using object tokens `STARTS_WITH`, `WILDCARD`, `LEVENSHTEIN_MATCH`, `TERMS` and `IN_RANGE`: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, {STARTS_WITH: ["qui"]}, 0, {WILDCARD: ["b%o_n"]}, 0, @@ -645,7 +645,7 @@ be stemmed away is removed from both words manually in the example. 
Above example is equivalent to: -```js +```aql FOR doc IN myView SEARCH PHRASE(doc.title, [ {STARTS_WITH: "qui"}, 0, @@ -705,7 +705,7 @@ optionally with at least *minMatchCount* of the prefixes. To match a document `{ "text": "lorem ipsum..." }` using a prefix and the `"identity"` Analyzer you can use it like this: -```js +```aql FOR doc IN viewName SEARCH STARTS_WITH(doc.text, "lorem ip") RETURN doc @@ -717,7 +717,7 @@ This query will match `{ "text": "lorem ipsum" }` as well as `{ "text": [ "lorem", "ipsum" ] }` given a View which indexes the `text` attribute and processes it with the `"text_en"` Analyzer: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(STARTS_WITH(doc.text, "ips"), "text_en") RETURN doc.text @@ -728,7 +728,7 @@ modification to the query. The prefixes were passed to `STARTS_WITH()` as-is, but the built-in `text_en` Analyzer used for indexing has stemming enabled. So the indexed values are the following: -```js +```aql RETURN TOKENS("IPS (in-plane switching)", "text_en") ``` @@ -747,7 +747,7 @@ The *s* is removed from *ips*, which leads to the prefix *ips* not matching the indexed token *ip*. You may either create a custom text Analyzer with stemming disabled to avoid this issue, or apply stemming to the prefixes: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(STARTS_WITH(doc.text, TOKENS("ips", "text_en")), "text_en") RETURN doc.text @@ -758,7 +758,7 @@ FOR doc IN viewName The `STARTS_WITH()` function accepts an array of prefix alternatives of which only one has to match: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(STARTS_WITH(doc.text, ["something", "ips"]), "text_en") RETURN doc.text @@ -770,7 +770,7 @@ given prefix. 
The same query again, but with an explicit `minMatchCount`: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(STARTS_WITH(doc.text, ["wrong", "ips"], 1), "text_en") RETURN doc.text @@ -779,7 +779,7 @@ FOR doc IN viewName The number can be increased to require that at least this many prefixes must be present: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(STARTS_WITH(doc.text, ["lo", "ips", "something"], 2), "text_en") RETURN doc.text @@ -830,7 +830,7 @@ The Levenshtein distance between _quick_ and _quikc_ is `2` because it requires two operations to go from one to the other (remove _k_, insert _k_ at a different position). -```js +```aql FOR doc IN viewName SEARCH LEVENSHTEIN_MATCH(doc.text, "quikc", 2, false) // matches "quick" RETURN doc.text @@ -838,7 +838,7 @@ FOR doc IN viewName The Damerau-Levenshtein distance is `1` (move _k_ to the end). -```js +```aql FOR doc IN viewName SEARCH LEVENSHTEIN_MATCH(doc.text, "quikc", 1) // matches "quick" RETURN doc.text @@ -851,7 +851,7 @@ distance is calculated using the search term `kc` (`quikc` with the prefix `qui` removed) and the stored value without the prefix (e.g. `ck`). The prefix `qui` is constant. -```js +```aql FOR doc IN viewName SEARCH LEVENSHTEIN_MATCH(doc.text, "kc", 1, false, 64, "qui") // matches "quick" RETURN doc.text @@ -859,7 +859,7 @@ FOR doc IN viewName You may compute the prefix and suffix from the input string as follows: -```js +```aql LET input = "quikc" LET prefixSize = 3 LET prefix = LEFT(input, prefixSize) @@ -877,7 +877,7 @@ _quicksands_, then the Levenshtein distance is 5, with 50% of the characters mismatching. If the inputs are _q_ and _qu_, then the distance is only 1, although it is also a 50% mismatch. -```js +```aql LET target = "input" LET targetLength = LENGTH(target) LET maxDistance = (targetLength > 5 ? 2 : (targetLength >= 3 ? 1 : 0)) @@ -930,7 +930,7 @@ case-insensitive matching. This can be controlled with Analyzers instead. 
#### Example: Searching with wildcards -```js +```aql FOR doc IN viewName SEARCH ANALYZER(LIKE(doc.text, "foo%b_r"), "text_en") RETURN doc.text @@ -938,7 +938,7 @@ FOR doc IN viewName `LIKE` can also be used in operator form: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(doc.text LIKE "foo%b_r", "text_en") RETURN doc.text @@ -1039,7 +1039,7 @@ first, sort in **descending order** by the score (e.g. `SORT BM25(...) DESC`). You may calculate custom scores based on a scoring function using document attributes and numeric functions (e.g. `TFIDF(doc) * LOG(doc.value)`): -```js +```aql FOR movie IN imdbView SEARCH PHRASE(movie.title, "Star Wars", "text_en") SORT BM25(movie) * LOG(movie.runtime + 1) DESC @@ -1049,7 +1049,7 @@ FOR movie IN imdbView Sorting by more than one score is allowed. You may also sort by a mix of scores and attributes from multiple Views as well as collections: -```js +```aql FOR a IN viewA FOR c IN coll FOR b IN viewB @@ -1089,7 +1089,7 @@ the `"norm"` feature as it has no length normalization. Sorting by relevance with BM25 at default settings: -```js +```aql FOR doc IN viewName SEARCH ... SORT BM25(doc) DESC @@ -1101,7 +1101,7 @@ FOR doc IN viewName Sorting by relevance, with double-weighted term frequency and with full text length normalization: -```js +```aql FOR doc IN viewName SEARCH ... SORT BM25(doc, 2.4, 1) DESC @@ -1132,7 +1132,7 @@ The Analyzers need to have the `"norm"` feature enabled, too, if you want to use Sort by relevance using the TF-IDF score: -```js +```aql FOR doc IN viewName SEARCH ... SORT TFIDF(doc) DESC @@ -1143,7 +1143,7 @@ FOR doc IN viewName Sort by relevance using a normalized TF-IDF score: -```js +```aql FOR doc IN viewName SEARCH ... SORT TFIDF(doc, true) DESC @@ -1155,7 +1155,7 @@ FOR doc IN viewName Sort by the value of the `text` attribute in ascending order, then by the TFIDF score in descending order where the attribute values are equivalent: -```js +```aql FOR doc IN viewName SEARCH ... 
SORT doc.text, TFIDF(doc) DESC diff --git a/3.10/aql/functions-array.md b/3.10/aql/functions-array.md index 15026dbb36..878bbef074 100644 --- a/3.10/aql/functions-array.md +++ b/3.10/aql/functions-array.md @@ -261,7 +261,7 @@ of two arrays. This similarity measure is also known as _Intersection over Union_ and could be computed (less efficient and more verbose) as follows: -```js +```aql COUNT(a) == 0 && COUNT(b) == 0 ? 1 // two empty sets have a similarity of 1 by definition : COUNT(INTERSECTION(array1, array2)) / COUNT(UNION_DISTINCT(array1, array2)) diff --git a/3.10/aql/functions-bit.md b/3.10/aql/functions-bit.md index 62c501c4e3..b3f0ad0e36 100644 --- a/3.10/aql/functions-bit.md +++ b/3.10/aql/functions-bit.md @@ -50,7 +50,7 @@ range 0 to 232 - 1 are allowed as input values. - **value2** (number): second operand - returns **result** (number\|null): and-combined result -```js +```aql BIT_AND([1, 4, 8, 16]) // 0 BIT_AND([3, 7, 63]) // 3 BIT_AND([255, 127, null, 63]) // 63 @@ -73,7 +73,7 @@ the array must be numbers, which must not be negative. The maximum supported input number value is 31. Input number values outside the allowed range will make the function return `null` and produce a warning. -```js +```aql BIT_CONSTRUCT([1, 2, 3]) // 14 BIT_CONSTRUCT([0, 4, 8]) // 273 BIT_CONSTRUCT([0, 1, 10, 31]) // 2147484675 @@ -94,7 +94,7 @@ set bits. The positions in the output array are zero-based. The input value must be a number between 0 and 232 - 1 (including). The function will return `null` for any other inputs and produce a warning. -```js +```aql BIT_DECONSTRUCT(14) // [1, 2, 3] BIT_DECONSTRUCT(273) // [0, 4, 8] BIT_DECONSTRUCT(2147484675) // [0, 1, 10, 31] @@ -118,7 +118,7 @@ Note that the bitstring must not start with `0b`. If the bitstring has an invalid format, this function returns `null` and produces a warning. 
-```js +```aql BIT_FROM_STRING("0111") // 7 BIT_FROM_STRING("000000000000010") // 2 BIT_FROM_STRING("11010111011101") // 13789 @@ -142,7 +142,7 @@ The input value must be a number between 0 and 232 - 1 (including). The number of bits must be between 0 and 32. The function will return `null` for any other inputs and produce a warning. -```js +```aql BIT_NEGATE(0, 8) // 255 BIT_NEGATE(0, 10) // 1023 BIT_NEGATE(3, 4) // 12 @@ -178,7 +178,7 @@ will return the bitwise or value of its two operands. Only numbers in the range - **value2** (number): second operand - returns **result** (number\|null): or-combined result -```js +```aql BIT_OR([1, 4, 8, 16]) // 29 BIT_OR([3, 7, 63]) // 63 BIT_OR([255, 127, null, 63]) // 255 @@ -199,7 +199,7 @@ Counts the number of bits set in the input value. The input value must be a number between 0 and 232 - 1 (including). The function will return `null` for any other inputs and produce a warning. -```js +```aql BIT_POPCOUNT(0) // 0 BIT_POPCOUNT(255) // 8 BIT_POPCOUNT(69399252) // 12 @@ -224,7 +224,7 @@ The input value must be a number between 0 and 232 - 1 (including). The number of bits must be between 0 and 32. The function will return `null` for any other inputs and produce a warning. -```js +```aql BIT_SHIFT_LEFT(0, 1, 8) // 0 BIT_SHIFT_LEFT(7, 1, 16) // 14 BIT_SHIFT_LEFT(2, 10, 16) // 2048 @@ -249,7 +249,7 @@ The input value must be a number between 0 and 232 - 1 (including). The number of bits must be between 0 and 32. The function will return `null` for any other inputs and produce a warning. -```js +```aql BIT_SHIFT_RIGHT(0, 1, 8) // 0 BIT_SHIFT_RIGHT(33, 1, 16) // 16 BIT_SHIFT_RIGHT(65536, 13, 16) // 8 @@ -271,7 +271,7 @@ The input value must be a number between 0 and 232 - 1 (including). The **index** must be between 0 and 31. The function will return `null` for any other inputs and produce a warning. 
-```js +```aql BIT_TEST(0, 3) // false BIT_TEST(255, 0) // true BIT_TEST(7, 2) // true @@ -293,7 +293,7 @@ To convert a bitstring into a number, see [BIT_FROM_STRING()](#bit_from_string). The input value must be a number between 0 and 232 - 1 (including). The function will return `null` for any other inputs and produce a warning. -```js +```aql BIT_TO_STRING(7, 4) // "0111" BIT_TO_STRING(255, 8) // "11111111" BIT_TO_STRING(60, 8) // "00011110" @@ -329,7 +329,7 @@ the range 0 to 232 - 1 are allowed as input values. - **value2** (number): second operand - returns **result** (number\|null): xor-combined result -```js +```aql BIT_XOR([1, 4, 8, 16]) // 29 BIT_XOR([3, 7, 63]) // 59 BIT_XOR([255, 127, null, 63]) // 191 diff --git a/3.10/aql/functions-date.md b/3.10/aql/functions-date.md index 9de96a180a..6e39432747 100644 --- a/3.10/aql/functions-date.md +++ b/3.10/aql/functions-date.md @@ -61,7 +61,7 @@ function will make the function return `null` and trigger a warning for the quer which can optionally be escalated to an error and abort the query. This also applies to operations which produce an invalid value. -```js +```aql DATE_HOUR( 2 * 60 * 60 * 1000 ) // 2 DATE_HOUR("1970-01-01T02:00:00") // 2 ``` @@ -113,7 +113,7 @@ Zulu time will be used. The following calls to *DATE_TIMESTAMP()* are equivalent and will all return *1399472349522*: -```js +```aql DATE_TIMESTAMP("2014-05-07T14:19:09.522") DATE_TIMESTAMP("2014-05-07T14:19:09.522Z") DATE_TIMESTAMP("2014-05-07 14:19:09.522") @@ -125,7 +125,7 @@ DATE_TIMESTAMP(1399472349522) The same is true for calls to *DATE_ISO8601()* that also accepts variable input formats: -```js +```aql DATE_ISO8601("2014-05-07T14:19:09.522Z") DATE_ISO8601("2014-05-07 14:19:09.522Z") DATE_ISO8601(2014, 5, 7, 14, 19, 9, 522) @@ -190,7 +190,7 @@ Negative values are not allowed, result in *null* and cause a warning. Values greater than the upper range bound overflow to the larger components (e.g. 
an hour of 26 is automatically turned into an additional day and two hours): -```js +```aql DATE_TIMESTAMP(2016, 12, -1) // returns null and issues a warning DATE_TIMESTAMP(2016, 2, 32) // returns 1456963200000, which is March 3rd, 2016 DATE_TIMESTAMP(1970, 1, 1, 26) // returns 93600000, which is January 2nd, 1970, at 2 a.m. @@ -575,7 +575,7 @@ Truncates the given date after *unit* and returns the modified date. - f, millisecond, milliseconds - returns **isoDate** (string): the truncated ISO 8601 date time string -```js +```aql DATE_TRUNC('2017-02-03', 'month') // 2017-02-01T00:00:00.000Z DATE_TRUNC('2017-02-03 04:05:06', 'hours') // 2017-02-03 04:00:00.000Z ``` @@ -621,7 +621,7 @@ grouping. - f, millisecond, milliseconds - returns **isoDate** (string): the rounded ISO 8601 date time string -```js +```aql DATE_ROUND('2000-04-28T11:11:11.111Z', 1, 'day') // 2000-04-28T00:00:00.000Z DATE_ROUND('2000-04-10T11:39:29Z', 15, 'minutes') // 2000-04-10T11:30:00.000Z ``` @@ -717,7 +717,7 @@ together with `CONCAT()` if possible. Examples: -```js +```aql DATE_FORMAT(DATE_NOW(), "%q/%yyyy") // quarter and year (e.g. "3/2015") DATE_FORMAT(DATE_NOW(), "%dd.%mm.%yyyy %hh:%ii:%ss,%fff") // e.g. "18.09.2015 15:30:49,374" DATE_FORMAT("1969", "Summer of '%yy") // "Summer of '69" @@ -750,7 +750,7 @@ Add *amount* given in *unit* to *date* and return the calculated date. - f, millisecond, milliseconds - returns **isoDate** (string): the calculated ISO 8601 date time string -```js +```aql DATE_ADD(DATE_NOW(), -1, "day") // yesterday; also see DATE_SUBTRACT() DATE_ADD(DATE_NOW(), 3, "months") // in three months DATE_ADD(DATE_ADD("2015-04-01", 5, "years"), 1, "month") // May 1st 2020 @@ -784,7 +784,7 @@ The string must be prefixed by a `P`. A separating `T` is only required if `H`, `M` and/or `S` are specified. You only need to specify the needed pairs of letters and numbers. 
-```js +```aql DATE_ADD(DATE_NOW(), "P1Y") // add 1 year DATE_ADD(DATE_NOW(), "P3M2W") // add 3 months and 2 weeks DATE_ADD(DATE_NOW(), "P5DT26H") // add 5 days and 26 hours (=6 days and 2 hours) @@ -846,7 +846,7 @@ The string must be prefixed by a `P`. A separating `T` is only required if `H`, `M` and/or `S` are specified. You only need to specify the needed pairs of letters and numbers. -```js +```aql DATE_SUBTRACT(DATE_NOW(), 1, "day") // yesterday DATE_SUBTRACT(DATE_TIMESTAMP(DATE_YEAR(DATE_NOW()), 12, 24), 4, "years") // Christmas four years ago DATE_SUBTRACT(DATE_ADD("2016-02", 1, "month"), 1, "day") // last day of February (29th, because 2016 is a leap year!) @@ -906,7 +906,7 @@ You can refer to the units as: - s, second, seconds - f, millisecond, milliseconds -```js +```aql // Compare months and days, true on birthdays if you're born on 4th of April DATE_COMPARE("1985-04-04", DATE_NOW(), "months", "days") @@ -926,7 +926,7 @@ compare partial date strings, `DATE_COMPARE()` is basically a convenience function for that. However, neither is really required to limit a search to a certain day as demonstrated here: -```js +```aql FOR doc IN coll FILTER doc.date >= "2015-05-15" AND doc.date < "2015-05-16" RETURN doc @@ -946,7 +946,7 @@ and can only occur if inserted manually (you may want to pass dates through Leap days in leap years (29th of February) must be always handled manually, if you require so (e.g. 
birthday checks): -```js +```aql LET today = DATE_NOW() LET noLeapYear = NOT DATE_LEAPYEAR(today) diff --git a/3.10/aql/functions-document.md b/3.10/aql/functions-document.md index 63b3e51deb..837a1e7a43 100644 --- a/3.10/aql/functions-document.md +++ b/3.10/aql/functions-document.md @@ -64,7 +64,7 @@ Return the attribute keys of an object in alphabetic order: Complex example to count how often every top-level attribute key occurs in the documents of a collection (expensive on large collections): -```js +```aql LET attributesPerDocument = ( FOR doc IN collection RETURN ATTRIBUTES(doc, true) ) @@ -98,7 +98,7 @@ Other ways of testing for the existence of an attribute may behave differently if the attribute has a falsy value or is not present (implicitly `null` on object access): -```js +```aql !!{ name: "" }.name // false HAS( { name: "" }, "name") // true @@ -113,7 +113,7 @@ between explicit and implicit *null* values in your query, you may use an equali comparison to test for *null* and create a non-sparse index on the attribute you want to test against: -```js +```aql FILTER !HAS(doc, "name") // can not use indexes FILTER IS_NULL(doc, "name") // can not use indexes FILTER doc.name == null // can utilize non-sparse indexes @@ -453,7 +453,7 @@ skips attributes with a value of `undefined`, turning `{attr: undefined}` into ` `MATCHES()` can not utilize indexes. You may use plain `FILTER` conditions instead to potentially benefit from existing indexes: -```js +```aql FOR doc IN coll FILTER (cond1 AND cond2 AND cond3) OR (cond4 AND cond5) ... ``` diff --git a/3.10/aql/functions-fulltext.md b/3.10/aql/functions-fulltext.md index fb701c8aab..0139ee6703 100644 --- a/3.10/aql/functions-fulltext.md +++ b/3.10/aql/functions-fulltext.md @@ -30,7 +30,7 @@ will fail with an error at runtime. 
It doesn't fail when explaining the query however. *FULLTEXT()* is not meant to be used as an argument to `FILTER`, but rather to be used as the expression of a `FOR` statement: -```js +```aql FOR oneMail IN FULLTEXT(emails, "body", "banana,-apple") RETURN oneMail._id ``` diff --git a/3.10/aql/functions-geo.md b/3.10/aql/functions-geo.md index a3a244c63b..f7cd9a2b2b 100644 --- a/3.10/aql/functions-geo.md +++ b/3.10/aql/functions-geo.md @@ -28,7 +28,7 @@ which is sufficient for most use cases such as location-aware services. - **longitude2** (number): the longitude portion of the second coordinate - returns **distance** (number): the distance between both coordinates in **meters** -```js +```aql // Distance from Brandenburg Gate (Berlin) to ArangoDB headquarters (Cologne) DISTANCE(52.5163, 13.3777, 50.9322, 6.94) // 476918.89688380965 (~477km) @@ -68,7 +68,7 @@ boundary edges! You can optimize queries that contain a `FILTER` expression of the following form with an S2-based [geospatial index](../indexing-geo.html): -```js +```aql FOR doc IN coll FILTER GEO_CONTAINS(geoJson, doc.geo) ... @@ -99,7 +99,7 @@ of each shape. For a list of supported types see the - returns **distance** (number): the distance between the centroid points of the two objects on the reference ellipsoid -```js +```aql LET polygon = { type: "Polygon", coordinates: [[[-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]]] @@ -112,7 +112,7 @@ FOR doc IN collectionName You can optimize queries that contain a `FILTER` expression of the following form with an S2-based [geospatial index](../indexing-geo.html): -```js +```aql FOR doc IN coll FILTER GEO_DISTANCE(geoJson, doc.geo) <= limit ... @@ -127,7 +127,7 @@ a lower bound with `>` or `>=`, or both, are equally supported. 
You can also optimize queries that use a `SORT` condition of the following form with a geospatial index: -```js +```aql SORT GEO_DISTANCE(geoJson, doc.geo) ``` @@ -152,7 +152,7 @@ see the [geo index page](../indexing-geo.html#geojson). Supported are `"sphere"` (default) and `"wgs84"`. - returns **area** (number): the area in square meters of the polygon -```js +```aql LET polygon = { type: "Polygon", coordinates: [[[-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]]] @@ -173,7 +173,7 @@ types see the [geo index page](../indexing-geo.html#geojson). - **geoJsonB** (object): second GeoJSON object. - returns **bool** (bool): true for equality. -```js +```aql LET polygonA = GEO_POLYGON([ [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5] ]) @@ -183,7 +183,7 @@ LET polygonB = GEO_POLYGON([ RETURN GEO_EQUALS(polygonA, polygonB) // true ``` -```js +```aql LET polygonA = GEO_POLYGON([ [-11.1, 24.0], [-10.5, 26.1], [-11.2, 27.1], [-11.1, 24.0] ]) @@ -209,7 +209,7 @@ intersects with `geoJsonB` (i.e. at least one point in B is also A or vice-versa You can optimize queries that contain a `FILTER` expression of the following form with an S2-based [geospatial index](../indexing-geo.html): -```js +```aql FOR doc IN coll FILTER GEO_INTERSECTS(geoJson, doc.geo) ... @@ -268,7 +268,7 @@ favor of the new `GEO_CONTAINS` AQL function, which works with *true* or *false*) if the specified point is exactly on a boundary of the polygon. -```js +```aql // will check if the point (lat 4, lon 7) is contained inside the polygon IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], 4, 7 ) ``` @@ -295,7 +295,7 @@ the same way. or *false* if it's not. The result is undefined (can be *true* or *false*) if the specified point is exactly on a boundary of the polygon. -```js +```aql // will check if the point (lat 4, lon 7) is contained inside the polygon IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], [ 4, 7 ] ) @@ -493,7 +493,7 @@ the query however. 
`NEAR` is a deprecated AQL function from version 3.4.0 on. Use [DISTANCE()](#distance) in a query like this instead: -```js +```aql FOR doc IN doc SORT DISTANCE(doc.latitude, doc.longitude, paramLatitude, paramLongitude) ASC RETURN doc @@ -531,7 +531,7 @@ contain the distance value in an attribute of that name. `WITHIN` is a deprecated AQL function from version 3.4.0 on. Use [DISTANCE()](#distance) in a query like this instead: -```js +```aql FOR doc IN doc LET d = DISTANCE(doc.latitude, doc.longitude, paramLatitude, paramLongitude) FILTER d <= radius @@ -570,7 +570,7 @@ value in an attribute of that name. `WITHIN_RECTANGLE` is a deprecated AQL function from version 3.4.0 on. Use [GEO_CONTAINS](#geo_contains) and a GeoJSON polygon instead: -```js +```aql LET rect = {type: "Polygon", coordinates: [[[longitude1, latitude1], ...]]} FOR doc IN doc FILTER GEO_CONTAINS(rect, [doc.longitude, doc.latitude]) diff --git a/3.10/aql/functions-miscellaneous.md b/3.10/aql/functions-miscellaneous.md index 8c1444b19f..778a32d8ef 100644 --- a/3.10/aql/functions-miscellaneous.md +++ b/3.10/aql/functions-miscellaneous.md @@ -47,7 +47,7 @@ that can utilize View indexes. You can use `MIN_MATCH()` to filter if two out of three conditions evaluate to `true` for instance: -```js +```aql LET members = [ { name: "Carol", age: 41, active: true }, { name: "Doug", age: 56, active: true }, @@ -59,7 +59,7 @@ FOR doc IN members An equivalent filter expression without `MIN_MATCH()` would be more cumbersome: -```js +```aql FILTER (LENGTH(doc.name) == 5 AND doc.age >= 50) OR (doc.age >= 50 AND doc.active) OR (doc.active AND LENGTH(doc.name) == 5) @@ -102,7 +102,7 @@ is not supposed to be useful for anything else. 
The primary use case for this function is to apply it on all documents in a given collection as follows: -```js +```aql FOR doc IN collection FILTER !CHECK_DOCUMENT(doc) RETURN JSON_STRINGIFY(doc) @@ -194,7 +194,7 @@ in case of concurrent document operations the exact document storage order cannot be derived unambiguously from the revision value. It should thus be treated as a rough estimate of when a document was created or last updated. -```js +```aql DECODE_REV( "_YU0HOEG---" ) // { "date" : "2019-03-11T16:15:05.314Z", "count" : 0 } ``` @@ -473,7 +473,7 @@ Both built-in and user-defined functions can be called. - **arguments** (array, *optional*): an array with elements of arbitrary type - returns **retVal** (any): the return value of the called function -```js +```aql APPLY( "SUBSTRING", [ "this is a test", 0, 7 ] ) // "this is" ``` @@ -493,7 +493,7 @@ Both built-in and user-defined functions can be called. multiple arguments, can be omitted - returns **retVal** (any): the return value of the called function -```js +```aql CALL( "SUBSTRING", "this is a test", 0, 4 ) // "this" ``` @@ -516,7 +516,7 @@ conditions. - **message** (string): message that will be used in exception or warning if expression evaluates to false - returns **retVal** (bool): returns true if expression evaluates to true -```js +```aql FOR i IN 1..3 FILTER ASSERT(i > 0, "i is not greater 0") RETURN i FOR i IN 1..3 FILTER WARN(i < 2, "i is not smaller 2") RETURN i ``` @@ -632,7 +632,7 @@ if lazy evaluation / short circuiting is used for instance. - **reason** (string): an error message - returns nothing, because the query is aborted -```js +```aql RETURN 1 == 1 ? "okay" : FAIL("error") // "okay" RETURN 1 == 1 || FAIL("error") ? true : false // true RETURN 1 == 2 && FAIL("error") ? true : false // false @@ -658,7 +658,7 @@ internal testing. 
- **value** (any): a value of arbitrary type - returns **retVal** (any): *value* -```js +```aql // differences in execution plan (explain) FOR i IN 1..3 RETURN (1 + 1) // const assignment FOR i IN 1..3 RETURN NOOPT(1 + 1) // simple expression @@ -688,7 +688,7 @@ specified collection. - **collection** (string): name of a collection - returns **schema** (object): schema definition object -```js +```aql RETURN SCHEMA_GET("myColl") ``` @@ -721,7 +721,7 @@ Wait for a certain amount of time before continuing the query. - **seconds** (number): amount of time to wait - returns a *null* value -```js +```aql SLEEP(1) // wait 1 second SLEEP(0.02) // wait 20 milliseconds ``` @@ -737,7 +737,7 @@ testing. - **expression** (any): arbitrary expression - returns **retVal** (any): the return value of the *expression* -```js +```aql // differences in execution plan (explain) FOR i IN 1..3 RETURN (1 + 1) // const assignment FOR i IN 1..3 RETURN V8(1 + 1) // simple expression @@ -752,6 +752,6 @@ of the Coordinator. - returns **serverVersion** (string): the server version string -```js +```aql RETURN VERSION() // e.g. "3.4.0" ``` diff --git a/3.10/aql/functions-numeric.md b/3.10/aql/functions-numeric.md index b8ec29715b..19c2765016 100644 --- a/3.10/aql/functions-numeric.md +++ b/3.10/aql/functions-numeric.md @@ -18,7 +18,7 @@ Return the absolute part of *value*. - **value** (number): any number, positive or negative - returns **unsignedValue** (number): the number without + or - sign -```js +```aql ABS(-5) // 5 ABS(+5) // 5 ABS(3.5) // 3.5 @@ -35,7 +35,7 @@ Return the arccosine of *value*. - returns **num** (number\|null): the arccosine of *value*, or *null* if *value* is outside the valid range -1 and 1 (inclusive) -```js +```aql ACOS(-1) // 3.141592653589793 ACOS(0) // 1.5707963267948966 ACOS(1) // 0 @@ -53,7 +53,7 @@ Return the arcsine of *value*. 
- returns **num** (number\|null): the arcsine of *value*, or *null* if *value* is outside the valid range -1 and 1 (inclusive) -```js +```aql ASIN(1) // 1.5707963267948966 ASIN(0) // 0 ASIN(-1) // -1.5707963267948966 @@ -70,7 +70,7 @@ Return the arctangent of *value*. - **value** (number): the input value - returns **num** (number): the arctangent of *value* -```js +```aql ATAN(-1) // -0.7853981633974483 ATAN(0) // 0 ATAN(10) // 1.4711276743037347 @@ -83,7 +83,7 @@ ATAN2() Return the arctangent of the quotient of *y* and *x*. -```js +```aql ATAN2(0, 0) // 0 ATAN2(1, 0) // 1.5707963267948966 ATAN2(1, 1) // 0.7853981633974483 @@ -101,7 +101,7 @@ Return the average (arithmetic mean) of the values in *array*. - returns **mean** (number\|null): the average value of *numArray*. If the array is empty or contains *null* values only, *null* will be returned. -```js +```aql AVERAGE( [5, 2, 9, 2] ) // 4.5 AVERAGE( [ -3, -5, 2 ] ) // -2 AVERAGE( [ 999, 80, 4, 4, 4, 3, 3, 3 ] ) // 137.5 @@ -124,7 +124,7 @@ To round to the nearest integer value, see [ROUND()](#round). - **value** (number): any number - returns **roundedValue** (number): the value rounded to the ceiling -```js +```aql CEIL(2.49) // 3 CEIL(2.50) // 3 CEIL(-2.50) // -2 @@ -141,7 +141,7 @@ Return the cosine of *value*. - **value** (number): the input value - returns **num** (number): the cosine of *value* -```js +```aql COS(1) // 0.5403023058681398 COS(0) // 1 COS(-3.141592653589783) // -1 @@ -170,7 +170,7 @@ To calculate the distance, see [L1_DISTANCE()](#l1_distance) and In case of invalid input values the function returns **null** and produces a warning. -```js +```aql COSINE_SIMILARITY([0,1], [1,0]) // 0 COSINE_SIMILARITY([[0,1,0,1],[1,0,0,1],[1,1,1,0],[0,0,0,1]], [1,1,1,1]) // [0.707, 0.707, 0.866, 0.5] COSINE_SIMILARITY([-1,0], [1,0]) // -1 @@ -197,7 +197,7 @@ decays depending on the distance of a numeric value from a user-given origin. 
- returns **score** (number\|array): a single score or an array of scores depending on the type of the input `value` -```js +```aql DECAY_GAUSS(41, 40, 5, 5, 0.5) // 1 DECAY_GAUSS([20, 41], 40, 5, 5, 0.5) // [0.0019531250000000017, 1.0] DECAY_GAUSS(49.9889, 49.987, 0.001, 0.001, 0.2) // 0.2715403018822964 @@ -224,7 +224,7 @@ that decays depending on the distance of a numeric value from a user-given origi - returns **score** (number\|array): a single score or an array of scores depending on the type of the input `value` -```js +```aql DECAY_EXP(41, 40, 5, 5, 0.7) // 1 DECAY_EXP(2, 0, 10, 0, 0.2) // 0.7247796636776955 DECAY_EXP(49.9889, 50, 0.001, 0.001, 0.2) // 8.717720806626885e-08 @@ -251,7 +251,7 @@ decays depending on the distance of a numeric value from a user-given origin. - returns **score** (number\|array): a single score or an array of scores depending on the type of the input `value` -```js +```aql DECAY_LINEAR(41, 40, 5, 5, 0.5) // 1 DECAY_LINEAR(9.8, 0, 10, 0, 0.2) // 0.21599999999999994 DECAY_LINEAR(5..7, 0, 10, 0, 0.2) // [0.6, 0.52, 0.44] @@ -267,7 +267,7 @@ Return the angle converted from radians to degrees. - **rad** (number): the input value - returns **num** (number): the angle in degrees -```js +```aql DEGREES(0.7853981633974483) // 45 DEGREES(0) // 0 DEGREES(3.141592653589793) // 180 @@ -283,7 +283,7 @@ Return Euler's constant (2.71828...) raised to the power of *value*. - **value** (number): the input value - returns **num** (number): Euler's constant raised to the power of *value* -```js +```aql EXP(1) // 2.718281828459045 EXP(10) // 22026.46579480671 EXP(0) // 1 @@ -299,7 +299,7 @@ Return 2 raised to the power of *value*. - **value** (number): the input value - returns **num** (number): 2 raised to the power of *value* -```js +```aql EXP2(16) // 65536 EXP2(1) // 2 EXP2(0) // 1 @@ -318,7 +318,7 @@ To round to the nearest integer value, see [ROUND()](#round). 
- **value** (number): any number - returns **roundedValue** (number): the value rounded downward -```js +```aql FLOOR(2.49) // 2 FLOOR(2.50) // 2 FLOOR(-2.50) // -3 @@ -337,7 +337,7 @@ constant (2.71828...). - returns **num** (number\|null): the natural logarithm of *value*, or *null* if *value* is equal or less than 0 -```js +```aql LOG(2.718281828459045) // 1 LOG(10) // 2.302585092994046 LOG(0) // null @@ -354,7 +354,7 @@ Return the base 2 logarithm of *value*. - returns **num** (number\|null): the base 2 logarithm of *value*, or *null* if *value* is equal or less than 0 -```js +```aql LOG2(1024) // 10 LOG2(8) // 3 LOG2(0) // null @@ -371,7 +371,7 @@ Return the base 10 logarithm of *value*. - returns **num** (number): the base 10 logarithm of *value*, or *null* if *value* is equal or less than 0 -```js +```aql LOG10(10000) // 4 LOG10(10) // 1 LOG10(0) // null @@ -398,7 +398,7 @@ To calculate the similarity, see [COSINE_SIMILARITY()](#cosine_similarity). In case of invalid input values the function returns **null** and produces a warning. -```js +```aql L1_DISTANCE([-1,-1], [2,2]) // 6 L1_DISTANCE([[1,2,3],[-1,-2,-3],[3,4,5],[-5,2,1]], [1,1,1]) // [3,9,9,7] L1_DISTANCE([1.5], [3]) // 1.5 @@ -425,7 +425,7 @@ To calculate the similarity, see [COSINE_SIMILARITY()](#cosine_similarity). In case of invalid input values the function returns **null** and produces a warning. -```js +```aql L2_DISTANCE([1,1], [5,2]) // 4.1231056256176606 L2_DISTANCE([[1,2,3], [4,5,6], [7,8,9]], [3,2,1]) // [2.8284271247461903, 5.916079783099616, 10.770329614269007] L2_DISTANCE([0,1], [1,0]) // 1.4142135623730951 @@ -443,7 +443,7 @@ Also see [type and value order](fundamentals-type-value-order.html). - returns **max** (any\|null): the element with the greatest value. If the array is empty or contains *null* values only, the function will return *null*. -```js +```aql MAX( [5, 9, -2, null, 1] ) // 9 MAX( [ null, null ] ) // null ``` @@ -463,7 +463,7 @@ the average value (arithmetic mean). 
- returns **median** (number\|null): the median of *numArray*. If the array is empty or contains *null* values only, the function will return *null*. -```js +```aql MEDIAN( [ 1, 2, 3] ) // 2 MEDIAN( [ 1, 2, 3, 4 ] ) // 2.5 MEDIAN( [ 4, 2, 3, 1 ] ) // 2.5 @@ -482,7 +482,7 @@ Also see [type and value order](fundamentals-type-value-order.html). - returns **min** (any\|null): the element with the smallest value. If the array is empty or contains *null* values only, the function will return *null*. -```js +```aql MIN( [5, 9, -2, null, 1] ) // -2 MIN( [ null, null ] ) // null ``` @@ -501,7 +501,7 @@ Return the *n*th percentile of the values in *numArray*. array is empty or only *null* values are contained in it or the percentile cannot be calculated -```js +```aql PERCENTILE( [1, 2, 3, 4], 50 ) // 2 PERCENTILE( [1, 2, 3, 4], 50, "rank" ) // 2 PERCENTILE( [1, 2, 3, 4], 50, "interpolation" ) // 2.5 @@ -516,7 +516,7 @@ Return pi. - returns **pi** (number): the first few significant digits of pi (3.141592653589793) -```js +```aql PI() // 3.141592653589793 ``` @@ -531,7 +531,7 @@ Return the *base* to the exponent *exp*. - **exp** (number): the exponent value - returns **num** (number): the exponentiated value -```js +```aql POW( 2, 4 ) // 16 POW( 5, -1 ) // 0.2 POW( 5, 0 ) // 1 @@ -550,7 +550,7 @@ Return the product of the values in *array*. - returns **product** (number): the product of all values in *numArray*. If the array is empty or only *null* values are contained in the array, *1* will be returned. -```js +```aql PRODUCT( [1, 2, 3, 4] ) // 24 PRODUCT( [null, -5, 6] ) // -30 PRODUCT( [ ] ) // 1 @@ -566,7 +566,7 @@ Return the angle converted from degrees to radians. - **deg** (number): the input value - returns **num** (number): the angle in radians -```js +```aql RADIANS(180) // 3.141592653589793 RADIANS(90) // 1.5707963267948966 RADIANS(0) // 0 @@ -581,14 +581,14 @@ Return a pseudo-random number between 0 and 1. 
- returns **randomNumber** (number): a number greater than 0 and less than 1 -```js +```aql RAND() // 0.3503170117504508 RAND() // 0.6138226173882478 ``` Complex example: -```js +```aql LET coinFlips = ( FOR i IN 1..100000 RETURN RAND() > 0.5 ? "heads" : "tails" @@ -629,7 +629,7 @@ with integer bounds and a step size of 1. the default is *1.0* - returns **numArray** (array): all numbers in the range as array -```js +```aql RANGE(1, 4) // [ 1, 2, 3, 4 ] RANGE(1, 4, 2) // [ 1, 3 ] RANGE(1, 4, 3) // [ 1, 4 ] @@ -649,7 +649,7 @@ Return the integer closest to *value*. - **value** (number): any number - returns **roundedValue** (number): the value rounded to the closest integer -```js +```aql ROUND(2.49) // 2 ROUND(2.50) // 3 ROUND(-2.50) // -2 @@ -661,7 +661,7 @@ a combination of the [ternary operator](operators.html#ternary-operator), [CEIL()](#ceil) and [FLOOR()](#floor): -```js +```aql value >= 0 ? FLOOR(value) : CEIL(value) ``` @@ -675,7 +675,7 @@ Return the sine of *value*. - **value** (number): the input value - returns **num** (number): the sine of *value* -```js +```aql SIN(3.141592653589783 / 2) // 1 SIN(0) // 0 SIN(-3.141592653589783 / 2) // -1 @@ -692,14 +692,14 @@ Return the square root of *value*. - **value** (number): a number - returns **squareRoot** (number): the square root of *value* -```js +```aql SQRT(9) // 3 SQRT(2) // 1.4142135623730951 ``` Other roots can be calculated with [POW()](#pow) like `POW(value, 1/n)`: -```js +```aql // 4th root of 8*8*8*8 = 4096 POW(4096, 1/4) // 8 @@ -722,7 +722,7 @@ Return the population standard deviation of the values in *array*. If the array is empty or only *null* values are contained in the array, *null* will be returned. -```js +```aql STDDEV_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 1.854723699099141 ``` @@ -738,7 +738,7 @@ Return the sample standard deviation of the values in *array*. If the array is empty or only *null* values are contained in the array, *null* will be returned. 
-```js +```aql STDDEV_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 2.0736441353327724 ``` @@ -757,7 +757,7 @@ Return the sum of the values in *array*. - returns **sum** (number): the total of all values in *numArray*. If the array is empty or only *null* values are contained in the array, *0* will be returned. -```js +```aql SUM( [1, 2, 3, 4] ) // 10 SUM( [null, -5, 6] ) // 1 SUM( [ ] ) // 0 @@ -773,7 +773,7 @@ Return the tangent of *value*. - **value** (number): the input value - returns **num** (number): the tangent of *value* -```js +```aql TAN(10) // 0.6483608274590866 TAN(5) // -3.380515006246586 TAN(0) // 0 @@ -791,7 +791,7 @@ Return the population variance of the values in *array*. If the array is empty or only *null* values are contained in the array, *null* will be returned. -```js +```aql VARIANCE_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 3.4400000000000004 ``` @@ -807,7 +807,7 @@ Return the sample variance of the values in *array*. If the array is empty or only *null* values are contained in the array, *null* will be returned. 
-```js +```aql VARIANCE_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 4.300000000000001 ``` diff --git a/3.10/aql/functions-string.md b/3.10/aql/functions-string.md index d8a925f406..32e5f1d547 100644 --- a/3.10/aql/functions-string.md +++ b/3.10/aql/functions-string.md @@ -1864,7 +1864,7 @@ case conversion and accent removal for German text): To search a View for documents where the `text` attribute contains certain words/tokens in any order, you can use the function like this: -```js +```aql FOR doc IN viewName SEARCH ANALYZER(doc.text IN TOKENS("dolor amet lorem", "text_en"), "text_en") RETURN doc @@ -1909,7 +1909,7 @@ In most cases you will want to flatten the resulting array for further usage, because nested arrays are not accepted in `SEARCH` statements such as ` ALL IN doc.`: -```js +```aql LET tokens = TOKENS(["quick brown", ["fox"]], "text_en") // [ ["quick", "brown"], [["fox"]] ] LET tokens_flat = FLATTEN(tokens, 2) // [ "quick", "brown", "fox" ] FOR doc IN myView SEARCH ANALYZER(tokens_flat ALL IN doc.title, "text_en") RETURN doc diff --git a/3.10/aql/functions-type-cast.md b/3.10/aql/functions-type-cast.md index 42c8f62fd2..cba0256edc 100644 --- a/3.10/aql/functions-type-cast.md +++ b/3.10/aql/functions-type-cast.md @@ -38,7 +38,7 @@ boolean value. It's also possible to use double negation to cast to boolean: -```js +```aql !!1 // true !!0 // false !!-0.0 // false @@ -68,20 +68,19 @@ Take an input *value* of any type and convert it into a numeric value. result of `TO_NUMBER()` for its sole member. An array with two or more members is converted to the number *0*. - An object / document is converted to the number *0*. 
- - A unary plus will also cast to a number, but `TO_NUMBER()` is the preferred way: - ```js -+'5' // 5 -+[8] // 8 -+[8,9] // 0 -+{} // 0 + - A unary plus will also cast to a number, but `TO_NUMBER()` is the preferred way: + ```aql + +'5' // 5 + +[8] // 8 + +[8,9] // 0 + +{} // 0 ``` - A unary minus works likewise, except that a numeric value is also negated: - ```js --'5' // -5 --[8] // -8 --[8,9] // 0 --{} // 0 + - A unary minus works likewise, except that a numeric value is also negated: + ```aql + -'5' // -5 + -[8] // -8 + -[8,9] // 0 + -{} // 0 ``` ### TO_STRING() @@ -99,7 +98,7 @@ Take an input *value* of any type and convert it into a string value. - Arrays and objects / documents are converted to string representations, which means JSON-encoded strings with no additional whitespace -```js +```aql TO_STRING(null) // "" TO_STRING(true) // "true" TO_STRING(false) // "false" @@ -126,7 +125,7 @@ Take an input *value* of any type and convert it into an array value. - Objects / documents are converted to an array containing their attribute **values** as array elements, just like [VALUES()](functions-document.html#values) -```js +```aql TO_ARRAY(null) // [] TO_ARRAY(false) // [false] TO_ARRAY(true) // [true] diff --git a/3.10/aql/functions.md b/3.10/aql/functions.md index 8c4842ec3d..c9cf789938 100644 --- a/3.10/aql/functions.md +++ b/3.10/aql/functions.md @@ -10,7 +10,7 @@ AQL supports functions to allow more complex computations. Functions can be called at any query position where an expression is allowed. The general function call syntax is: -```js +```aql FUNCTIONNAME(arguments) ``` @@ -22,7 +22,7 @@ calls distinguishable from variable names. 
Some example function calls: -```js +```aql HAS(user, "name") LENGTH(friends) COLLECTIONS() diff --git a/3.10/aql/fundamentals-bind-parameters.md b/3.10/aql/fundamentals-bind-parameters.md index d30f05128b..e8cbf98b3d 100644 --- a/3.10/aql/fundamentals-bind-parameters.md +++ b/3.10/aql/fundamentals-bind-parameters.md @@ -22,7 +22,7 @@ The general syntax for bind parameters is `@name` where `@` signifies that this is a value bind parameter and *name* is the actual parameter name. It can be used to substitute values in a query. -```js +```aql RETURN @value ``` @@ -30,7 +30,7 @@ For collections, there is a slightly different syntax `@@coll` where `@@` signifies that it is a collection bind parameter and *coll* is the parameter name. -```js +```aql FOR doc IN @@coll RETURN doc ``` @@ -44,12 +44,12 @@ or the underscore symbol. They must not be quoted in the query code: -```js +```aql FILTER u.name == "@name" // wrong FILTER u.name == @name // correct ``` -```js +```aql FOR doc IN "@@collection" // wrong FOR doc IN @@collection // correct ``` @@ -57,7 +57,7 @@ FOR doc IN @@collection // correct If you need to do string processing (concatenation, etc.) in the query, you need to use [string functions](functions-string.html) to do so: -```js +```aql FOR u IN users FILTER u.id == CONCAT('prefix', @id, 'suffix') && u.name == @name RETURN u @@ -74,7 +74,7 @@ there is a pane next to the query editor where the bind parameters can be entered. For below query, two input fields will show up to enter values for the parameters `id` and `name`. -```js +```aql FOR u IN users FILTER u.id == @id && u.name == @name RETURN u @@ -110,7 +110,7 @@ Specific information about parameters binding can also be found in: Bind parameters can be used for both, the dot notation as well as the square bracket notation for sub-attribute access. 
They can also be chained: -```js +```aql LET doc = { foo: { bar: "baz" } } RETURN doc.@attr.@subattr @@ -132,7 +132,7 @@ specified using the dot notation and a single bind parameter, by passing an array of strings as parameter value. The elements of the array represent the attribute keys of the path: -```js +```aql LET doc = { a: { b: { c: 1 } } } RETURN doc.@attr ``` @@ -151,7 +151,7 @@ A special type of bind parameter exists for injecting collection names. This type of bind parameter has a name prefixed with an additional `@` symbol, so `@@name` in the query. -```js +```aql FOR u IN @@collection FILTER u.active == true RETURN u diff --git a/3.10/aql/fundamentals-data-types.md b/3.10/aql/fundamentals-data-types.md index b3583e9453..3d4a1a9fed 100644 --- a/3.10/aql/fundamentals-data-types.md +++ b/3.10/aql/fundamentals-data-types.md @@ -98,7 +98,7 @@ character is to be used itself within the string literal, it must be escaped using the backslash symbol. A literal backslash also needs to be escaped with a backslash. -``` +```aql "yikes!" "don't know" "this is a \"quoted\" word" @@ -158,7 +158,7 @@ supported. A trailing comma after the last element is allowed (introduced in v3.7.0): -```js +```aql [ 1, 2, @@ -173,7 +173,7 @@ to access array values starting from the end of the array. This is convenient if the length of the array is unknown and access to elements at the end of the array is required. -```js +```aql // access 1st array element (elements start at index 0) u.friends[0] @@ -212,7 +212,7 @@ whereas the value can be of any type including sub-objects. The attribute name is mandatory - there can't be anonymous values in an object. It can be specified as a quoted or unquoted string: -```js +```aql { name: … } // unquoted { 'name': … } // quoted (apostrophe / "single quote mark") { "name": … } // quoted (quotation mark / "double quote mark") @@ -226,7 +226,7 @@ letter, underscore or dollar sign. 
If a [keyword](fundamentals-syntax.html#keywords) is used as an attribute name then the attribute name must be quoted or escaped by ticks or backticks: -```js +```aql { return: … } // error, return is a keyword! { 'return': … } // quoted { "return": … } // quoted @@ -236,7 +236,7 @@ then the attribute name must be quoted or escaped by ticks or backticks: A trailing comma after the last element is allowed (introduced in v3.7.0): -```js +```aql { "a": 1, "b": 2, @@ -248,14 +248,14 @@ Attribute names can be computed using dynamic expressions, too. To disambiguate regular attribute names from attribute name expressions, computed attribute names must be enclosed in square brackets `[ … ]`: -```js +```aql { [ CONCAT("test/", "bar") ] : "someValue" } ``` There is also shorthand notation for attributes which is handy for returning existing variables easily: -```js +```aql LET name = "Peter" LET age = 42 RETURN { name, age } @@ -263,7 +263,7 @@ RETURN { name, age } The above is the shorthand equivalent for the generic form: -```js +```aql LET name = "Peter" LET age = 42 RETURN { name: name, age: age } @@ -272,7 +272,7 @@ RETURN { name: name, age: age } Any valid expression can be used as an attribute value. 
That also means nested objects can be used as attribute values: -```js +```aql { name : "Peter" } { "name" : "Vanessa", "age" : 15 } { "name" : "John", likes : [ "Swimming", "Skiing" ], "address" : { "street" : "Cucumber lane", "zip" : "94242" } } @@ -281,21 +281,21 @@ objects can be used as attribute values: Individual object attributes can later be accessed by their names using the dot `.` accessor: -```js +```aql u.address.city.name u.friends[0].name.first ``` Attributes can also be accessed using the square bracket `[]` accessor: -```js +```aql u["address"]["city"]["name"] u["friends"][0]["name"]["first"] ``` In contrast to the dot accessor, the square brackets allow for expressions: -```js +```aql LET attr1 = "friends" LET attr2 = "name" u[attr1][0][attr2][ CONCAT("fir", "st") ] diff --git a/3.10/aql/fundamentals-document-data.md b/3.10/aql/fundamentals-document-data.md index bc88593458..46d1ccb4d3 100644 --- a/3.10/aql/fundamentals-document-data.md +++ b/3.10/aql/fundamentals-document-data.md @@ -28,7 +28,7 @@ For example, the following query will return all documents from the collection *users* that have a value of *null* in the attribute *name*, plus all documents from *users* that do not have the *name* attribute at all: -```js +```aql FOR u IN users FILTER u.name == null RETURN u @@ -42,7 +42,7 @@ For example, the following query will return all documents from the collection *users* that have an attribute *age* with a value less than *39*, but also all documents from the collection that do not have the attribute *age* at all. 
-```js +```aql FOR u IN users FILTER u.age < 39 RETURN u diff --git a/3.10/aql/fundamentals-query-results.md b/3.10/aql/fundamentals-query-results.md index 8873843f93..7c82711393 100644 --- a/3.10/aql/fundamentals-query-results.md +++ b/3.10/aql/fundamentals-query-results.md @@ -17,7 +17,7 @@ For example, when returning data from a collection with inhomogeneous documents without modification, the result values will as well have an inhomogeneous structure. Each result value itself is a document: -```js +```aql FOR u IN users RETURN u ``` @@ -34,7 +34,7 @@ However, if a fixed set of attributes from the collection is queried, then the query result values will have a homogeneous structure. Each result value is still a document: -```js +```aql FOR u IN users RETURN { "id": u.id, "name": u.name } ``` @@ -50,7 +50,7 @@ FOR u IN users It is also possible to query just scalar values. In this case, the result set is an array of scalars, and each result value is a scalar value: -```js +```aql FOR u IN users RETURN u.id ``` diff --git a/3.10/aql/fundamentals-syntax.md b/3.10/aql/fundamentals-syntax.md index 64e515120a..0508689180 100644 --- a/3.10/aql/fundamentals-syntax.md +++ b/3.10/aql/fundamentals-syntax.md @@ -45,7 +45,7 @@ AQL supports two types of comments: end with an asterisk and a following forward slash. They can span as many lines as necessary. -```js +```aql /* this is a comment */ RETURN 1 /* these */ RETURN /* are */ 1 /* multiple */ + /* comments */ 1 /* this is @@ -84,7 +84,7 @@ above operations. An example AQL query may look like this: -```js +```aql FOR u IN users FILTER u.type == "newbie" && u.active == true RETURN u.name @@ -219,7 +219,7 @@ Variable names can be longer, but are discouraged. Keywords must not be used as names. If a reserved keyword should be used as a name, the name must be enclosed in backticks or forward ticks. -```js +```aql FOR doc IN `filter` RETURN doc.`sort` ``` @@ -229,14 +229,14 @@ keywords here. 
The example can alternatively written as: -```js +```aql FOR f IN ´filter´ RETURN f.´sort´ ``` Instead of ticks, you may use the bracket notation for the attribute access: -```js +```aql FOR f IN `filter` RETURN f["sort"] ``` @@ -247,7 +247,7 @@ conflict with the reserved word. Escaping is also required if special characters such as hyphen minus (`-`) are contained in a name: -```js +```aql FOR doc IN `my-coll` RETURN doc ``` @@ -278,7 +278,7 @@ allowed to refer to an unqualified attribute name. Please refer to the [Naming Conventions in ArangoDB](../data-modeling-naming-conventions-attribute-names.html) for more information about the attribute naming conventions. -```js +```aql FOR u IN users FOR f IN friends FILTER u.active == true && f.active == true && u.id == f.userId @@ -296,7 +296,7 @@ variables that are assigned a value must have a name that is unique within the context of the query. Variable names must be different from the names of any collection name used in the same query. -```js +```aql FOR u IN users LET friends = u.friends RETURN { "name" : u.name, "friends" : friends } diff --git a/3.10/aql/fundamentals-type-value-order.md b/3.10/aql/fundamentals-type-value-order.md index c6ff231880..360e68317a 100644 --- a/3.10/aql/fundamentals-type-value-order.md +++ b/3.10/aql/fundamentals-type-value-order.md @@ -26,7 +26,7 @@ string value, any array (even an empty array) or any object / document. Addition string value (even an empty string) will always be greater than any numeric value, a boolean value, *true* or *false*. -```js +```aql null < false null < true null < 0 @@ -98,7 +98,7 @@ If an array element is itself a compound value (an array or an object / document comparison algorithm will check the element's sub values recursively. The element's sub-elements are compared recursively. -```js +```aql [ ] < [ 0 ] [ 1 ] < [ 2 ] [ 1, 2 ] < [ 2 ] @@ -123,7 +123,7 @@ unambiguous comparison result. 
If an unambiguous comparison result is found, the comparison is finished. If there is no unambiguous comparison result, the two compared objects / documents are considered equal. -```js +```aql { } == { "a" : null } { } < { "a" : 1 } diff --git a/3.10/aql/graphs-k-paths.md b/3.10/aql/graphs-k-paths.md index 35efb74c05..12558df97d 100644 --- a/3.10/aql/graphs-k-paths.md +++ b/3.10/aql/graphs-k-paths.md @@ -113,7 +113,7 @@ connected graphs it can return a large number of paths. ### Working with named graphs -``` +```aql FOR path IN MIN..MAX OUTBOUND|INBOUND|ANY K_PATHS startVertex TO targetVertex @@ -147,7 +147,7 @@ FOR path ### Working with collection sets -``` +```aql FOR path IN MIN..MAX OUTBOUND|INBOUND|ANY K_PATHS startVertex TO targetVertex @@ -168,7 +168,7 @@ has no relevance, but in *edges1* and *edges3* the direction should be taken into account. In this case you can use `OUTBOUND` as general search direction and `ANY` specifically for *edges2* as follows: -``` +```aql FOR vertex IN OUTBOUND K_PATHS startVertex TO targetVertex edges1, ANY edges2, edges3 diff --git a/3.10/aql/graphs-kshortest-paths.md b/3.10/aql/graphs-kshortest-paths.md index 891cd48018..9c97c4ce0e 100644 --- a/3.10/aql/graphs-kshortest-paths.md +++ b/3.10/aql/graphs-kshortest-paths.md @@ -91,7 +91,7 @@ graphs it can return a large number of paths, or perform an expensive ### Working with named graphs -``` +```aql FOR path IN OUTBOUND|INBOUND|ANY K_SHORTEST_PATHS startVertex TO targetVertex @@ -133,7 +133,7 @@ number, then the query is aborted with an error. ### Working with collection sets -``` +```aql FOR path IN OUTBOUND|INBOUND|ANY K_SHORTEST_PATHS startVertex TO targetVertex @@ -155,7 +155,7 @@ has no relevance, but in *edges1* and *edges3* the direction should be taken int account. 
In this case you can use `OUTBOUND` as general search direction and `ANY` specifically for *edges2* as follows: -``` +```aql FOR vertex IN OUTBOUND K_SHORTEST_PATHS startVertex TO targetVertex edges1, ANY edges2, edges3 diff --git a/3.10/aql/graphs-shortest-path.md b/3.10/aql/graphs-shortest-path.md index bbc1c5777e..46487a3561 100644 --- a/3.10/aql/graphs-shortest-path.md +++ b/3.10/aql/graphs-shortest-path.md @@ -50,7 +50,7 @@ collections (anonymous graph). ### Working with named graphs -``` +```aql FOR vertex[, edge] IN OUTBOUND|INBOUND|ANY SHORTEST_PATH startVertex TO targetVertex @@ -88,7 +88,7 @@ number, then the query is aborted with an error. ### Working with collection sets -``` +```aql FOR vertex[, edge] IN OUTBOUND|INBOUND|ANY SHORTEST_PATH startVertex TO targetVertex @@ -109,7 +109,7 @@ has no relevance, but in *edges1* and *edges3* the direction should be taken int account. In this case you can use `OUTBOUND` as general search direction and `ANY` specifically for *edges2* as follows: -``` +```aql FOR vertex IN OUTBOUND SHORTEST_PATH startVertex TO targetVertex edges1, ANY edges2, edges3 diff --git a/3.10/aql/graphs-traversals.md b/3.10/aql/graphs-traversals.md index be64139719..511b10ec45 100644 --- a/3.10/aql/graphs-traversals.md +++ b/3.10/aql/graphs-traversals.md @@ -19,7 +19,7 @@ There are two slightly different syntaxes for traversals in AQL, one for ### Working with named graphs -``` +```aql [WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]] FOR vertex[, edge[, path]] IN [min[..max]] @@ -156,7 +156,7 @@ number, then the query is aborted with an error. 
### Working with collection sets -``` +```aql [WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]] FOR vertex[, edge[, path]] IN [min[..max]] @@ -186,7 +186,7 @@ no relevance but in *edges1* and *edges3* the direction should be taken into acc In this case you can use `OUTBOUND` as general traversal direction and `ANY` specifically for *edges2* as follows: -``` +```aql FOR vertex IN OUTBOUND startVertex edges1, ANY edges2, edges3 diff --git a/3.10/aql/invocation-with-web-interface.md b/3.10/aql/invocation-with-web-interface.md index 8343930ea3..db972d1dc2 100644 --- a/3.10/aql/invocation-with-web-interface.md +++ b/3.10/aql/invocation-with-web-interface.md @@ -22,7 +22,7 @@ application code. Here is an example: -```js +```aql FOR doc IN @@collection FILTER CONTAINS(LOWER(doc.author), @search, false) RETURN { "name": doc.name, "descr": doc.description, "author": doc.author } diff --git a/3.10/aql/operations-collect.md b/3.10/aql/operations-collect.md index cf57bd045c..188f6ac2f1 100644 --- a/3.10/aql/operations-collect.md +++ b/3.10/aql/operations-collect.md @@ -43,7 +43,7 @@ This variable contains the group value. Here's an example query that find the distinct values in `u.city` and makes them available in variable `city`: -```js +```aql FOR u IN users COLLECT city = u.city RETURN { @@ -63,7 +63,7 @@ on the top level, in which case all variables are taken. Furthermore note that it is possible that the optimizer moves `LET` statements out of `FOR` statements to improve performance. -```js +```aql FOR u IN users COLLECT city = u.city INTO groups RETURN { @@ -80,7 +80,7 @@ made available in the variable `groups`. This is due to the `INTO` clause. `COLLECT` also allows specifying multiple group criteria. 
Individual group criteria can be separated by commas: -```js +```aql FOR u IN users COLLECT country = u.country, city = u.city INTO groups RETURN { @@ -100,7 +100,7 @@ Discarding obsolete variables The third form of `COLLECT` allows rewriting the contents of the *groupsVariable* using an arbitrary *projectionExpression*: -```js +```aql FOR u IN users COLLECT country = u.country, city = u.city INTO groups = u.name RETURN { @@ -117,7 +117,7 @@ the *groupsVariable* as it would happen without a *projectionExpression*. The expression following `INTO` can also be used for arbitrary computations: -```js +```aql FOR u IN users COLLECT country = u.country, city = u.city INTO groups = { "name" : u.name, @@ -141,7 +141,7 @@ The following example limits the variables that are copied into the *groupsVaria to just `name`. The variables `u` and `someCalculation` also present in the scope will not be copied into *groupsVariable* because they are not listed in the `KEEP` clause: -```js +```aql FOR u IN users LET name = u.name LET someCalculation = u.value1 + u.value2 @@ -165,7 +165,7 @@ determine the number of group members efficiently. The simplest form just returns the number of items that made it into the `COLLECT`: -```js +```aql FOR u IN users COLLECT WITH COUNT INTO length RETURN length @@ -173,14 +173,14 @@ FOR u IN users The above is equivalent to, but less efficient than: -```js +```aql RETURN LENGTH(users) ``` The `WITH COUNT` clause can also be used to efficiently count the number of items in each group: -```js +```aql FOR u IN users COLLECT age = u.age WITH COUNT INTO length RETURN { @@ -203,7 +203,7 @@ used as described before. 
For other aggregations, it is possible to run aggregate functions on the `COLLECT` results: -```js +```aql FOR u IN users COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO g RETURN { @@ -221,7 +221,7 @@ incrementally during the collect operation, and is therefore often more efficien With the `AGGREGATE` variant the above query becomes: -```js +```aql FOR u IN users COLLECT ageGroup = FLOOR(u.age / 5) * 5 AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age) @@ -236,7 +236,7 @@ The `AGGREGATE` keyword can only be used after the `COLLECT` keyword. If used, i must directly follow the declaration of the grouping keys. If no grouping keys are used, it must follow the `COLLECT` keyword directly: -```js +```aql FOR u IN users COLLECT AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age) RETURN { @@ -274,12 +274,12 @@ assignment: In order to make a result set unique, one can either use `COLLECT` or `RETURN DISTINCT`. -```js +```aql FOR u IN users RETURN DISTINCT u.age ``` -```js +```aql FOR u IN users COLLECT age = u.age RETURN age @@ -308,7 +308,7 @@ the *sorted* and the *hash* variant. The `method` option can be used in a `COLLECT` statement to inform the optimizer about the preferred method, `"sorted"` or `"hash"`. -```js +```aql COLLECT ... OPTIONS { method: "sorted" } ``` @@ -343,7 +343,7 @@ If the sort order of the `COLLECT` is irrelevant to the user, adding the extra instruction `SORT null` after the `COLLECT` will allow the optimizer to remove the sorts altogether: -```js +```aql FOR u IN users COLLECT age = u.age SORT null /* note: will be optimized away */ @@ -369,7 +369,7 @@ require its input to be sorted. Which variant of `COLLECT` will actually be used can be figured out by looking at the execution plan of a query, specifically the comment of the *CollectNode*: -```js +```aql Execution plan: Id NodeType Est. 
Comment 1 SingletonNode 1 * ROOT diff --git a/3.10/aql/operations-filter.md b/3.10/aql/operations-filter.md index 2caaaabb79..48fa656ae3 100644 --- a/3.10/aql/operations-filter.md +++ b/3.10/aql/operations-filter.md @@ -25,7 +25,7 @@ true, the current element is not skipped and can be further processed. See [Operators](operators.html) for a list of comparison operators, logical operators etc. that you can use in conditions. -```js +```aql FOR u IN users FILTER u.active == true && u.age < 39 RETURN u @@ -36,7 +36,7 @@ the same block. If multiple `FILTER` statements are used, their results will be combined with a logical `AND`, meaning all filter conditions must be true to include an element. -```js +```aql FOR u IN users FILTER u.active == true FILTER u.age < 39 @@ -57,7 +57,7 @@ for a description of the impact of non-existent or null attributes. While `FILTER` typically occurs in combination with `FOR`, it can also be used at the top level or in subqueries without a surrounding `FOR` loop. -```js +```aql FILTER false RETURN ASSERT(false, "never reached") ``` @@ -69,7 +69,7 @@ Note that the positions of `FILTER` statements can influence the result of a que There are 16 active users in the [test data](examples.html#example-data) for instance: -```js +```aql FOR u IN users FILTER u.active == true RETURN u @@ -77,7 +77,7 @@ FOR u IN users We can limit the result set to 5 users at most: -```js +```aql FOR u IN users FILTER u.active == true LIMIT 5 @@ -89,7 +89,7 @@ instance. Which ones are returned is undefined, since there is no `SORT` stateme to ensure a particular order. If we add a second `FILTER` statement to only return women... -```js +```aql FOR u IN users FILTER u.active == true LIMIT 5 @@ -103,7 +103,7 @@ and not all of them fulfill the gender criterion, even though there are more tha 5 active female users in the collection. 
A more deterministic result can be achieved by adding a `SORT` block: -```js +```aql FOR u IN users FILTER u.active == true SORT u.age ASC @@ -116,7 +116,7 @@ This will return the users *Mariah* and *Mary*. If sorted by age in `DESC` order then the Sophia, Emma and Madison documents are returned. A `FILTER` after a `LIMIT` is not very common however, and you probably want such a query instead: -```js +```aql FOR u IN users FILTER u.active == true AND u.gender == "f" SORT u.age ASC diff --git a/3.10/aql/operations-for.md b/3.10/aql/operations-for.md index 908f853180..2a47e002cf 100644 --- a/3.10/aql/operations-for.md +++ b/3.10/aql/operations-for.md @@ -26,7 +26,7 @@ For Views, there is a special (optional) [`SEARCH` keyword](operations-search.ht {% hint 'info' %} Views cannot be used as edge collections in traversals: -```js +```aql FOR v IN 1..3 ANY startVertex viewName /* invalid! */ ``` {% endhint %} @@ -41,7 +41,7 @@ required that *expression* returns an array in all cases. The empty array is allowed, too. The current array element is made available for further processing in the variable specified by *variableName*. -```js +```aql FOR u IN users RETURN u ``` @@ -60,7 +60,7 @@ placed in is closed. Another example that uses a statically declared array of values to iterate over: -```js +```aql FOR year IN [ 2011, 2012, 2013 ] RETURN { "year" : year, "isLeapYear" : year % 4 == 0 && (year % 100 != 0 || year % 400 == 0) } ``` @@ -69,7 +69,7 @@ Nesting of multiple `FOR` statements is allowed, too. When `FOR` statements are nested, a cross product of the array elements returned by the individual `FOR` statements will be created. -```js +```aql FOR u IN users FOR l IN locations RETURN { "user" : u, "location" : l } @@ -94,11 +94,11 @@ For collections, index hints can be given to the optimizer with the `indexHint` option. 
The value can be a single **index name** or a list of index names in order of preference: -```js +```aql FOR … IN … OPTIONS { indexHint: "byName" } ``` -```js +```aql FOR … IN … OPTIONS { indexHint: ["byName", "byColor"] } ``` @@ -117,7 +117,7 @@ Index hints are not enforced by default. If `forceIndexHint` is set to `true`, then an error is generated if `indexHint` does not contain a usable index, instead of using a fallback index or not using an index at all. -```js +```aql FOR … IN … OPTIONS { indexHint: … , forceIndexHint: true } ``` @@ -134,7 +134,7 @@ be satisfied from the index data alone. Consider the following query and an index on the `value` attribute being present: -```js +```aql FOR doc IN collection FILTER doc.value <= 99 RETURN doc.other @@ -153,7 +153,7 @@ even if an index scan turns out to be slower in the end. You can force the optimizer to not use an index for any given `FOR` loop by using the `disableIndex` hint and setting it to `true`: -```js +```aql FOR doc IN collection OPTIONS { disableIndex: true } FILTER doc.value <= 99 RETURN doc.other @@ -181,7 +181,7 @@ previously hard-coded default value. For example, using a `maxProjections` hint of 7, the following query will extract 7 attributes as projections from the original document: -```js +```aql FOR doc IN collection OPTIONS { maxProjections: 7 } RETURN [ doc.val1, doc.val2, doc.val3, doc.val4, doc.val5, doc.val6, doc.val7 ] ``` @@ -206,7 +206,7 @@ enabled in-memory caches, but for which it is known that using the cache will have a negative performance impact. In this case, you can set the `useCache` hint to `false`: -```js +```aql FOR doc IN collection OPTIONS { useCache: false } FILTER doc.value == @value ... 
@@ -229,7 +229,7 @@ Also see [Caching of index values](../indexing-persistent.html#caching-of-index- The multi-dimensional index type `zkd` supports an optional index hint for tweaking performance: -```js +```aql FOR … IN … OPTIONS { lookahead: 32 } ``` diff --git a/3.10/aql/operations-insert.md b/3.10/aql/operations-insert.md index 7553aed464..7cec5d6af7 100644 --- a/3.10/aql/operations-insert.md +++ b/3.10/aql/operations-insert.md @@ -32,7 +32,7 @@ a `_key` attribute. If no `_key` attribute is provided, ArangoDB will auto-gener a value for `_key` value. Inserting a document will also auto-generate a document revision number for the document. -```js +```aql FOR i IN 1..100 INSERT { value: i } INTO numbers ``` @@ -40,14 +40,14 @@ FOR i IN 1..100 An insert operation can also be performed without a `FOR` loop to insert a single document: -```js +```aql INSERT { value: 1 } INTO numbers ``` When inserting into an [edge collection](../appendix-glossary.html#edge-collection), it is mandatory to specify the attributes `_from` and `_to` in document: -```js +```aql FOR u IN users FOR p IN products FILTER u._key == p.recommendedBy @@ -65,7 +65,7 @@ be provided in an `INSERT` operation. `ignoreErrors` can be used to suppress query errors that may occur when violating unique key constraints: -```js +```aql FOR i IN 1..1000 INSERT { _key: CONCAT('test', i), @@ -79,7 +79,7 @@ FOR i IN 1..1000 To make sure data are durable when an insert query returns, there is the `waitForSync` query option: -```js +```aql FOR i IN 1..1000 INSERT { _key: CONCAT('test', i), @@ -99,7 +99,7 @@ If you want to replace existing documents with documents having the same key there is the `overwrite` query option. 
This will let you safely replace the documents instead of raising a "unique constraint violated error": -```js +```aql FOR i IN 1..1000 INSERT { _key: CONCAT('test', i), @@ -142,7 +142,7 @@ When using the `update` overwrite mode, the `keepNull` and `mergeObjects` options control how the update is done. See [UPDATE operation](operations-update.html#query-options). -```js +```aql FOR i IN 1..1000 INSERT { _key: CONCAT('test', i), @@ -162,7 +162,7 @@ Exclusive access can also speed up modification queries, because we avoid confli Use the `exclusive` option to achieve this effect on a per query basis: -```js +```aql FOR doc IN collection INSERT { myval: doc.val + 1 } INTO users OPTIONS { exclusive: true } @@ -180,14 +180,14 @@ The documents contained in `NEW` will contain all attributes, even those auto-ge the database (e.g. `_id`, `_key`, `_rev`). -```js +```aql INSERT document INTO collection RETURN NEW ``` Following is an example using a variable named `inserted` to return the inserted documents. For each inserted document, the document key is returned: -```js +```aql FOR i IN 1..100 INSERT { value: i } INTO users diff --git a/3.10/aql/operations-let.md b/3.10/aql/operations-let.md index 69ec9c0c26..f1c16c7842 100644 --- a/3.10/aql/operations-let.md +++ b/3.10/aql/operations-let.md @@ -22,7 +22,7 @@ Usage Variables are immutable in AQL, which means they can not be re-assigned: -```js +```aql LET a = [1, 2, 3] // initial assignment a = PUSH(a, 4) // syntax error, unexpected identifier @@ -33,7 +33,7 @@ LET b = PUSH(a, 4) // allowed, result: [1, 2, 3, 4] `LET` statements are mostly used to declare complex computations and to avoid repeated computations of the same value at multiple parts of a query. -```js +```aql FOR u IN users LET numRecommendations = LENGTH(u.recommendations) RETURN { @@ -50,7 +50,7 @@ the `RETURN` statement. Another use case for `LET` is to declare a complex computation in a subquery, making the whole query more readable. 
-```js +```aql FOR u IN users LET friends = ( FOR f IN friends diff --git a/3.10/aql/operations-limit.md b/3.10/aql/operations-limit.md index 274672b30c..b1ff5e9f8b 100644 --- a/3.10/aql/operations-limit.md +++ b/3.10/aql/operations-limit.md @@ -24,7 +24,7 @@ the second form with an *offset* value of *0*. Usage ----- -```js +```aql FOR u IN users LIMIT 5 RETURN u @@ -40,7 +40,7 @@ The *offset* value specifies how many elements from the result shall be skipped. It must be 0 or greater. The *count* value specifies how many elements should be at most included in the result. -```js +```aql FOR u IN users SORT u.firstName, u.lastName, u.id DESC LIMIT 2, 5 diff --git a/3.10/aql/operations-remove.md b/3.10/aql/operations-remove.md index e09e693bf1..2c0ecb6659 100644 --- a/3.10/aql/operations-remove.md +++ b/3.10/aql/operations-remove.md @@ -31,17 +31,17 @@ document, which must contain a `_key` attribute. The following queries are thus equivalent: -```js +```aql FOR u IN users REMOVE { _key: u._key } IN users ``` -```js +```aql FOR u IN users REMOVE u._key IN users ``` -```js +```aql FOR u IN users REMOVE u IN users ``` @@ -49,12 +49,12 @@ FOR u IN users A remove operation can remove arbitrary documents, and the documents do not need to be identical to the ones produced by a preceding `FOR` statement: -```js +```aql FOR i IN 1..1000 REMOVE { _key: CONCAT('test', i) } IN users ``` -```js +```aql FOR u IN users FILTER u.active == false REMOVE { _key: u._key } IN backup @@ -63,11 +63,11 @@ FOR u IN users A single document can be removed as well, using a document key string or a document with `_key` attribute: -```js +```aql REMOVE 'john' IN users ``` -```js +```aql LET doc = DOCUMENT('users/john') REMOVE doc IN users ``` @@ -76,7 +76,7 @@ The restriction of a single remove operation per query and collection applies. 
The following query causes an _access after data-modification_ error because of the third remove operation: -```js +```aql REMOVE 'john' IN users REMOVE 'john' IN backups // OK, different collection REMOVE 'mary' IN users // Error, users collection again @@ -91,7 +91,7 @@ Query options remove non-existing documents. For example, the following query will fail if one of the to-be-deleted documents does not exist: -```js +```aql FOR i IN 1..1000 REMOVE { _key: CONCAT('test', i) } IN users ``` @@ -99,7 +99,7 @@ FOR i IN 1..1000 By specifying the `ignoreErrors` query option, these errors can be suppressed so the query completes: -```js +```aql FOR i IN 1..1000 REMOVE { _key: CONCAT('test', i) } IN users OPTIONS { ignoreErrors: true } ``` @@ -109,7 +109,7 @@ FOR i IN 1..1000 To make sure data has been written to disk when a query returns, there is the `waitForSync` query option: -```js +```aql FOR i IN 1..1000 REMOVE { _key: CONCAT('test', i) } IN users OPTIONS { waitForSync: true } ``` @@ -120,7 +120,7 @@ In order to not accidentally remove documents that have been updated since you l them, you can use the option `ignoreRevs` to either let ArangoDB compare the `_rev` values and only succeed if they still match, or let ArangoDB ignore them (default): -```js +```aql FOR i IN 1..1000 REMOVE { _key: CONCAT('test', i), _rev: "1287623" } IN users OPTIONS { ignoreRevs: false } ``` @@ -136,7 +136,7 @@ Exclusive access can also speed up modification queries, because we avoid confli Use the `exclusive` option to achieve this effect on a per query basis: -```js +```aql FOR doc IN collection REPLACE doc._key WITH { replaced: true } @@ -151,14 +151,14 @@ The removed documents can also be returned by the query. 
In this case, the statements are allowed, too).`REMOVE` introduces the pseudo-value `OLD` to refer to the removed documents: -``` +```aql REMOVE keyExpression IN collection options RETURN OLD ``` Following is an example using a variable named `removed` for capturing the removed documents. For each removed document, the document key will be returned. -```js +```aql FOR u IN users REMOVE u IN users LET removed = OLD diff --git a/3.10/aql/operations-replace.md b/3.10/aql/operations-replace.md index 302f615e8b..7a31370f6a 100644 --- a/3.10/aql/operations-replace.md +++ b/3.10/aql/operations-replace.md @@ -28,7 +28,7 @@ Both variants can optionally end with an `OPTIONS { … }` clause. be replaced. `document` is the replacement document. When using the first syntax, `document` must also contain the `_key` attribute to identify the document to be replaced. -```js +```aql FOR u IN users REPLACE { _key: u._key, name: CONCAT(u.firstName, u.lastName), status: u.status } IN users ``` @@ -36,7 +36,7 @@ FOR u IN users The following query is invalid because it does not contain a `_key` attribute and thus it is not possible to determine the documents to be replaced: -```js +```aql FOR u IN users REPLACE { name: CONCAT(u.firstName, u.lastName, status: u.status) } IN users ``` @@ -47,22 +47,22 @@ document, which must contain a `_key` attribute. The following queries are equivalent: -```js +```aql FOR u IN users REPLACE { _key: u._key, name: CONCAT(u.firstName, u.lastName) } IN users ``` -```js +```aql FOR u IN users REPLACE u._key WITH { name: CONCAT(u.firstName, u.lastName) } IN users ``` -```js +```aql FOR u IN users REPLACE { _key: u._key } WITH { name: CONCAT(u.firstName, u.lastName) } IN users ``` -```js +```aql FOR u IN users REPLACE u WITH { name: CONCAT(u.firstName, u.lastName) } IN users ``` @@ -74,12 +74,12 @@ will modify a document's revision number with a server-generated value. 
A replace operation may update arbitrary documents which do not need to be identical to the ones produced by a preceding `FOR` statement: -```js +```aql FOR i IN 1..1000 REPLACE CONCAT('test', i) WITH { foobar: true } IN users ``` -```js +```aql FOR u IN users FILTER u.active == false REPLACE u WITH { status: 'inactive', name: u.name } IN backup @@ -93,7 +93,7 @@ Query options `ignoreErrors` can be used to suppress query errors that may occur when trying to replace non-existing documents or when violating unique key constraints: -```js +```aql FOR i IN 1..1000 REPLACE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true } ``` @@ -103,7 +103,7 @@ FOR i IN 1..1000 To make sure data are durable when a replace query returns, there is the `waitForSync` query option: -```js +```aql FOR i IN 1..1000 REPLACE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { waitForSync: true } ``` @@ -114,7 +114,7 @@ In order to not accidentally overwrite documents that have been updated since yo them, you can use the option `ignoreRevs` to either let ArangoDB compare the `_rev` value and only succeed if they still match, or let ArangoDB ignore them (default): -```js +```aql FOR i IN 1..1000 REPLACE { _key: CONCAT('test', i), _rev: "1287623" } WITH { foobar: true } IN users OPTIONS { ignoreRevs: false } ``` @@ -130,7 +130,7 @@ Exclusive access can also speed up modification queries, because we avoid confli Use the `exclusive` option to achieve this effect on a per query basis: -```js +```aql FOR doc IN collection REPLACE doc._key WITH { replaced: true } IN collection @@ -149,7 +149,7 @@ Both `OLD` and `NEW` will contain all document attributes, even those not specif in the replace expression. 
-``` +```aql REPLACE document IN collection options RETURN OLD REPLACE document IN collection options RETURN NEW REPLACE keyExpression WITH document IN collection options RETURN OLD @@ -160,7 +160,7 @@ Following is an example using a variable named `previous` to return the original documents before modification. For each replaced document, the document key will be returned: -```js +```aql FOR u IN users REPLACE u WITH { value: "test" } IN users @@ -171,7 +171,7 @@ FOR u IN users The following query uses the `NEW` pseudo-value to return the replaced documents (without some of their system attributes): -```js +```aql FOR u IN users REPLACE u WITH { value: "test" } IN users LET replaced = NEW diff --git a/3.10/aql/operations-return.md b/3.10/aql/operations-return.md index 3dc7cf0264..9f3806d181 100644 --- a/3.10/aql/operations-return.md +++ b/3.10/aql/operations-return.md @@ -40,7 +40,7 @@ Usage To iterate over all documents of a collection called *users* and return the full documents, you can write: -```js +```aql FOR u IN users RETURN u ``` @@ -49,14 +49,14 @@ In each iteration of the for-loop, a document of the *users* collection is assigned to a variable *u* and returned unmodified in this example. To return only one attribute of each document, you could use a different return expression: -```js +```aql FOR u IN users RETURN u.name ``` Or to return multiple attributes, an object can be constructed like this: -```js +```aql FOR u IN users RETURN { name: u.name, age: u.age } ``` @@ -67,7 +67,7 @@ This is important to remember when working with [subqueries](examples-combining- [Dynamic attribute names](fundamentals-data-types.html#objects--documents) are supported as well: -```js +```aql FOR u IN users RETURN { [ u._id ]: u.age } ``` @@ -93,7 +93,7 @@ The result contains one object per user with a single key/value pair each. This is usually not desired. 
For a single object, that maps user IDs to ages, the individual results need to be merged and returned with another `RETURN`: -```js +```aql RETURN MERGE( FOR u IN users RETURN { [ u._id ]: u.age } @@ -116,7 +116,7 @@ times, only one of the key/value pairs with the duplicate name will survive dynamic attribute names, use static names instead and return all document properties as attribute values: -```js +```aql FOR u IN users RETURN { name: u.name, age: u.age } ``` @@ -153,7 +153,7 @@ loop preceding it. Below example returns `["foo", "bar", "baz"]`: -```js +```aql FOR value IN ["foo", "bar", "bar", "baz", "foo"] RETURN DISTINCT value ``` @@ -172,7 +172,7 @@ array or the subquery. For example, the following query will apply `DISTINCT` on its subquery results, but not inside the subquery: -```js +```aql FOR what IN 1..2 RETURN DISTINCT ( FOR i IN [ 1, 2, 3, 4, 1, 3 ] @@ -194,7 +194,7 @@ only be one occurrence of the value `[ 1, 2, 3, 4, 1, 3 ]` left: If the goal is to apply the `DISTINCT` inside the subquery, it needs to be moved there: -```js +```aql FOR what IN 1..2 LET sub = ( FOR i IN [ 1, 2, 3, 4, 1, 3 ] diff --git a/3.10/aql/operations-search.md b/3.10/aql/operations-search.md index ab905a73e8..f057b7314e 100644 --- a/3.10/aql/operations-search.md +++ b/3.10/aql/operations-search.md @@ -13,7 +13,7 @@ ArangoSearch. Conceptually, a View is just another document data source, similar to an array or a document/edge collection, over which you can iterate using a [FOR operation](operations-for.html) in AQL: -```js +```aql FOR doc IN viewName RETURN doc ``` @@ -109,7 +109,7 @@ Also see the [`IN_RANGE()` function](functions-arangosearch.html#in_range) for an alternative to a combination of `<`, `<=`, `>`, `>=` operators for range searches. -```js +```aql FOR doc IN viewName SEARCH ANALYZER(doc.text == "quick" OR doc.text == "brown", "text_en") // -- or -- @@ -130,7 +130,7 @@ Also see [Known Issues](../release-notes-known-issues310.html#arangosearch). 
[Array comparison operators](operators.html#array-comparison-operators) are supported (introduced in v3.6.0): -```js +```aql LET tokens = TOKENS("some input", "text_en") // ["some", "input"] FOR doc IN myView SEARCH tokens ALL IN doc.text RETURN doc // dynamic conjunction FOR doc IN myView SEARCH tokens ANY IN doc.text RETURN doc // dynamic disjunction @@ -188,7 +188,7 @@ For example, given a collection `myCol` with the following documents: … a search on `someAttr` yields the following result: -```js +```aql FOR doc IN myView SEARCH doc.someAttr == "One" RETURN doc @@ -201,7 +201,7 @@ FOR doc IN myView A search on `anotherAttr` yields an empty result because only `someAttr` is indexed by the View: -```js +```aql FOR doc IN myView SEARCH doc.anotherAttr == "One" RETURN doc @@ -222,7 +222,7 @@ The documents emitted from a View can be sorted by attribute values with the standard [SORT() operation](operations-sort.html), using one or multiple attributes, in ascending or descending order (or a mix thereof). -```js +```aql FOR doc IN viewName SORT doc.text, doc.value DESC RETURN doc @@ -245,7 +245,7 @@ Therefore the ArangoSearch scoring functions can work _only_ on documents emitted from a View, as both the corresponding `SEARCH` expression and the View itself are consulted in order to sort the results. -```js +```aql FOR doc IN viewName SEARCH ... 
SORT BM25(doc) DESC @@ -290,7 +290,7 @@ Given a View with three linked collections `coll1`, `coll2` and `coll3` it is possible to return documents from the first two collections only and ignore the third using the `collections` option: -```js +```aql FOR doc IN viewName SEARCH true OPTIONS { collections: ["coll1", "coll2"] } RETURN doc diff --git a/3.10/aql/operations-sort.md b/3.10/aql/operations-sort.md index cc6997f192..e8a42e4ddd 100644 --- a/3.10/aql/operations-sort.md +++ b/3.10/aql/operations-sort.md @@ -23,7 +23,7 @@ Usage Example query that is sorting by lastName (in ascending order), then firstName (in ascending order), then by id (in descending order): -```js +```aql FOR u IN users SORT u.lastName, u.firstName, u.id DESC RETURN u @@ -35,21 +35,21 @@ the keywords `ASC` (ascending) and `DESC` can be used. Multiple sort criteria ca separated using commas. In this case the direction is specified for each expression separately. For example -```js +```aql SORT doc.lastName, doc.firstName ``` will first sort documents by lastName in ascending order and then by firstName in ascending order. -```js +```aql SORT doc.lastName DESC, doc.firstName ``` will first sort documents by lastName in descending order and then by firstName in ascending order. -```js +```aql SORT doc.lastName, doc.firstName DESC ``` @@ -65,7 +65,7 @@ always **undefined unless an explicit sort order is defined** using `SORT`. Constant `SORT` expressions can be used to indicate that no particular sort order is desired. -```js +```aql SORT null ``` diff --git a/3.10/aql/operations-update.md b/3.10/aql/operations-update.md index 48768b85de..f60d4a9082 100644 --- a/3.10/aql/operations-update.md +++ b/3.10/aql/operations-update.md @@ -29,7 +29,7 @@ be updated. `document` must be a document that contains the attributes and value to be updated. When using the first syntax, `document` must also contain the `_key` attribute to identify the document to be updated. 
-```js +```aql FOR u IN users UPDATE { _key: u._key, name: CONCAT(u.firstName, " ", u.lastName) } IN users ``` @@ -37,7 +37,7 @@ FOR u IN users The following query is invalid because it does not contain a `_key` attribute and thus it is not possible to determine the documents to be updated: -```js +```aql FOR u IN users UPDATE { name: CONCAT(u.firstName, " ", u.lastName) } IN users ``` @@ -53,17 +53,17 @@ to get the document key as string. The following queries are equivalent: -```js +```aql FOR u IN users UPDATE u._key WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users ``` -```js +```aql FOR u IN users UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users ``` -```js +```aql FOR u IN users UPDATE u WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users ``` @@ -71,12 +71,12 @@ FOR u IN users An update operation may update arbitrary documents which do not need to be identical to the ones produced by a preceding `FOR` statement: -```js +```aql FOR i IN 1..1000 UPDATE CONCAT('test', i) WITH { foobar: true } IN users ``` -```js +```aql FOR u IN users FILTER u.active == false UPDATE u WITH { status: 'inactive' } IN backup @@ -90,7 +90,7 @@ available after `UPDATE`). To access the current attribute value, you can usually refer to a document via the variable of the `FOR` loop, which is used to iterate over a collection: -```js +```aql FOR doc IN users UPDATE doc WITH { fullName: CONCAT(doc.firstName, " ", doc.lastName) @@ -101,11 +101,11 @@ If there is no loop, because a single document is updated only, then there might not be a variable like above (`doc`), which would let you refer to the document which is being updated: -```js +```aql UPDATE "john" WITH { ... } IN users ``` -```js +```aql LET key = PARSE_IDENTIFIER("users/john").key UPDATE key WITH { ... } IN users ``` @@ -113,7 +113,7 @@ UPDATE key WITH { ... 
} IN users To access the current value in this situation, the document has to be retrieved and stored in a variable first: -```js +```aql LET doc = DOCUMENT("users/john") UPDATE doc WITH { fullName: CONCAT(doc.firstName, " ", doc.lastName) @@ -123,7 +123,7 @@ UPDATE doc WITH { An existing attribute can be modified based on its current value this way, to increment a counter for instance: -```js +```aql UPDATE doc WITH { karma: doc.karma + 1 } IN users @@ -135,7 +135,7 @@ If the attribute does exist, then it is increased by `1`. Arrays can be mutated too of course: -```js +```aql UPDATE doc WITH { hobbies: PUSH(doc.hobbies, "swimming") } IN users @@ -149,7 +149,7 @@ Query options You can optionally set query options for the `UPDATE` operation: -```js +```aql UPDATE ... IN users OPTIONS { ... } ``` @@ -158,7 +158,7 @@ UPDATE ... IN users OPTIONS { ... } `ignoreErrors` can be used to suppress query errors that may occur when trying to update non-existing documents or violating unique key constraints: -```js +```aql FOR i IN 1..1000 UPDATE { _key: CONCAT('test', i) @@ -178,7 +178,7 @@ When updating an attribute with a null value, ArangoDB will not remove the attri from the document but store a null value for it. To get rid of attributes in an update operation, set them to null and provide the `keepNull` option: -```js +```aql FOR u IN users UPDATE u WITH { foobar: true, @@ -199,7 +199,7 @@ The following query will set the updated document's `name` attribute to the exac same value that is specified in the query. This is due to the `mergeObjects` option being set to `false`: -```js +```aql FOR u IN users UPDATE u WITH { name: { first: "foo", middle: "b.", last: "baz" } @@ -209,7 +209,7 @@ FOR u IN users Contrary, the following query will merge the contents of the `name` attribute in the original document with the value specified in the query: -```js +```aql FOR u IN users UPDATE u WITH { name: { first: "foo", middle: "b.", last: "baz" } @@ -228,7 +228,7 @@ explicitly. 
To make sure data are durable when an update query returns, there is the `waitForSync` query option: -```js +```aql FOR u IN users UPDATE u WITH { foobar: true @@ -241,7 +241,7 @@ In order to not accidentally overwrite documents that have been updated since yo them, you can use the option `ignoreRevs` to either let ArangoDB compare the `_rev` value and only succeed if they still match, or let ArangoDB ignore them (default): -```js +```aql FOR i IN 1..1000 UPDATE { _key: CONCAT('test', i), _rev: "1287623" } WITH { foobar: true } IN users @@ -259,7 +259,7 @@ Exclusive access can also speed up modification queries, because we avoid confli Use the `exclusive` option to achieve this effect on a per query basis: -```js +```aql FOR doc IN collection UPDATE doc WITH { updated: true } IN collection @@ -278,7 +278,7 @@ refers to document revisions after the update. Both `OLD` and `NEW` will contain all document attributes, even those not specified in the update expression. -``` +```aql UPDATE document IN collection options RETURN OLD UPDATE document IN collection options RETURN NEW UPDATE keyExpression WITH document IN collection options RETURN OLD @@ -288,7 +288,7 @@ UPDATE keyExpression WITH document IN collection options RETURN NEW Following is an example using a variable named `previous` to capture the original documents before modification. For each modified document, the document key is returned. 
-```js +```aql FOR u IN users UPDATE u WITH { value: "test" } IN users @@ -299,7 +299,7 @@ FOR u IN users The following query uses the `NEW` pseudo-value to return the updated documents, without some of the system attributes: -```js +```aql FOR u IN users UPDATE u WITH { value: "test" } IN users @@ -309,7 +309,7 @@ FOR u IN users It is also possible to return both `OLD` and `NEW`: -```js +```aql FOR u IN users UPDATE u WITH { value: "test" } IN users diff --git a/3.10/aql/operations-upsert.md b/3.10/aql/operations-upsert.md index f5cf44920e..78296fbea0 100644 --- a/3.10/aql/operations-upsert.md +++ b/3.10/aql/operations-upsert.md @@ -49,7 +49,7 @@ The following query will look in the *users* collection for a document with a sp by one. If it does not exist, a new document will be inserted, consisting of the attributes *name*, *logins*, and *dateCreated*: -```js +```aql UPSERT { name: 'superuser' } INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() } UPDATE { logins: OLD.logins + 1 } IN users @@ -94,7 +94,7 @@ In order to not accidentally update documents that have been written and updated you last fetched them you can use the option `ignoreRevs` to either let ArangoDB compare the `_rev` value and only succeed if they still match, or let ArangoDB ignore them (default): -```js +```aql FOR i IN 1..1000 UPSERT { _key: CONCAT('test', i)} INSERT {foobar: false} @@ -121,7 +121,7 @@ Exclusive access can also speed up modification queries, because we avoid confli Use the `exclusive` option to achieve this effect on a per query basis: -```js +```aql FOR i IN 1..1000 UPSERT { _key: CONCAT('test', i) } INSERT { foobar: false } @@ -135,7 +135,7 @@ The `indexHint` option will be used as a hint for the document lookup performed as part of the `UPSERT` operation, and can help in cases such as `UPSERT` not picking the best index automatically. 
-```js +```aql UPSERT { a: 1234 } INSERT { a: 1234, name: "AB" } UPDATE { name: "ABC" } IN myCollection @@ -151,7 +151,7 @@ Makes the index or indices specified in `indexHint` mandatory if enabled. The default is `false`. Also see [`forceIndexHint` Option of the `FOR` Operation](operations-for.html#forceindexhint). -```js +```aql UPSERT { a: 1234 } INSERT { a: 1234, name: "AB" } UPDATE { name: "ABC" } IN myCollection @@ -175,7 +175,7 @@ update/replace. This can also be used to check whether the upsert has performed an insert or an update internally: -```js +```aql UPSERT { name: 'superuser' } INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() } UPDATE { logins: OLD.logins + 1 } IN users diff --git a/3.10/aql/operations-with.md b/3.10/aql/operations-with.md index 1ed44998af..2c320930f2 100644 --- a/3.10/aql/operations-with.md +++ b/3.10/aql/operations-with.md @@ -66,7 +66,7 @@ the edges of the edge collection reference vertices of a collection called `managers`. This collection is declared at the beginning of the query using the `WITH` operation: -```js +```aql WITH managers FOR v, e, p IN 1..2 OUTBOUND 'users/1' usersHaveManagers RETURN { v, e, p } diff --git a/3.10/aql/operators.md b/3.10/aql/operators.md index 920daaa00e..0df580afc0 100644 --- a/3.10/aql/operators.md +++ b/3.10/aql/operators.md @@ -45,7 +45,7 @@ they test for strict equality or inequality (`0` is different to `"0"`, Some examples for comparison operations in AQL: -```js +```aql 0 == null // false 1 > 0 // true true != null // true @@ -73,7 +73,7 @@ means that two reverse solidus characters need to precede a literal percent sign or underscore. In arangosh, additional escaping is required, making it four backslashes in total preceding the to-be-escaped character. -```js +```aql "abc" LIKE "a%" // true "abc" LIKE "_bc" // true "a_b_foo" LIKE "a\\_b\\_foo" // true @@ -85,7 +85,7 @@ The `NOT LIKE` operator has the same characteristics as the `LIKE` operator but with the result negated. 
It is thus identical to `NOT (… LIKE …)`. Note the parentheses, which are necessary for certain expressions: -```js +```aql FOR doc IN coll RETURN NOT doc.attr LIKE "…" ``` @@ -111,7 +111,7 @@ of an array operator is an array. Examples: -```js +```aql [ 1, 2, 3 ] ALL IN [ 2, 3, 4 ] // false [ 1, 2, 3 ] ALL IN [ 1, 2, 3 ] // true [ 1, 2, 3 ] NONE IN [ 3 ] // false @@ -171,7 +171,7 @@ The result of the logical operators in AQL is defined as follows: Some examples for logical operations in AQL: -```js +```aql u.age > 15 && u.address.city != "" true || false NOT u.isInvalid @@ -193,7 +193,7 @@ type and is not necessarily a boolean value. For example, the following logical operations will return boolean values: -```js +```aql 25 > 1 && 42 != 7 // true 22 IN [ 23, 42 ] || 23 NOT IN [ 22, 7 ] // true 25 != 25 // false @@ -201,7 +201,7 @@ For example, the following logical operations will return boolean values: … whereas the following logical operations will not return boolean values: -```js +```aql 1 || 7 // 1 null || "foo" // "foo" null && true // null @@ -224,7 +224,7 @@ AQL supports the following arithmetic operators: Unary plus and unary minus are supported as well: -```js +```aql LET x = -5 LET y = 1 RETURN [-x, +y] @@ -240,7 +240,7 @@ Also see [Common Errors](common-errors.html). Some example arithmetic operations: -``` +```aql 1 + 1 33 - 99 12.4 * 4.5 @@ -271,7 +271,7 @@ aborted, but you may see a warning. Here are a few examples: -```js +```aql 1 + "a" // 1 1 + "99" // 100 1 + null // 1 @@ -300,7 +300,7 @@ evaluates to true, and the third operand otherwise. The expression gives back `u.userId` if `u.age` is greater than 15 or if `u.active` is *true*. Otherwise it returns *null*: -```js +```aql u.age > 15 || u.active == true ? u.userId : null ``` @@ -313,7 +313,7 @@ condition and the return value should be the same. The expression evaluates to `u.value` if `u.value` is truthy, otherwise a fixed string is given back: -```js +```aql u.value ? 
: 'value is null, 0 or not present' ``` @@ -341,7 +341,7 @@ defined range, with both bounding values included. *Examples* -``` +```aql 2010..2013 ``` diff --git a/3.10/arangosearch-case-sensitivity-and-diacritics.md b/3.10/arangosearch-case-sensitivity-and-diacritics.md index 7907352412..2f17a7b2f2 100644 --- a/3.10/arangosearch-case-sensitivity-and-diacritics.md +++ b/3.10/arangosearch-case-sensitivity-and-diacritics.md @@ -46,7 +46,7 @@ analyzers.save("norm_en", "norm", { locale: "en.utf-8", accent: false, case: "lo Match movie title, ignoring capitalization and using the base characters instead of accented characters (full string): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title == TOKENS("thé mäTRïX", "norm_en")[0], "norm_en") RETURN doc.title @@ -58,7 +58,7 @@ FOR doc IN imdb Match a title prefix (case-insensitive): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, "the matr"), "norm_en") RETURN doc.title diff --git a/3.10/arangosearch-exact-value-matching.md b/3.10/arangosearch-exact-value-matching.md index 6b364cf3d3..b3caa6c213 100644 --- a/3.10/arangosearch-exact-value-matching.md +++ b/3.10/arangosearch-exact-value-matching.md @@ -42,7 +42,7 @@ the entire string is equal (not matching substrings). Match exact movie title (case-sensitive, full string): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title == "The Matrix", "identity") RETURN doc.title @@ -56,7 +56,7 @@ It is not necessary to set the Analyzer context with the `ANALYZER()` function here, because the default Analyzer is `identity` anyway. The following query will return the exact same results: -```js +```aql FOR doc IN imdb SEARCH doc.title == "The Matrix" RETURN doc.title @@ -82,7 +82,7 @@ strings that you want to match. 
Match multiple exact movie titles using `OR`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title == "The Matrix" OR doc.title == "The Matrix Reloaded", "identity") RETURN doc.title @@ -95,7 +95,7 @@ FOR doc IN imdb Match multiple exact movie titles using `IN`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title IN ["The Matrix", "The Matrix Reloaded"], "identity") RETURN doc.title @@ -109,7 +109,7 @@ FOR doc IN imdb By substituting the array of strings with a bind parameter, it becomes possible to use the same query for an arbitrary amount of alternative strings to match: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title IN @titles, "identity") RETURN doc.title @@ -147,7 +147,7 @@ fulfill the criterion. This is also works with multiple alternatives using the Match movies that do not have the title `The Matrix`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title != "The Matrix", "identity") RETURN doc.title @@ -176,7 +176,7 @@ with the effect of returning many `null` values in the result. Match movies that neither have the title `The Matrix` nor `The Matrix Reloaded`. Post-filter the results to exclude implicit `null`s: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title NOT IN ["The Matrix", "The Matrix Reloaded"], "identity") FILTER doc.title != null @@ -200,7 +200,7 @@ to test whether there is a title field or not. On a single server with this particular dataset, the query is roughly five times faster than the previous one without `EXISTS()`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(EXISTS(doc.title) AND doc.title NOT IN ["The Matrix", "The Matrix Reloaded"], "identity") RETURN doc.title diff --git a/3.10/arangosearch-faceted-search.md b/3.10/arangosearch-faceted-search.md index fbd893cb7b..ef97f34ea4 100644 --- a/3.10/arangosearch-faceted-search.md +++ b/3.10/arangosearch-faceted-search.md @@ -46,7 +46,7 @@ collection. 
Find out all genre values by grouping by the `genre` attribute and count the number of occurrences: -```js +```aql FOR doc IN imdb COLLECT genre = doc.genre WITH COUNT INTO count RETURN { genre, count } @@ -69,7 +69,7 @@ not need to be indexed for this query. To look up a specific genre, the field needs to be indexed. The lookup itself utilizes the View index, but the `COLLECT` operation is still a post-operation: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.genre == "Action", "identity") COLLECT WITH COUNT INTO count @@ -87,7 +87,7 @@ can enable that can accurately determine the count from index data faster than the standard `COLLECT`. Also see [Count Approximation](arangosearch-performance.html#count-approximation). -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.genre == "Action", "identity") OPTIONS { countApproximate: "cost" } @@ -99,7 +99,7 @@ To apply this optimization to the faceted search paradigm over all genres, you can run and **cache** the following query that determines all unique genre values: -```js +```aql FOR doc IN imdb RETURN DISTINCT doc.genre ``` @@ -112,7 +112,7 @@ acceptable tradeoff for a faceted search. You can then use the genre list to look up each genre and retrieve the count while utilizing the count approximation optimization: -```js +```aql LET genres = [ "Action", "Adventure", "Animation", /* ... */ ] FOR genre IN genres LET count = FIRST( diff --git a/3.10/arangosearch-fulltext-token-search.md b/3.10/arangosearch-fulltext-token-search.md index 53b81cb630..e271ff516a 100644 --- a/3.10/arangosearch-fulltext-token-search.md +++ b/3.10/arangosearch-fulltext-token-search.md @@ -52,7 +52,7 @@ Token search is covered below. 
For phrase search see Search for movies with `dinosaur` or `park` (or both) in their description: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.description IN TOKENS("dinosaur park", "text_en"), "text_en") RETURN { @@ -71,7 +71,7 @@ FOR doc IN imdb Search for movies with both `dinosaur` and `park` in their description: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(TOKENS("dinosaur park", "text_en") ALL == doc.description , "text_en") RETURN { diff --git a/3.10/arangosearch-fuzzy-search.md b/3.10/arangosearch-fuzzy-search.md index 793e3611bd..af021da746 100644 --- a/3.10/arangosearch-fuzzy-search.md +++ b/3.10/arangosearch-fuzzy-search.md @@ -160,7 +160,7 @@ There are the following AQL string functions to calculate the - [NGRAM_SIMILARITY()](aql/functions-string.html#ngram_similarity) - [NGRAM_POSITIONAL_SIMILARITY()](aql/functions-string.html#ngram_positional_similarity) -```js +```aql RETURN [ LEVENSHTEIN_DISTANCE("galaxy", "glaaxy"), // 1 (with transpositions) NGRAM_SIMILARITY("avocado", "vocals", 3) // 0.5 (using trigrams) @@ -226,7 +226,7 @@ Levenshtein distance equal to or lower than this value will be a match and the respective documents will be included in the search result. The query will find the token `galaxy` as the edit distance to `galxy` is `1`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER( LEVENSHTEIN_MATCH( @@ -293,7 +293,7 @@ not including the original string: Search for actor names with an _n_-gram similarity of at least 50%. -```js +```aql FOR doc IN imdb SEARCH NGRAM_MATCH( doc.name, diff --git a/3.10/arangosearch-geospatial-search.md b/3.10/arangosearch-geospatial-search.md index 1339d7a6ab..63e786b991 100644 --- a/3.10/arangosearch-geospatial-search.md +++ b/3.10/arangosearch-geospatial-search.md @@ -105,7 +105,7 @@ Using the Museum of Modern Arts as reference location, find restaurants within a 100 meter radius. 
Return the matches sorted by distance and include how far away they are from the reference point in the result: -```js +```aql LET moma = GEO_POINT(-73.983, 40.764) FOR doc IN restaurantsView SEARCH ANALYZER(GEO_DISTANCE(doc.location, moma) < 100, "geojson") @@ -120,7 +120,7 @@ FOR doc IN restaurantsView Search for restaurants with `Cafe` in their name within a radius of 1000 meters and return the ten closest matches: -```js +```aql LET moma = GEO_POINT(-73.983, 40.764) FOR doc IN restaurantsView SEARCH ANALYZER(LIKE(doc.name, "%Cafe%"), "identity") @@ -141,7 +141,7 @@ First off, search for the neighborhood `Upper West Side` in a subquery and return its GeoJSON Polygon. Then search for restaurants that are contained in this polygon and return them together with the polygon itself: -```js +```aql LET upperWestSide = FIRST( FOR doc IN restaurantsView SEARCH ANALYZER(doc.name == "Upper West Side", "identity") @@ -161,7 +161,7 @@ FOR result IN PUSH( You do not have to look up the polygon, you can also provide one inline. 
It is also not necessary to return the polygon, you can return the matches only: -```js +```aql LET upperWestSide = { "coordinates": [ [ @@ -259,7 +259,7 @@ FOR doc IN restaurantsView Define a GeoJSON polygon that is a rectangle, then search for neighborhoods that are fully contained in this area: -```js +```aql LET sides = { left: -74, top: 40.8, @@ -296,7 +296,7 @@ Take a look at the lunch break video about the Define a GeoJSON polygon that is a rectangle, then search for neighborhoods that intersect with this area: -```js +```aql LET sides = { left: -74, top: 40.8, diff --git a/3.10/arangosearch-performance.md b/3.10/arangosearch-performance.md index 9a433bd12a..d961e80af9 100644 --- a/3.10/arangosearch-performance.md +++ b/3.10/arangosearch-performance.md @@ -49,7 +49,7 @@ View definition example: AQL query example: -```js +```aql FOR doc IN viewName SORT doc.name RETURN doc @@ -57,7 +57,7 @@ FOR doc IN viewName Execution plan **without** a sorted index being used: -``` +```aql Execution plan: Id NodeType Est. Comment 1 SingletonNode 1 * ROOT @@ -69,7 +69,7 @@ Execution plan: Execution plan with a the primary sort order of the index being utilized: -``` +```aql Execution plan: Id NodeType Est. Comment 1 SingletonNode 1 * ROOT @@ -141,7 +141,7 @@ In above View definition, the document attribute *categories* is indexed for searching, *publishedAt* is used as primary sort order and *title* as well as *categories* are stored in the View using the new `storedValues` property. -```js +```aql FOR doc IN articlesView SEARCH doc.categories == "recipes" SORT doc.publishedAt DESC @@ -159,7 +159,7 @@ no documents need to be fetched from the storage engine to answer the query. This is shown in the execution plan as a comment to the *EnumerateViewNode*: `/* view query without materialization */` -```js +```aql Execution plan: Id NodeType Est. 
Comment 1 SingletonNode 1 * ROOT @@ -182,7 +182,7 @@ Optimization rules applied: The `SEARCH` operation in AQL accepts an option `conditionOptimization` to give you control over the search criteria optimization: -```js +```aql FOR doc IN myView SEARCH doc.val > 10 AND doc.val > 5 /* more conditions */ OPTIONS { conditionOptimization: "none" } @@ -213,7 +213,7 @@ an approximate result with O(1) complexity. It gives a precise result if the (e.g. `SEARCH doc.field == "value"`), the usual eventual consistency of Views aside. -```js +```aql FOR doc IN viewName SEARCH doc.name == "Carol" OPTIONS { countApproximate: "cost" } diff --git a/3.10/arangosearch-phrase-and-proximity-search.md b/3.10/arangosearch-phrase-and-proximity-search.md index 251a752bed..630e9e78f9 100644 --- a/3.10/arangosearch-phrase-and-proximity-search.md +++ b/3.10/arangosearch-phrase-and-proximity-search.md @@ -39,7 +39,7 @@ tokens may occur between defined tokens for word proximity searches. Search for movies that have the (normalized and stemmed) tokens `biggest` and `blockbust` in their description, in this order: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(PHRASE(doc.description, "BIGGEST Blockbuster"), "text_en") RETURN { @@ -56,7 +56,7 @@ FOR doc IN imdb The `text_en` Analyzer set via the context is applied to the search term `BIGGEST Blockbuster`, effectively resulting in the query: -```js +```aql FOR doc IN imdb SEARCH PHRASE(doc.description, ["biggest", "blockbust"], "text_en") RETURN { @@ -68,7 +68,7 @@ FOR doc IN imdb The search phrase can be handed in via a bind parameter, but it can also be constructed dynamically using a subquery for instance: -```js +```aql LET p = ( FOR word IN ["tale", "of", "a", "woman"] SORT RAND() @@ -96,7 +96,7 @@ one arbitrary word in between the two words, for instance. 
Match movies that contain the phrase `epic film` in their description, where `` can be exactly one arbitrary token: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(PHRASE(doc.description, "epic", 1, "film"), "text_en") RETURN { @@ -121,7 +121,7 @@ performs a proximity search for movies with the phrase `family business` or `family business` in their description: -```js +```aql LET title = DOCUMENT("imdb_vertices/39967").title // Family Business FOR doc IN imdb SEARCH ANALYZER( @@ -151,7 +151,7 @@ of options. Match movies where the title has a token that starts with `Härr` (normalized to `harr`), followed by six arbitrary tokens and then a token that contains `eni`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(PHRASE(doc.title, {STARTS_WITH: TOKENS("Härr", "text_en")[0]}, 6, {WILDCARD: "%eni%"}), "text_en") RETURN doc.title diff --git a/3.10/arangosearch-prefix-matching.md b/3.10/arangosearch-prefix-matching.md index 2e002e1344..747e186067 100644 --- a/3.10/arangosearch-prefix-matching.md +++ b/3.10/arangosearch-prefix-matching.md @@ -57,7 +57,7 @@ It creates the necessary index data to perform prefix queries with Match all movie titles that start with `"The Matri"` (case-sensitive): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, "The Matr"), "identity") RETURN doc.title @@ -74,7 +74,7 @@ FOR doc IN imdb Match movie titles that start with either `"The Matr"` or `"Harry Pot"` using `OR`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, "The Matr") OR STARTS_WITH(doc.title, "Harry Pot"), "identity") RETURN doc.title @@ -101,7 +101,7 @@ Match movie titles that start with either `"The Matr"` or `"Harry Pot"` utilizing the feature of the `STARTS_WITH()` function that allows you to pass multiple possible prefixes as array of strings, of which one must match: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, ["The Matr", "Harry Pot"]), "identity") RETURN doc.title @@ -157,7 +157,7 @@ conditions for different 
tokens can be fulfilled. Match movie titles that contain three out of five prefixes: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, TOKENS("Sec Cham Har Pot Phoe", "text_en"), 3), "text_en") RETURN doc.title @@ -171,7 +171,7 @@ FOR doc IN imdb You can calculate the number of prefixes that need to match dynamically, for example to require that all prefixes must match: -```js +```aql LET prefixes = TOKENS("Brot Blu", "text_en") FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, prefixes, LENGTH(prefixes)), "text_en") @@ -246,7 +246,7 @@ db._query(`RETURN TOKENS("Ocean Equilibrium", "edge_ngram")`); Match movie titles that have a word starting with `"ocea"`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title == "ocea", "edge_ngram") RETURN doc.title @@ -275,7 +275,7 @@ analyzers.save("match_edge_ngram", "text", { locale: "en.utf-8", accent: false, Now we can also match movie titles that start with `"Oceä"` (normalized to `"ocea"`): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title == TOKENS("Oceä", "match_edge_ngram")[0], "edge_ngram") RETURN doc.title @@ -296,7 +296,7 @@ if `preserveOriginal` is enabled. For example, this query does not match anything because the longest indexed edge _n_-gram is `"equili"` but the search term is nine characters long: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.title == TOKENS("Equilibri", "match_edge_ngram")[0], "edge_ngram") RETURN doc.title @@ -306,7 +306,7 @@ Searching for `"Equilibrium"` does match because the full token `"equilibrium"` is indexed by our custom Analyzer thanks to `preserveOriginal`. 
We can take advantage of the full token being indexed with the `STARTS_WITH()` function: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, TOKENS("Equilibri", "match_edge_ngram")), "edge_ngram") RETURN doc.title diff --git a/3.10/arangosearch-range-queries.md b/3.10/arangosearch-range-queries.md index aa33704edb..f7be96e2ce 100644 --- a/3.10/arangosearch-range-queries.md +++ b/3.10/arangosearch-range-queries.md @@ -62,7 +62,7 @@ no Analyzer using an empty array `[]` as shown below. Match movies with a runtime of exactly `5` minutes: -```js +```aql FOR doc IN imdb SEARCH doc.runtime == 5 RETURN { @@ -84,7 +84,7 @@ Analyzers at all. Match movies with a runtime of `12`, `24` or `77` minutes: -```js +```aql FOR doc IN imdb SEARCH doc.runtime IN [12, 24, 77] RETURN { @@ -103,7 +103,7 @@ FOR doc IN imdb Match movies with a runtime over `300` minutes and sort them from longest to shortest runtime: -```js +```aql FOR doc IN imdb SEARCH doc.runtime > 300 SORT doc.runtime DESC @@ -140,7 +140,7 @@ included or excluded in the range. Match movies with a runtime of `4` to `6` minutes with the range operator: -```js +```aql FOR doc IN imdb SEARCH doc.runtime IN 4..6 RETURN { @@ -165,7 +165,7 @@ matches `4`, `5` and `6`. Match movies with a runtime of `4` to `6` minutes with the `IN_RANGE()` function (inclusive on both ends): -```js +```aql FOR doc IN imdb SEARCH IN_RANGE(doc.runtime, 4, 6, true, true) RETURN { @@ -192,7 +192,7 @@ Match movies with a runtime of `5` minutes or less, as well as `500` minutes or more, but not with a runtime of `0` minutes. Sort the matches by runtime in ascending order: -```js +```aql FOR doc IN imdb SEARCH (doc.runtime <= 5 OR doc.runtime >= 500) AND doc.runtime != 0 SORT doc.runtime @@ -257,7 +257,7 @@ Also see [Known Issues](release-notes-known-issues310.html#arangosearch). 
Match movies where the name is `>= Wu` and `< Y`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(IN_RANGE(doc.name, "Wu", "Y", true, false), "identity") RETURN doc.name diff --git a/3.10/arangosearch-ranking.md b/3.10/arangosearch-ranking.md index 5b95ad2a00..6921b60d5e 100644 --- a/3.10/arangosearch-ranking.md +++ b/3.10/arangosearch-ranking.md @@ -36,7 +36,7 @@ expression and set the order to descending. Scoring functions expect the document emitted by a `FOR … IN` loop that iterates over a View as first argument. -```js +```aql FOR doc IN viewName SEARCH … SORT BM25(doc) DESC @@ -45,7 +45,7 @@ FOR doc IN viewName You can also return the ranking score as part of the result. -```js +```aql FOR doc IN viewName SEARCH … RETURN MERGE(doc, { bm25: BM25(doc), tfidf: TFIDF(doc) }) @@ -80,7 +80,7 @@ inverse document frequency (IDF). Search for movies with certain keywords in their description and rank the results using the [`BM25()` function](aql/functions-arangosearch.html#bm25): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.description IN TOKENS("amazing action world alien sci-fi science documental galaxy", "text_en"), "text_en") SORT BM25(doc) DESC @@ -107,7 +107,7 @@ FOR doc IN imdb Do the same but with the [`TFIDF()` function](aql/functions-arangosearch.html#tfidf): -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.description IN TOKENS("amazing action world alien sci-fi science documental galaxy", "text_en"), "text_en") SORT TFIDF(doc) DESC @@ -168,7 +168,7 @@ boosted parts of the search expression will get higher scores. Prefer `galaxy` over the other keywords: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.description IN TOKENS("amazing action world alien sci-fi science documental", "text_en") OR BOOST(doc.description IN TOKENS("galaxy", "text_en"), 5), "text_en") @@ -198,7 +198,7 @@ If you are an information retrieval expert and want to fine-tuning the weighting schemes at query time, then you can do so. 
The `BM25()` function accepts free coefficients as parameters to turn it into BM15 for instance: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(doc.description IN TOKENS("amazing action world alien sci-fi science documental", "text_en") OR BOOST(doc.description IN TOKENS("galaxy", "text_en"), 5), "text_en") @@ -231,7 +231,7 @@ of the document. Match movies with the (normalized) phrase `star war` in the title and calculate a custom score based on BM25 and the movie runtime to favor longer movies: -```js +```aql FOR doc IN imdb SEARCH PHRASE(doc.title, "Star Wars", "text_en") LET score = BM25(doc) * LOG(doc.runtime + 1) diff --git a/3.10/arangosearch-wildcard-search.md b/3.10/arangosearch-wildcard-search.md index d13b247d56..3f83fc0ad7 100644 --- a/3.10/arangosearch-wildcard-search.md +++ b/3.10/arangosearch-wildcard-search.md @@ -72,7 +72,7 @@ escaping (`\\\\` in bind variables and `\\\\\\\\` in queries) Match all titles that starts with `The Matr` using `LIKE()`, where `_` stands for a single wildcard character and `%` for an arbitrary amount: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(LIKE(doc.title, "The Matr%"), "identity") RETURN doc.title @@ -88,7 +88,7 @@ FOR doc IN imdb You can achieve the same with the `STARTS_WITH()` function: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(STARTS_WITH(doc.title, "The Matr"), "identity") RETURN doc.title @@ -96,7 +96,7 @@ FOR doc IN imdb Match all titles that contain `Mat` using `LIKE()`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(LIKE(doc.title, "%Mat%"), "identity") RETURN doc.title @@ -118,7 +118,7 @@ FOR doc IN imdb Match all titles that end with `rix` using `LIKE()`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(LIKE(doc.title, "%rix"), "identity") RETURN doc.title @@ -138,7 +138,7 @@ Match all titles that have an `H` as first letter, followed by two arbitrary characters, followed by `ry` and any amount of characters after that. 
It will match titles starting with `Harry` and `Henry`: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(LIKE(doc.title, "H__ry%"), "identity") RETURN doc.title @@ -156,7 +156,7 @@ FOR doc IN imdb Use a bind parameter as input, but escape the characters with special meaning and perform a contains-style search by prepending and appending a percent sign: -```js +```aql FOR doc IN imdb SEARCH ANALYZER(LIKE(doc.title, CONCAT("%", SUBSTITUTE(@term, ["_", "%"], ["\\_", "\\%"]), "%")), "identity") RETURN doc.title diff --git a/3.10/arangosearch.md b/3.10/arangosearch.md index 47a7a85201..ad99a3c177 100644 --- a/3.10/arangosearch.md +++ b/3.10/arangosearch.md @@ -105,7 +105,7 @@ logical and comparison operators, as well as get processed by the default `identity` Analyzer, which means that they get indexed unaltered. 7. Click on _QUERIES_ in the main navigation and try the following query: - ```js + ```aql FOR doc IN food_view RETURN doc ``` @@ -113,7 +113,7 @@ logical and comparison operators, as well as (indexed) documents. You should see the documents stored in `food` as result. 8. Now add a search expression. Unlike with regular collections where you would use `FILTER`, a `SEARCH` operation is needed to utilize the View index: - ```js + ```aql FOR doc IN food_view SEARCH doc.name == "avocado" RETURN doc @@ -136,7 +136,7 @@ with one of the Analyzers that a field was indexed with as per the View definition - and this happened to be the case. We can rewrite the query to be more explicit about the Analyzer context: -```js +```aql FOR doc IN food_view SEARCH ANALYZER(doc.name == "avocado", "identity") RETURN doc @@ -160,7 +160,7 @@ variants of the [`EXISTS()` function](aql/functions-arangosearch.html#exists). The prerequisite for this is that you change `"storeValues"` in the View definition from `"none"` to `"id"`. 
You can then run a query as shown below: -```js +```aql RETURN LENGTH( FOR doc IN food_view SEARCH EXISTS(doc.name, "analyzer", "identity") @@ -237,7 +237,7 @@ English text. with the `text_en` Analyzer in addition to the `identity` Analyzer. 3. Run below query that sets `text_en` as context Analyzer and searches for the word `pepper`: - ```js + ```aql FOR doc IN food_view SEARCH ANALYZER(doc.name == "pepper", "text_en") RETURN doc.name @@ -245,7 +245,7 @@ English text. 4. It matches `chili pepper` because the Analyzer tokenized it into `chili` and `pepper` and the latter matches the search criterion. Compare that to the `identity` Analyzer: - ```js + ```aql FOR doc IN food_view SEARCH ANALYZER(doc.name == "pepper", "identity") RETURN doc.name @@ -253,7 +253,7 @@ English text. It does not match because `chili pepper` is indexed as a single token that does not match the search criterion. 5. Switch back to the `text_en` Analyzer but with a different search term: - ```js + ```aql FOR doc IN food_view SEARCH ANALYZER(doc.name == "PéPPêR", "text_en") RETURN doc.name @@ -263,7 +263,7 @@ English text. The problem is that this transformation is applied to the document attribute when it gets indexed, but we haven't applied it to the search term. 6. If we apply the same transformation then we get a match: - ```js + ```aql FOR doc IN food_view SEARCH ANALYZER(doc.name == TOKENS("PéPPêR", "text_en")[0], "text_en") RETURN doc.name @@ -282,7 +282,7 @@ expressions. ArangoSearch AQL functions take either an expression or a reference of an attribute path as first argument. -```js +```aql ANALYZER(, …) STARTS_WITH(doc.attribute, …) ``` @@ -291,7 +291,7 @@ If an expression is expected, it means that search conditions can expressed in AQL syntax. They are typically function calls to ArangoSearch search functions, possibly nested and/or using logical operators for multiple conditions. 
-```js +```aql ANALYZER(STARTS_WITH(doc.name, "chi") OR STARTS_WITH(doc.name, "tom"), "identity") ``` @@ -304,7 +304,7 @@ which the field was indexed. It can be easier and cleaner to use `ANALYZER()` even if you exclusively use functions that take an Analyzer argument and leave that argument out: -```js +```aql // Analyzer specified in each function call PHRASE(doc.name, "chili pepper", "text_en") OR PHRASE(doc.name, "tomato", "text_en") @@ -323,7 +323,7 @@ Certain expressions do not require any ArangoSearch functions, such as basic comparisons. However, the Analyzer used for searching will be `"identity"` unless `ANALYZER()` is used to set a different one. -```js +```aql // The "identity" Analyzer will be used by default SEARCH doc.name == "avocado" @@ -340,7 +340,7 @@ which attribute you want to test for as an unquoted string literal. For example `doc.attr` or `doc.deeply.nested.attr` but not `"doc.attr"`. You can also use the bracket notation `doc["attr"]`. -```js +```aql FOR doc IN viewName SEARCH STARTS_WITH(doc.deeply.nested["attr"], "avoca") RETURN doc @@ -359,7 +359,7 @@ available. Here is an example that sorts results from high to low BM25 score and also returns the score: -```js +```aql FOR doc IN food_view SEARCH ANALYZER(doc.type == "vegetable", "identity") SORT BM25(doc) DESC @@ -377,7 +377,7 @@ passed to the [`BM25()` function](aql/functions-arangosearch.html#bm25). 
The [`TFIDF()` function](aql/functions-arangosearch.html#tfidf) works the same: -```js +```aql FOR doc IN food_view SEARCH ANALYZER(doc.type == "vegetable", "identity") SORT TFIDF(doc) DESC @@ -464,13 +464,13 @@ following example document: The View will automatically index `apple pie`, processed with the `identity` and `text_en` Analyzers, and it can then be queried like this: -```js +```aql FOR doc IN food_view SEARCH ANALYZER(doc.value.nested.deep == "apple pie", "identity") RETURN doc ``` -```js +```aql FOR doc IN food_view SEARCH ANALYZER(doc.value.nested.deep IN TOKENS("pie", "text_en"), "text_en") RETURN doc @@ -505,7 +505,7 @@ A View that is configured to index the field `value` including sub-fields will index the individual numbers under the path `value.nested.deep`, which you can query for like: -```js +```aql FOR doc IN viewName SEARCH doc.value.nested.deep == 2 RETURN doc @@ -515,7 +515,7 @@ This is different to `FILTER` operations, where you would use an [array comparison operator](aql/operators.html#array-comparison-operators) to find an element in the array: -```js +```aql FOR doc IN collection FILTER doc.value.nested.deep ANY == 2 RETURN doc @@ -524,14 +524,14 @@ FOR doc IN collection You can set `trackListPositions` to `true` if you want to query for a value at a specific array index: -```js +```aql SEARCH doc.value.nested.deep[1] == 2 ``` With `trackListPositions` enabled there will be **no match** for the document anymore if the specification of an array index is left out in the expression: -```js +```aql SEARCH doc.value.nested.deep == 2 ``` @@ -545,13 +545,13 @@ For example, given the field `text` is analyzed with `"text_en"` and contains the string `"a quick brown fox jumps over the lazy dog"`, the following expression will be true: -```js +```aql ANALYZER(doc.text == 'fox', "text_en") ``` Note that the `"text_en"` Analyzer stems the words, so this is also true: -```js +```aql ANALYZER(doc.text == 'jump', "text_en") ``` @@ -565,14 +565,14 @@ 
any element of the array. For example, given: … the following will be true: -```js +```aql ANALYZER(doc.text == 'jump', "text_en") ``` With `trackListPositions: true` you would need to specify the index of the array element `"jumps over the"` to be true: -```js +```aql ANALYZER(doc.text[2] == 'jump', "text_en") ``` diff --git a/3.10/architecture-deployment-modes-cluster-sharding.md b/3.10/architecture-deployment-modes-cluster-sharding.md index eb167ce179..7959d25994 100644 --- a/3.10/architecture-deployment-modes-cluster-sharding.md +++ b/3.10/architecture-deployment-modes-cluster-sharding.md @@ -134,7 +134,7 @@ of the update / replace or removal operation, or in case of AQL, that you use a document reference or an object for the UPDATE, REPLACE or REMOVE operation which includes the shard key attributes: -```js +```aql UPDATE { _key: "123", country: "…" } WITH { … } IN sharded_collection ``` diff --git a/3.10/data-modeling-documents-document-methods.md b/3.10/data-modeling-documents-document-methods.md index 4f04a7e19e..da1e599b33 100644 --- a/3.10/data-modeling-documents-document-methods.md +++ b/3.10/data-modeling-documents-document-methods.md @@ -116,7 +116,7 @@ and will match. -``` +```js collection.byExample(path1, value1, ...) ``` diff --git a/3.10/data-modeling-operational-factors.md b/3.10/data-modeling-operational-factors.md index 8ad3fd9b75..091812030e 100644 --- a/3.10/data-modeling-operational-factors.md +++ b/3.10/data-modeling-operational-factors.md @@ -126,7 +126,7 @@ option together with a modification operation. 
Either let ArangoDB compare the `_rev` value and only succeed if they still match, or let ArangoDB ignore them (default): -```js +```aql FOR i IN 1..1000 UPDATE { _key: CONCAT('test', i), _rev: "1287623" } WITH { foobar: true } IN users @@ -292,7 +292,7 @@ You may use the _exclusive_ query option for modifying AQL queries, to improve t This has the downside that no concurrent writes may occur on the collection, but ArangoDB is able to use a special fast-path which should improve the performance by up to 50% for large collections. -```js +```aql FOR doc IN mycollection UPDATE doc._key WITH { foobar: true } IN mycollection diff --git a/3.10/deployment-cluster-preliminary-information.md b/3.10/deployment-cluster-preliminary-information.md index 1734ceed21..d7657402f8 100644 --- a/3.10/deployment-cluster-preliminary-information.md +++ b/3.10/deployment-cluster-preliminary-information.md @@ -21,7 +21,7 @@ somewhere else and pass it to your `arangod` cluster instance via The data directory is configured in `arangod.conf`: -``` +```conf [database] directory = /var/lib/arangodb3 ``` @@ -31,7 +31,7 @@ as the standalone instance. If that is not already the case, change the `database.directory` entry in `arangod.conf` as seen above to a different directory -``` +```conf # in arangod.conf: [database] directory = /var/lib/arangodb3.standalone @@ -39,7 +39,7 @@ directory = /var/lib/arangodb3.standalone and create it with the correct permissions: -``` +```bash $ mkdir -vp /var/lib/arangodb3.standalone $ chown -c arangodb:arangodb /var/lib/arangodb3.standalone $ chmod -c 0700 /var/lib/arangodb3.standalone @@ -51,14 +51,14 @@ The standalone instance must use a different socket, i.e. it cannot use the same port on the same network interface than the Cluster. For that, change the standalone instance's port in `/etc/arangodb3/arangod.conf` -``` +```conf [server] endpoint = tcp://127.0.0.1:8529 ``` to something unused, e.g. 
-``` +```conf [server] endpoint = tcp://127.1.2.3:45678 ``` @@ -76,14 +76,14 @@ In addition, the installation might overwrite your _init_ script otherwise. If you have previously changed the default _init_ script, move it out of the way -``` +```bash $ mv -vi /etc/init.d/arangodb3 /etc/init.d/arangodb3.cluster ``` and add it to the _autostart_; how this is done depends on your distribution and _init_ system. On older Debian and Ubuntu systems, you can use `update-rc.d`: -``` +```bash $ update-rc.d arangodb3.cluster defaults ``` diff --git a/3.10/deployment-single-instance-manual-start.md b/3.10/deployment-single-instance-manual-start.md index 90cb3a2de2..e36935e2ee 100644 --- a/3.10/deployment-single-instance-manual-start.md +++ b/3.10/deployment-single-instance-manual-start.md @@ -13,7 +13,7 @@ Local Start We will assume that your IP is 127.0.0.1 and that the port 8529 is free: -``` +```bash arangod --server.endpoint tcp://0.0.0.0:8529 \ --database.directory standalone & ``` @@ -38,7 +38,7 @@ to enable process intercommunication. An example configuration might look like this: -``` +```bash docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb/arangodb arangod \ --server.endpoint tcp://0.0.0.0:8529\ ``` diff --git a/3.10/deployment-standalone-agency.md b/3.10/deployment-standalone-agency.md index ce964d3526..3468c260c2 100644 --- a/3.10/deployment-standalone-agency.md +++ b/3.10/deployment-standalone-agency.md @@ -114,14 +114,14 @@ An Agency started from scratch will deal with the simplest query as follows: curl -L localhost:8531/_api/agency/read -d '[["/"]]' ``` -```js +```json [{}] ``` The above request for an empty key value store will return with an empty document. The inner array brackets will aggregate a result from multiple sources in the key-value-store while the outer array will deliver multiple such aggregated results. Also note the `-L` curl flag, which allows the request to follow redirects to the current leader. 
Consider the following key-value-store: -```js +```json { "baz": 12, "corge": { @@ -143,7 +143,7 @@ The following array of read transactions will yield: curl -L localhost:8531/_api/agency/read -d '[["/foo", "/foo/bar", "/baz"],["/qux"]]' ``` -```js +```json [ { "baz": 12, @@ -160,7 +160,8 @@ curl -L localhost:8531/_api/agency/read -d '[["/foo", "/foo/bar", "/baz"],["/qux ``` Note that the result is an array of two results for the first and second read transactions from above accordingly. Also note that the results from the first read transaction are aggregated into -```js + +```json { "baz": 12, "foo": { diff --git a/3.10/graphs-pregel.md b/3.10/graphs-pregel.md index 8f6d279d15..6dfa65f2dc 100644 --- a/3.10/graphs-pregel.md +++ b/3.10/graphs-pregel.md @@ -161,7 +161,7 @@ The result field names depend on the algorithm in both cases. For example, you might want to query only nodes with the highest rank from the result set of a PageRank execution: -```js +```aql FOR v IN PREGEL_RESULT() FILTER v.result >= 0.01 RETURN v._key @@ -174,7 +174,7 @@ sufficient to tell vertices from different collections apart. 
In this case, `PREGEL_RESULT()` can be given a second parameter `withId`, which will make it return the `_id` values of the vertices as well: -```js +```aql FOR v IN PREGEL_RESULT(, true) FILTER v.result >= 0.01 RETURN v._id diff --git a/3.10/http/agency.md b/3.10/http/agency.md index 87092bdeb4..d758cfab5e 100644 --- a/3.10/http/agency.md +++ b/3.10/http/agency.md @@ -33,9 +33,9 @@ Consider the following write operation into a pristine Agency: ``` curl -L http://$SERVER:$PORT/_api/agency/write -d '[[{"a":{"op":"set","new":{"b":{"c":[1,2,3]},"e":12}},"d":{"op":"set","new":false}}]]' ``` -```js -[{results:[1]}] +```json +[{"results":[1]}] ``` And the subsequent read operation @@ -43,7 +43,8 @@ And the subsequent read operation ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/"]]' ``` -```js + +```json [ { "a": { @@ -70,7 +71,8 @@ Let's start with the above initialized key-value store in the following. Let us ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b"]]' ``` -```js + +```json [ { "a": { @@ -87,7 +89,8 @@ And ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/c"]]' ``` -```js + +```json [ { "a": { @@ -106,7 +109,8 @@ The second outer array brackets in read operations correspond to transactions, m ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/e"],["/d","/a/b"]]' ``` -```js + +```json [ { "a": { @@ -132,7 +136,8 @@ Let's try to fetch a value from the key-value-store, which does not exist: ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/d"]]' ``` -```js + +```json [ { "a": { @@ -147,7 +152,8 @@ The result returns the cross section of the requested path and the key-value-sto ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/d","/d"]]' ``` -```js + +```json [ { "a": { @@ -163,7 +169,8 @@ And this last read operation should return: ``` curl -L http://$SERVER:$PORT/_api/agency/read -d '[["/a/b/c"],["/a/b/d"],["/a/x/y"],["/y"],["/a/b","/a/x" ]]' ``` -```js + +```json [ {"a":{"b":{"c":[1,2,3]}}}, 
{"a":{"b":{}}}, @@ -181,13 +188,13 @@ The write API must obviously be more versatile and needs a more detailed appreci For `P`, the value of a key is an object with attributes `"old"`, `"oldNot"`, `"oldEmpty"` or `"isArray"`. With `"old"` one can specify a JSON value that has to be present for the condition to be fulfilled. With `"oldNot"` one may check for a value to not be equal to the test. While with `"oldEmpty"`, which can take a boolean value, one can specify that the key value needs to be not set `true` or set to an arbitrary value `false`. With `"isArray"` one can specify that the value must be an array. As a shortcut, `"old"` values of scalar or array type may be stored directly in the attribute. Examples: -```js +```json { "/a/b/c": { "old": [1,2,3] }} ``` -is a precondition specifying that the previous value of the key `"/a/b/c"` key must be `[1,2,3]`. If and only if the value of the precondition is not an object we provide a notation, where the keywork `old` may be omitted. Thus, the above check may be shortcut as +is a precondition specifying that the previous value of the key `"/a/b/c"` key must be `[1,2,3]`. If and only if the value of the precondition is not an object we provide a notation, where the keyword `old` may be omitted. 
Thus, the above check may be shortcut as -```js +```json { "/a/b/c": [1, 2, 3] } ``` @@ -196,7 +203,8 @@ Consider the Agency in initialized as above let's review the responses from the ``` curl -L http://$SERVER:$PORT/_api/agency/write -d '[[{"/a/b/c":{"op":"set","new":[1,2,3,4]},"/a/b/pi":{"op":"set","new":"some text"}},{"/a/b/c":{"old":[1,2,3]}}]]' ``` -```js + +```json { "results": [19] } @@ -204,7 +212,7 @@ curl -L http://$SERVER:$PORT/_api/agency/write -d '[[{"/a/b/c":{"op":"set","new" The condition is fulfilled in the first run and would be wrong in a second returning -```js +```json { "results": [0] } @@ -212,13 +220,13 @@ The condition is fulfilled in the first run and would be wrong in a second retur `0` as a result means that the precondition failed and no "real" log number was returned. -```js +```json { "/a/e": { "oldEmpty": false } } ``` means that the value of the key `"a/e"` must be set (to something, which can be `null`!). The condition -```js +```json { "/a/e": { "oldEmpty": true } } ``` @@ -239,32 +247,34 @@ The update value U is an object, the attribute names are again key strings and t `"ttl"`, if present, the new value that is being set gets a time to live in seconds, given by a numeric value in this attribute. It is only guaranteed that the actual removal of the value is done according to the system clock, so up to clock skew between servers. The removal is done by an additional write transaction that is automatically generated between the regular writes. Additional rule: If none of `"new"` and `"op"` is set or the value is not even an object, then this is to be interpreted as if it were -```js + +```json { "op": "set", "new": } ``` + which amounts to setting the value with no precondition. Examples: -```js +```json { "/a": { "op": "set", "new": 12 } } ``` sets the value of the key `"/a"` to `12`. 
The same could have been achieved by -```js +```json { "/a": 12 } ``` or by -```js +```json { "/a": { "new": 12} } ``` The operation -```js +```json { "/a/b": { "new": { "c": [1,2,3,4] } } } ``` @@ -272,36 +282,38 @@ sets the key `"/a/b"` to `{"c": [1,2,3,4]}`. Note that in the above example this Here are some more examples for full transactions (update/precondition pairs). The transaction -```js +```json [ { "/a/b": { "new": { "c": [1,2,3,4] } } }, { "/a/b": { "old": { "c": [1,2,3] } } } ] ``` sets the key `"/a/b"` to `{"c":[1,2,3,4]}` if and only if it was `{"c":[1,2,3]}` before. Note that this fails if `"/a/b"` had other attributes than `"c"`. The transaction -```js +```json [ { "/x": { "op": "delete" } }, { "/x": { "old": false } } ] ``` clears the value of the key `"/x"` if this old value was false. -```js +```json [ { "/y": { "new": 13 }, { "/y": { "oldEmpty": true } } } +] ``` sets the value of `"/y"` to `13`, but only, if it was unset before. -```js +```json [ { "/z": { "op": "push", "new": "Max" } } ] ``` appends the string `"Max"` to the end of the list stored in the `"z"` attribute, or creates an array `["Max"]` in `"z"` if it was unset or not an array. -```js +```json [ { "/u": { "op": "pop" } } ] ``` + removes the last entry of the array stored under `"u"`, if the value of `"u"` is not set or not an array. 
### HTTP-headers for write operations @@ -320,7 +332,7 @@ External services to the Agency may announce themselves or others to be observer In order to observe any future modification below say `"/a/b/c"`, a observer is announced through posting the below document to the Agency’s write REST handler: -```js +```json [ { "/a/b/c": { "op": "observe", "url": "http://:/" @@ -330,27 +342,28 @@ In order to observe any future modification below say `"/a/b/c"`, a observer is The observer is notified of any changes to that target until such time that it removes itself as an observer of that key through -```js +```json [ { "/a/b/c": { "op": "unobserve", - "url": “http://:/" } } ] + "url": "http://:/" } } ] ``` Note that the last document removes all observations from entities below `"/a/b/c"`. In particular, issuing -```js +```json [ { "/": "unobserve", "url": "http://:/"} ] ``` will result in the removal of all observations for URL `"http://:/"`. -The notifying POST requests are submitted immediately with any complete array of changes to the read db of the leader of create, modify and delete events accordingly; The body -```js +The notifying POST requests are submitted immediately with any complete array of changes to the read db of the leader of create, modify and delete events accordingly; The body + +```json { "term": "5", "index": 167, "/": { "/a/b/c" : { "op": "modify", "old": 1, "new": 2 } }, "/constants/euler" : {"op": "create", "new": 2.718281828459046 }, - "/constants/pi": { "op": "delete" } } } + "/constants/pi": { "op": "delete" } } ``` ### Configuration @@ -365,7 +378,7 @@ number. We use `curl` throughout for the examples, but any client library performing HTTP requests should do. 
The output might look somewhat like this -```js +```json { "term": 1, "leaderId": "f5d11cde-8468-4fd2-8747-b4ef5c7dfa98", diff --git a/3.10/http/bulk-imports-importing-self-contained.md b/3.10/http/bulk-imports-importing-self-contained.md index b06363a584..d51f3042df 100644 --- a/3.10/http/bulk-imports-importing-self-contained.md +++ b/3.10/http/bulk-imports-importing-self-contained.md @@ -12,9 +12,12 @@ allowed but will be skipped. Using this format, the documents are imported line-wise. Example input data: - { "_key": "key1", ... } - { "_key": "key2", ... } - ... + +```js +{ "_key": "key1", ... } +{ "_key": "key2", ... } +... +``` To use this method, the *type* query parameter should be set to *documents*. diff --git a/3.10/indexing-fulltext.md b/3.10/indexing-fulltext.md index 475eccca7e..1987375618 100644 --- a/3.10/indexing-fulltext.md +++ b/3.10/indexing-fulltext.md @@ -38,7 +38,7 @@ Note that deeper nested objects are ignored. For example, a fulltext index on structure: ```js -{ translations: { en: { US: "fox" }, de: "Fuchs" } +{ translations: { en: { US: "fox" }, de: "Fuchs" } } ``` If you need to search across multiple fields and/or nested objects, you may write diff --git a/3.10/indexing-geo.md b/3.10/indexing-geo.md index 06540960cf..03535f88d7 100644 --- a/3.10/indexing-geo.md +++ b/3.10/indexing-geo.md @@ -33,7 +33,7 @@ documents which do not fulfill these requirements. To create an index in GeoJSON mode execute: -``` +```js collection.ensureIndex({ type: "geo", fields: [ "geometry" ], geoJson:true }) ``` @@ -154,7 +154,7 @@ output to check for index usage. A basic example of a query for results near an origin point: -```js +```aql FOR x IN geo_collection FILTER GEO_DISTANCE([@lng, @lat], x.geometry) <= 100000 RETURN x._key @@ -162,7 +162,7 @@ FOR x IN geo_collection or -```js +```aql FOR x IN geo_collection FILTER GEO_DISTANCE(@geojson, x.geometry) <= 100000 RETURN x._key @@ -186,7 +186,7 @@ can actually provide. 
A basic example of a query for the 1000 nearest results to an origin point (ascending sorting): -```js +```aql FOR x IN geo_collection SORT GEO_DISTANCE([@lng, @lat], x.geometry) ASC LIMIT 1000 @@ -206,7 +206,7 @@ can actually provide. You may also get results farthest away (distance sorted in descending order): -```js +```aql FOR x IN geo_collection SORT GEO_DISTANCE([@lng, @lat], x.geometry) DESC LIMIT 1000 @@ -218,7 +218,7 @@ FOR x IN geo_collection A query which returns documents at a distance of _1km_ or farther away, and up to _100km_ from the origin: -```js +```aql FOR x IN geo_collection FILTER GEO_DISTANCE([@lng, @lat], x.geometry) <= 100000 FILTER GEO_DISTANCE([@lng, @lat], x.geometry) >= 1000 @@ -249,7 +249,7 @@ resulting in a sequence of findings sorted by distance, but limited to the given A query which returns documents whose stored geometry is contained within a GeoJSON Polygon. -``` +```aql LET polygon = GEO_POLYGON([[[60,35],[50,5],[75,10],[70,35]]]) FOR x IN geo_collection FILTER GEO_CONTAINS(polygon, x.geometry) @@ -267,7 +267,7 @@ This `FILTER` clause can be combined with a `SORT` clause using `GEO_DISTANCE()` Note that containment in the opposite direction is currently not supported by geo indexes: -```js +```aql LET polygon = GEO_POLYGON([[[60,35],[50,5],[75,10],[70,35]]]) FOR x IN geo_collection FILTER GEO_CONTAINS(x.geometry, polygon) @@ -279,7 +279,7 @@ FOR x IN geo_collection A query that returns documents with an intersection of their stored geometry and a GeoJSON Polygon. -``` +```aql LET polygon = GEO_POLYGON([[[60,35],[50,5],[75,10],[70,35]]]) FOR x IN geo_collection FILTER GEO_INTERSECTS(polygon, x.geometry) diff --git a/3.10/indexing-index-basics.md b/3.10/indexing-index-basics.md index 8604a382c9..332eb4f29b 100644 --- a/3.10/indexing-index-basics.md +++ b/3.10/indexing-index-basics.md @@ -132,7 +132,7 @@ db.edges.ensureIndex({"type": "persistent", "fields": ["_from", "timestamp"]}); in arangosh. 
Then, queries like -```js +```aql FOR v, e, p IN 1..1 OUTBOUND "V/1" edges FILTER e.timestamp >= "2018-07-09" RETURN p @@ -162,7 +162,7 @@ For example, if a persistent index is created on attributes `value1` and `value2 following filter conditions can use the index (note: the `<=` and `>=` operators are intentionally omitted here for the sake of brevity): -```js +```aql FILTER doc.value1 == ... FILTER doc.value1 < ... FILTER doc.value1 > ... @@ -410,7 +410,7 @@ db.posts.insert({ tags: [ "foobar", "baz", "quux" ] }); This array index can then be used for looking up individual `tags` values from AQL queries via the `IN` operator: -```js +```aql FOR doc IN posts FILTER 'foobar' IN doc.tags RETURN doc @@ -420,7 +420,7 @@ It is possible to add the [array expansion operator](aql/advanced-array-operator `[*]`, but it is not mandatory. You may use it to indicate that an array index is used, it is purely cosmetic however: -```js +```aql FOR doc IN posts FILTER 'foobar' IN doc.tags[*] RETURN doc @@ -428,7 +428,7 @@ FOR doc IN posts The following FILTER conditions will **not use** the array index: -```js +```aql FILTER doc.tags ANY == 'foobar' FILTER doc.tags ANY IN 'foobar' FILTER doc.tags IN 'foobar' @@ -447,7 +447,7 @@ db.posts.insert({ tags: [ { name: "foobar" }, { name: "baz" }, { name: "quux" } The following query will then use the array index (this does require the [array expansion operator](aql/advanced-array-operators.html#array-expansion)): -```js +```aql FOR doc IN posts FILTER 'foobar' IN doc.tags[*].name RETURN doc diff --git a/3.10/indexing-multi-dim.md b/3.10/indexing-multi-dim.md index 8906428407..77ff3825b3 100644 --- a/3.10/indexing-multi-dim.md +++ b/3.10/indexing-multi-dim.md @@ -44,7 +44,7 @@ Future extensions of the index will allow other types. Now we can use the index in a query: -```js +```aql FOR p IN points FILTER x0 <= p.x && p.x <= x1 FILTER y0 <= p.y && p.y <= y1 @@ -63,7 +63,7 @@ Furthermore you can use any comparison operator. 
The index supports `<=` and `>= naturally, `==` will be translated to the bound `[c, c]`. Strict comparison is translated to their non-strict counterparts and a post-filter is inserted. -```js +```aql FOR p IN points FILTER 2 <= p.x && p.x < 9 FILTER y0 >= 80 @@ -77,11 +77,11 @@ If you build a calendar using ArangoDB you could create a collection for each us that contains her appointments. The documents would roughly look as follows: ```json - { - "from": 345365, - "to": 678934, - "what": "Dentist", - } +{ + "from": 345365, + "to": 678934, + "what": "Dentist", +} ``` `from`/`to` are the timestamps when an appointment starts/ends. Having an @@ -100,7 +100,7 @@ f <= from and to <= t Thus our query would be: -```js +```aql FOR app IN appointments FILTER f <= app.from FILTER app.to <= t @@ -119,7 +119,7 @@ a_2 <= b_1 and a_1 <= b_2 Thus our query would be: -```js +```aql FOR app IN appointments FILTER f <= app.to FILTER app.from <= t @@ -140,7 +140,7 @@ performance negatively if documents are fetched unnecessarily. You can specify the `lookahead` value using the `OPTIONS` keyword: -```js +```aql FOR app IN appointments OPTIONS { lookahead: 32 } FILTER @to <= app.to FILTER app.from <= @from diff --git a/3.10/indexing-persistent.md b/3.10/indexing-persistent.md index 9998d02da8..3fb01e89e2 100644 --- a/3.10/indexing-persistent.md +++ b/3.10/indexing-persistent.md @@ -133,7 +133,7 @@ known to not benefit from using using the cache, you may turn off the usage of the cache for individual query parts. This can be achieved via the `useCache` hint that can be provided to an AQL `FOR` loop: -```js +```aql FOR doc IN collection OPTIONS { useCache: false } FILTER doc.value == @lookup ... 
diff --git a/3.10/indexing-vertex-centric.md b/3.10/indexing-vertex-centric.md index f09daa698d..8bbfabe4cf 100644 --- a/3.10/indexing-vertex-centric.md +++ b/3.10/indexing-vertex-centric.md @@ -23,7 +23,7 @@ To take an example, if we have an attribute called `type` on the edges, we can u vertex-centric index on this attribute to find all edges attached to a vertex with a given `type`. The following query example could benefit from such an index: -```js +```aql FOR v, e, p IN 3..5 OUTBOUND @start GRAPH @graphName FILTER p.edges[*].type ALL == "friend" RETURN v @@ -66,7 +66,7 @@ The AQL optimizer can decide to use a vertex-centric whenever suitable, however index is used, the optimizer may estimate that an other index is assumed to be better. The optimizer will consider this type of indexes on explicit filtering of `_from` respectively `_to`: -```js +```aql FOR edge IN collection FILTER edge._from == "vertices/123456" AND edge.type == "friend" RETURN edge @@ -74,7 +74,7 @@ FOR edge IN collection and during pattern matching queries: -```js +```aql FOR v, e, p IN 3..5 OUTBOUND @start GRAPH @graphName FILTER p.edges[*].type ALL == "friend" RETURN v diff --git a/3.10/indexing-which-index.md b/3.10/indexing-which-index.md index a47b774c8b..9a28206ceb 100644 --- a/3.10/indexing-which-index.md +++ b/3.10/indexing-which-index.md @@ -106,7 +106,7 @@ different usage scenarios: then you can create an index over `from, to` utilize it with this query: - ```js + ```aql FOR i IN intervals FILTER i.from <= t && t <= i.to RETURN i ``` @@ -199,7 +199,7 @@ least one of the indexed attributes has a value of `null`. 
For example, the foll query cannot use a sparse index, even if one was created on attribute `attr`: -```js +```aql FOR doc In collection FILTER doc.attr == null RETURN doc @@ -213,13 +213,13 @@ will not make use of a sparse index in a query in order to produce correct resul For example, the following queries cannot use a sparse index on `attr` because the optimizer will not know beforehand whether the values which are compared to `doc.attr` will include `null`: -```js +```aql FOR doc In collection FILTER doc.attr == SOME_FUNCTION(...) RETURN doc ``` -```js +```aql FOR other IN otherCollection FOR doc In collection FILTER doc.attr == other.attr diff --git a/3.10/installation-linux-osconfiguration.md b/3.10/installation-linux-osconfiguration.md index c7b40ac1ae..dd1e7df86e 100644 --- a/3.10/installation-linux-osconfiguration.md +++ b/3.10/installation-linux-osconfiguration.md @@ -25,7 +25,7 @@ FATAL [7ef60] {config} specified language 'en_US' does not match previously used The locale can be generated with the following command: -``` +```bash sudo locale-gen "en_US.UTF-8" ``` @@ -67,7 +67,7 @@ ArangoDB. Please consult your operating system's documentation for how to do thi Execute: -``` +```bash sudo bash -c "echo madvise >/sys/kernel/mm/transparent_hugepage/enabled" sudo bash -c "echo madvise >/sys/kernel/mm/transparent_hugepage/defrag" ``` @@ -89,7 +89,7 @@ The Linux kernel default is 0. You can set it as follows before executing `arangod`: -``` +```bash sudo bash -c "echo 0 >/proc/sys/vm/overcommit_memory" ``` @@ -129,7 +129,7 @@ value for the number of memory mappings. To set the value once, use the following command before starting arangod: -``` +```bash sudo bash -c "sysctl -w 'vm.max_map_count=2048000'" ``` @@ -149,7 +149,7 @@ Zone Reclaim Execute -``` +```bash sudo bash -c "echo 0 >/proc/sys/vm/zone_reclaim_mode" ``` @@ -169,7 +169,7 @@ NUMA Multi-processor systems often have non-uniform Access Memory (NUMA). 
ArangoDB should be started with interleave on such system. This can be achieved using -``` +```bash numactl --interleave=all arangod ... ``` @@ -196,7 +196,7 @@ memory pooling. Execute -``` +```bash export GLIBCXX_FORCE_NEW=1 ``` diff --git a/3.10/installation-linux.md b/3.10/installation-linux.md index 3162c132f3..c8023f4194 100644 --- a/3.10/installation-linux.md +++ b/3.10/installation-linux.md @@ -61,7 +61,7 @@ installation. For unattended installations, you can set the password using the [debconf helpers](http://www.microhowto.info/howto/perform_an_unattended_installation_of_a_debian_package.html){:target="_blank"}: -``` +```bash echo arangodb3 arangodb3/password password NEWPASSWORD | debconf-set-selections echo arangodb3 arangodb3/password_again password NEWPASSWORD | debconf-set-selections ``` @@ -75,7 +75,7 @@ installation. The generated random password is printed during the installation. Please write it down somewhere, or change it to a password of your choice by executing: -``` +```bash ARANGODB_DEFAULT_ROOT_PASSWORD=NEWPASSWORD arango-secure-installation ``` diff --git a/3.10/programs-arangod-log.md b/3.10/programs-arangod-log.md index a5481e2557..c690c5a092 100644 --- a/3.10/programs-arangod-log.md +++ b/3.10/programs-arangod-log.md @@ -19,7 +19,7 @@ and everything else at info level. In a configuration file, it is written like this: -``` +```conf [log] level = startup=trace level = queries=trace diff --git a/3.10/programs-arangod-query.md b/3.10/programs-arangod-query.md index 88326f8a2e..a02737915b 100644 --- a/3.10/programs-arangod-query.md +++ b/3.10/programs-arangod-query.md @@ -155,7 +155,7 @@ likely unintended. 
For example, consider the query -```js +```aql FOR doc IN collection RETURN collection ``` diff --git a/3.10/programs-arangoexport-examples.md b/3.10/programs-arangoexport-examples.md index 28ad864597..1f98ad4a6f 100644 --- a/3.10/programs-arangoexport-examples.md +++ b/3.10/programs-arangoexport-examples.md @@ -212,7 +212,7 @@ You can save a query to a file and use it as a custom query with the `--custom-query-file` option. It is mutually exclusive with the `--custom-query` option: -```js +```aql // example.aql FOR book IN @@collectionName FILTER book.sold > @sold diff --git a/3.10/programs-arangoimport-details.md b/3.10/programs-arangoimport-details.md index 3ffad82631..9eb8da2f4e 100644 --- a/3.10/programs-arangoimport-details.md +++ b/3.10/programs-arangoimport-details.md @@ -145,7 +145,7 @@ It will show the *Example* -```js +```bash created: 2 warnings/errors: 0 updated/replaced: 0 @@ -178,7 +178,7 @@ line to any value of `X` greater than 2 will increase the total throughput used. {% hint 'warning' %} -Using parellelism with the `--threads X` parameter +Using parallelism with the `--threads X` parameter together with the `--on-duplicate` parameter set to `ignore`, `update` or `replace` can lead to a race condition, when there are duplicates e.g. multiple identical `_key` values. Even ignoring the duplicates will make the result unpredictable, meaning diff --git a/3.10/programs-arangoimport-examples-json.md b/3.10/programs-arangoimport-examples-json.md index da039cff53..82a3ec513f 100644 --- a/3.10/programs-arangoimport-examples-json.md +++ b/3.10/programs-arangoimport-examples-json.md @@ -245,7 +245,7 @@ Using multiple threads may lead to a non-sequential import of the input data. Data that appears later in the input file may be imported earlier than data that appears earlier in the input file. This is normally not a problem but may cause issues when when there are data dependencies or duplicates in the import data. 
In -this case, the number of threads should be set to 1. Also, using parellelism with +this case, the number of threads should be set to 1. Also, using parallelism with the `--threads X` parameter together with the `--on-duplicate` parameter set to `ignore`, `update` or `replace` can lead to a race condition, when there are duplicates e.g. multiple identical `_key` values. Even ignoring the duplicates will make the result unpredictable, meaning diff --git a/3.10/programs-web-interface-collections.md b/3.10/programs-web-interface-collections.md index e4bfb453f4..86b8507570 100644 --- a/3.10/programs-web-interface-collections.md +++ b/3.10/programs-web-interface-collections.md @@ -58,12 +58,14 @@ Additional information: Upload format: I. Line-wise + ```js { "_key": "key1", ... } { "_key": "key2", ... } ``` II. JSON documents in a list + ```js [ { "_key": "key1", ... }, diff --git a/3.10/quick-start-coming-from-sql.md b/3.10/quick-start-coming-from-sql.md index 18d0545b8c..d09c101aee 100644 --- a/3.10/quick-start-coming-from-sql.md +++ b/3.10/quick-start-coming-from-sql.md @@ -54,7 +54,7 @@ return the whole document, or just parts of it. 
Given that *oneDocument* is a document (retrieved like `LET oneDocument = DOCUMENT("myusers/3456789")` for instance), it can be returned as-is like this: -```js +```aql RETURN oneDocument ``` @@ -97,7 +97,7 @@ RETURN oneDocument.hobbies Return the hobbies and the address: -```js +```aql RETURN { hobbies: oneDocument.hobbies, address: oneDocument.address @@ -122,7 +122,7 @@ RETURN { Return the first hobby only: -```js +```aql RETURN oneDocument.hobbies[0].name ``` @@ -134,7 +134,7 @@ RETURN oneDocument.hobbies[0].name Return a list of all hobby strings: -```js +```aql RETURN { hobbies: oneDocument.hobbies[*].name } ``` diff --git a/3.10/quick-start-on-premise.md b/3.10/quick-start-on-premise.md index 58c8e3a187..7ef740ce49 100644 --- a/3.10/quick-start-on-premise.md +++ b/3.10/quick-start-on-premise.md @@ -91,7 +91,7 @@ user that has access rights to this database. See Use the *arangosh* to create a new database and user. -``` +```js arangosh> db._createDatabase("example"); arangosh> var users = require("@arangodb/users"); arangosh> users.save("root@example", "password"); diff --git a/3.10/release-notes-new-features310.md b/3.10/release-notes-new-features310.md index b1267ede34..50dbb8b517 100644 --- a/3.10/release-notes-new-features310.md +++ b/3.10/release-notes-new-features310.md @@ -61,7 +61,7 @@ profiling output, and it wasn't clear which execution node caused which amount o For example, consider the following query: -```js +```aql FOR doc1 IN collection FILTER doc1.value1 < 1000 /* uses index */ FILTER doc1.value2 NOT IN [1, 4, 7] /* post filter */ @@ -74,7 +74,7 @@ FOR doc1 IN collection The profiling output for this query now shows how often the filters were invoked for the different execution nodes: -```js +```aql Execution plan: Id NodeType Calls Items Filtered Runtime [s] Comment 1 SingletonNode 1 1 0 0.00008 * ROOT @@ -102,7 +102,7 @@ In the following example query, there are in-memory caches present for both inde the query. 
However, only the innermost index node #13 can use the cache, because the outer FOR loop does not use an equality lookup. -``` +```aql Query String (270 chars, cacheable: false): FOR doc1 IN collection FILTER doc1.value1 < 1000 FILTER doc1.value2 NOT IN [1, 4, 7] FOR doc2 IN collection FILTER doc1.value1 == doc2.value2 FILTER doc2.value2 != 5 RETURN doc2 @@ -129,7 +129,7 @@ Query Statistics: The multi-dimensional index type `zkd` (experimental) now supports an optional index hint for tweaking performance: -```js +```aql FOR … IN … OPTIONS { lookahead: 32 } ``` @@ -360,7 +360,7 @@ _arangoexport_ now also has a `--custom-query-file` startup option that you can use instead of `--custom-query`, to read a query from a file. This allows you to store complex queries and no escaping is necessary in the file: -```js +```aql // example.aql FOR book IN @@collectionName FILTER book.sold > @sold diff --git a/3.10/security-change-root-password.md b/3.10/security-change-root-password.md index e4f7c57b6b..978caf6817 100644 --- a/3.10/security-change-root-password.md +++ b/3.10/security-change-root-password.md @@ -12,13 +12,13 @@ One can reset the _root_ password in the following way: - **Note:** you might need to take any needed precaution to avoid this server can be accessed from outside as currently authentication is temporarily disabled. 
You might do this by disabling network access or using _localhost_ for the binding (`--server.endpoint tcp://127.0.0.1:8529`) - Change the password using the ArangoDB Web UI, or using the following command via `arangosh`: -``` +```js require("org/arangodb/users").update("root", "newpassword"); ``` This command should return: -``` +```json { "user" : "root", "active" : true, diff --git a/3.7/deployment-cluster-using-the-starter.md b/3.7/deployment-cluster-using-the-starter.md index ffe0278f72..08e0bb8055 100644 --- a/3.7/deployment-cluster-using-the-starter.md +++ b/3.7/deployment-cluster-using-the-starter.md @@ -27,7 +27,7 @@ Local Tests If you only want a local test Cluster, you can run a single _Starter_ with the `--starter.local` argument. It will start a 3 "machine" Cluster on your local PC: -``` +```bash arangodb --starter.local --starter.data-dir=./localdata --auth.jwt-secret=/etc/arangodb.secret ``` @@ -42,7 +42,7 @@ Multiple Machines If you want to start a Cluster using the _Starter_, you need to copy the _secret_ file to every machine and start the Cluster using the following command: -``` +```bash arangodb --server.storage-engine=rocksdb --auth.jwt-secret=/etc/arangodb.secret --starter.data-dir=./data --starter.join A,B,C ``` @@ -61,7 +61,7 @@ Using the ArangoDB Starter in Docker ------------------------------------ The _Starter_ can also be used to launch Clusters based on _Docker_ containers: - + ```bash export IP= docker volume create arangodb diff --git a/3.7/deployment-kubernetes-deployment-resource.md b/3.7/deployment-kubernetes-deployment-resource.md index ebb52d454e..607aac452c 100644 --- a/3.7/deployment-kubernetes-deployment-resource.md +++ b/3.7/deployment-kubernetes-deployment-resource.md @@ -573,14 +573,14 @@ The ArangoDB deployments need some very minimal access rights. 
With the deployment of the operator, we grant the following rights for the `default` service account: -``` +```yaml rules: -- apiGroups: - - "" + - apiGroups: + - "" resources: - - pods + - pods verbs: - - get + - get ``` If you are using a different service account, please grant these rights diff --git a/3.8/programs-arangoimport-details.md b/3.8/programs-arangoimport-details.md index bb967d024f..3436fbac78 100644 --- a/3.8/programs-arangoimport-details.md +++ b/3.8/programs-arangoimport-details.md @@ -178,7 +178,7 @@ line to any value of `X` greater than 2 will increase the total throughput used. {% hint 'warning' %} -Using parellelism with the `--threads X` parameter +Using parallelism with the `--threads X` parameter together with the `--on-duplicate` parameter set to `ignore`, `update` or `replace` can lead to a race condition, when there are duplicates e.g. multiple identical `_key` values. Even ignoring the duplicates will make the result unpredictable, meaning diff --git a/3.8/programs-arangoimport-examples-json.md b/3.8/programs-arangoimport-examples-json.md index da039cff53..82a3ec513f 100644 --- a/3.8/programs-arangoimport-examples-json.md +++ b/3.8/programs-arangoimport-examples-json.md @@ -245,7 +245,7 @@ Using multiple threads may lead to a non-sequential import of the input data. Data that appears later in the input file may be imported earlier than data that appears earlier in the input file. This is normally not a problem but may cause issues when when there are data dependencies or duplicates in the import data. In -this case, the number of threads should be set to 1. Also, using parellelism with +this case, the number of threads should be set to 1. Also, using parallelism with the `--threads X` parameter together with the `--on-duplicate` parameter set to `ignore`, `update` or `replace` can lead to a race condition, when there are duplicates e.g. multiple identical `_key` values. 
Even ignoring the duplicates will make the result unpredictable, meaning diff --git a/3.8/release-notes-new-features38.md b/3.8/release-notes-new-features38.md index 707fe1b800..f2214877be 100644 --- a/3.8/release-notes-new-features38.md +++ b/3.8/release-notes-new-features38.md @@ -44,7 +44,7 @@ paths by increasing weights. The cost of an edge can be read from an attribute which can be specified with the `weightAttribute` option. -```js +```aql FOR x, v, p IN 0..10 OUTBOUND "places/York" GRAPH "kShortestPathsGraph" OPTIONS { order: "weighted", @@ -90,7 +90,7 @@ paths between a source and a target vertex that match the given path length. For example, the query: -```js +```aql FOR path IN 2..4 OUTBOUND K_PATHS "v/source" TO "v/target" GRAPH "g" RETURN path ``` @@ -159,7 +159,7 @@ AQL now also support projections on sub-attributes (e.g. `a.b.c`). In previous versions of ArangoDB, projections were only supported on top-level attributes. For example, in the query: -```js +```aql FOR doc IN collection RETURN doc.a.b ``` @@ -175,7 +175,7 @@ it will be used now. Previously, no index could be used for this projection. Projections now can also be fed by any attribute in a combined index. For example, in the query: -```js +```aql FOR doc IN collection RETURN doc.b ``` @@ -231,7 +231,7 @@ allowed, although using collection names like this is very likely unintended. For example, consider the query -```js +```aql FOR doc IN collection RETURN collection ``` @@ -920,13 +920,13 @@ aliases and deprecated ones. 
- [DATE_TIMEZONE()](aql/functions-date.html#date_timezone) - ```js + ```aql RETURN DATE_TIMEZONE() // [ "Etc/UTC" ] ``` - [DATE_TIMEZONES()](aql/functions-date.html#date_timezones) - ```js + ```aql RETURN DATE_TIMEZONES() // [ "Africa/Abidjan", ..., "Europe/Berlin", ..., "Zulu" ] ``` diff --git a/3.9/programs-arangoimport-details.md b/3.9/programs-arangoimport-details.md index d598e5dacc..0069ca1b35 100644 --- a/3.9/programs-arangoimport-details.md +++ b/3.9/programs-arangoimport-details.md @@ -178,7 +178,7 @@ line to any value of `X` greater than 2 will increase the total throughput used. {% hint 'warning' %} -Using parellelism with the `--threads X` parameter +Using parallelism with the `--threads X` parameter together with the `--on-duplicate` parameter set to `ignore`, `update` or `replace` can lead to a race condition, when there are duplicates e.g. multiple identical `_key` values. Even ignoring the duplicates will make the result unpredictable, meaning diff --git a/3.9/programs-arangoimport-examples-json.md b/3.9/programs-arangoimport-examples-json.md index da039cff53..82a3ec513f 100644 --- a/3.9/programs-arangoimport-examples-json.md +++ b/3.9/programs-arangoimport-examples-json.md @@ -245,7 +245,7 @@ Using multiple threads may lead to a non-sequential import of the input data. Data that appears later in the input file may be imported earlier than data that appears earlier in the input file. This is normally not a problem but may cause issues when when there are data dependencies or duplicates in the import data. In -this case, the number of threads should be set to 1. Also, using parellelism with +this case, the number of threads should be set to 1. Also, using parallelism with the `--threads X` parameter together with the `--on-duplicate` parameter set to `ignore`, `update` or `replace` can lead to a race condition, when there are duplicates e.g. multiple identical `_key` values. 
Even ignoring the duplicates will make the result unpredictable, meaning diff --git a/3.9/release-notes-api-changes39.md b/3.9/release-notes-api-changes39.md index cdb3a4d5c5..fd55e50142 100644 --- a/3.9/release-notes-api-changes39.md +++ b/3.9/release-notes-api-changes39.md @@ -288,7 +288,7 @@ The MMFiles engine is gone since ArangoDB 3.7, and the only remaining storage engine since then is RocksDB. For the RocksDB engine, the `/_api/export` endpoint internally used a streaming AQL query such as -```js +```aql FOR doc IN @@collection RETURN doc ``` diff --git a/3.9/release-notes-new-features39.md b/3.9/release-notes-new-features39.md index c0460f32b8..2cadc3b8dc 100644 --- a/3.9/release-notes-new-features39.md +++ b/3.9/release-notes-new-features39.md @@ -142,7 +142,7 @@ operation. It will be used as a hint for the document lookup that is performed as part of the `UPSERT` operation, and can help in cases such as `UPSERT` not picking the best index automatically. -```js +```aql UPSERT { a: 1234 } INSERT { a: 1234, name: "AB"} UPDATE {name: "ABC"} IN myCollection @@ -162,7 +162,7 @@ Added three decay functions to AQL: Decay functions calculate a score with a function that decays depending on the distance of a numeric value from a user given origin. -```js +```aql DECAY_GAUSS(41, 40, 5, 5, 0.5) // 1 DECAY_LINEAR(5, 0, 10, 0, 0.2) // 0.6 DECAY_EXP(2, 0, 10, 0, 0.2) // 0.7247796636776955 @@ -178,7 +178,7 @@ distance (named `L2_DISTANCE`): - [L1_DISTANCE()](aql/functions-numeric.html#l1_distance) - [L2_DISTANCE()](aql/functions-numeric.html#l2_distance) -```js +```aql COSINE_SIMILARITY([0,1], [1,0]) // 0 L1_DISTANCE([-1,-1], [2,2]) // 6 L2_DISTANCE([1,1], [5,2]) // 4.1231056256176606 @@ -197,7 +197,7 @@ first and only produce the remaining paths. 
For example, the query -```js +```aql FOR v, e, p IN 10 OUTBOUND @start GRAPH "myGraph" FILTER v.isRelevant == true RETURN p @@ -210,7 +210,7 @@ This optimization is now part of the existing `optimize-traversals` rule and you will see the conditions under `Filter / Prune Conditions` in the query explain output (`` FILTER (v.`isRelevant` == true) `` in this example): -```js +```aql Execution plan: Id NodeType Est. Comment 1 SingletonNode 1 * ROOT @@ -240,7 +240,7 @@ is returned, but only a specific sub-attribute of the path is used later For example, the query -```js +```aql FOR v, e, p IN 1..3 OUTBOUND @start GRAPH "myGraph" RETURN p.vertices ``` @@ -249,7 +249,7 @@ only requires the buildup of the `vertices` sub-attribute of the path result `p` but not the buildup of the `edges` sub-attribute. The optimization can be observed in the query explain output: -```js +```aql Execution plan: Id NodeType Est. Comment 1 SingletonNode 1 * ROOT @@ -284,7 +284,7 @@ Added an option to store the `PRUNE` expression as a variable. Now, the `PRUNE` condition can be stored in a variable and be used later in the query without having to repeat the `PRUNE` condition: -```js +```aql FOR v, e, p IN 10 OUTBOUND @start GRAPH "myGraph" PRUNE pruneCondition = v.isRelevant == true FILTER pruneCondition @@ -301,7 +301,7 @@ See [Pruning](aql/graphs-traversals.html#pruning). Invalid use of `OPTIONS` in AQL queries will now raise a warning when the query is parsed. This is useful to detect misspelled attribute names in `OPTIONS`, e.g. -```js +```aql INSERT ... INTO collection OPTIONS { overwrightMode: 'ignore' } /* should have been 'overwriteMode' */ ``` @@ -309,7 +309,7 @@ INSERT ... INTO collection It is also useful to detect the usage of valid `OPTIONS` attribute names that are used at a wrong position in the query, e.g. 
-```js +```aql FOR doc IN collection FILTER doc.value == 1234 INSERT doc INTO other @@ -368,7 +368,7 @@ In some rare cases, an AQL query can be executed faster if it ignores indexes. You can force the optimizer not use an index for any given `FOR` loop by setting the new `disableIndex` hint to `true`: -```js +```aql FOR doc IN collection OPTIONS { disableIndex: true } FILTER doc.value <= 99 RETURN doc.other @@ -389,7 +389,7 @@ Such projections are typically faster as long as there are not too many of them but it depends on the number of attributes and their size. The new `maxProjections` hint lets you adjust the threshold to fine-tune your queries. -```js +```aql FOR doc IN collection OPTIONS { maxProjections: 7 } RETURN [ doc.val1, doc.val2, doc.val3, doc.val4, doc.val5, doc.val6, doc.val7 ] ``` diff --git a/_includes/head.html b/_includes/head.html index aef748f374..8d072aa056 100644 --- a/_includes/head.html +++ b/_includes/head.html @@ -90,7 +90,7 @@ - + {%- if jekyll.environment == 'production' and site.google_analytics -%} diff --git a/_plugins/AqlLexer.rb b/_plugins/AqlLexer.rb new file mode 100644 index 0000000000..71bd58aaf4 --- /dev/null +++ b/_plugins/AqlLexer.rb @@ -0,0 +1,118 @@ +Jekyll::Hooks.register :site, :pre_render do |site| + require "rouge" + + class AqlLexer < Rouge::RegexLexer + title 'AQL' + desc 'ArangoDB Query Language (AQL) lexer' + tag 'aql' + filenames '*.aql' + mimetypes 'application/x-aql' + + aqlBindVariablePattern = '@(?:_+[a-zA-Z0-9]+[a-zA-Z0-9_]*|[a-zA-Z0-9][a-zA-Z0-9_]*)' + aqlBuiltinFunctionsPattern = "(?:" + + "to_bool|to_number|to_string|to_array|to_list|is_null|is_bool|is_number|is_string|is_array|is_list|is_object|is_document|is_datestring|" + + "typename|json_stringify|json_parse|concat|concat_separator|char_length|lower|upper|substring|left|right|trim|reverse|contains|" + + "log|log2|log10|exp|exp2|sin|cos|tan|asin|acos|atan|atan2|radians|degrees|pi|regex_test|regex_replace|" + + 
"like|floor|ceil|round|abs|rand|sqrt|pow|length|count|min|max|average|avg|sum|product|median|variance_population|variance_sample|variance|" + + "bit_and|bit_or|bit_xor|bit_negate|bit_test|bit_popcount|bit_shift_left|bit_shift_right|bit_construct|bit_deconstruct|bit_to_string|bit_from_string|" + + "first|last|unique|outersection|interleave|in_range|jaccard|matches|merge|merge_recursive|has|attributes|values|unset|unset_recursive|keep|keep_recursive|" + + "near|within|within_rectangle|is_in_polygon|distance|fulltext|stddev_sample|stddev_population|stddev|" + + "slice|nth|position|contains_array|translate|zip|call|apply|push|append|pop|shift|unshift|remove_value|remove_values|" + + "remove_nth|replace_nth|date_now|date_timestamp|date_iso8601|date_dayofweek|date_year|date_month|date_day|date_hour|" + + "date_minute|date_second|date_millisecond|date_dayofyear|date_isoweek|date_leapyear|date_quarter|date_days_in_month|date_trunc|date_round|" + + "date_add|date_subtract|date_diff|date_compare|date_format|date_utctolocal|date_localtoutc|date_timezone|date_timezones|" + + "fail|passthru|v8|sleep|schema_get|schema_validate|call_greenspun|version|noopt|noeval|not_null|" + + "first_list|first_document|parse_identifier|current_user|current_database|collection_count|pregel_result|" + + "collections|document|decode_rev|range|union|union_distinct|minus|intersection|flatten|is_same_collection|check_document|" + + "ltrim|rtrim|find_first|find_last|split|substitute|ipv4_to_number|ipv4_from_number|is_ipv4|md5|sha1|sha512|crc32|fnv64|hash|random_token|to_base64|" + + "to_hex|encode_uri_component|soundex|assert|warn|is_key|sorted|sorted_unique|count_distinct|count_unique|" + + "levenshtein_distance|levenshtein_match|regex_matches|regex_split|ngram_match|ngram_similarity|ngram_positional_similarity|uuid|" + + "tokens|exists|starts_with|phrase|min_match|boost|analyzer|" + + 
"geo_point|geo_multipoint|geo_polygon|geo_multipolygon|geo_linestring|geo_multilinestring|geo_contains|geo_intersects|" + + "geo_equals|geo_distance|geo_area|geo_in_range" + + ")(?=\\s*\\()" # Will not recognize function if comment between name and opening parenthesis + + state :commentsandwhitespace do + rule %r(\s+), Text + rule %r(//.*), Comment::Single + rule %r(/\*) do + token Comment::Multiline + push :multiline_comment + end + end + + state :multiline_comment do + rule %r([^*]+), Comment::Multiline + rule %r(\*/), Comment::Multiline, :pop! + rule %r(\*), Comment::Multiline + end + + state :double_quote do + rule %r(\\.)m, Str::Double + rule %r([^"\\]+), Str::Double + rule %r("), Str::Double, :pop! + end + + state :single_quote do + rule %r(\\.)m, Str::Single + rule %r([^'\\]+), Str::Single + rule %r('), Str::Single, :pop! + end + + state :backtick do + rule %r(\\.)m, Name + rule %r([^`\\]+), Name + rule %r(`), Name, :pop! + end + + state :forwardtick do + rule %r(\\.)m, Name + rule %r([^´\\]+), Name + rule %r(´), Name, :pop! 
+ end + + state :identifier do + rule %r((?:$?|_+)[a-zA-Z]+[_a-zA-Z0-9]*), Name + rule %r(`) do + token Name + push :backtick + end + rule %r(´) do + token Name + push :forwardtick + end + end + + state :root do + mixin :commentsandwhitespace + rule %r(0[bB][01]+), Num::Bin + rule %r(0[xX][0-9a-fA-F]+), Num::Hex + rule %r((?:(?:0|[1-9][0-9]*)(?:\.[0-9]+)?|\.[0-9]+)(?:[eE][\-\+]?[0-9]+)?), Num::Float + rule %r(0|[1-9][0-9]*), Num::Integer + rule Regexp.new('@' + aqlBindVariablePattern), Name::Variable::Global + rule Regexp.new(aqlBindVariablePattern), Name::Variable + rule %r(=~|!~|[=!<>]=?|[%?:/*+-]|\.\.|&&|\|\|), Operator + rule %r([.,(){}\[\]]), Punctuation + rule %r([a-zA-Z0-9][a-zA-Z0-9_]*(?:::[a-zA-Z0-9_]+)+(?=\s*\()), Name::Function + rule %r((WITH)(\s+)(COUNT)(\s+)(INTO)\b) do + groups Keyword::Reserved, Text, Keyword::Pseudo, Text, Keyword::Reserved + end + rule %r((?:KEEP|PRUNE|SEARCH|TO)\b), Keyword::Pseudo + rule %r(OPTIONS\s*\{)i, Keyword::Pseudo + rule %r((?:AGGREGATE|ALL|AND|ANY|ASC|COLLECT|DESC|DISTINCT|FILTER|FOR|GRAPH|IN|INBOUND|INSERT|INTO|K_PATHS|K_SHORTEST_PATHS|LIKE|LIMIT|NONE|NOT|OR|OUTBOUND|REMOVE|REPLACE|RETURN|SHORTEST_PATH|SORT|UPDATE|UPSERT|WITH|WINDOW)\b)i, Keyword::Reserved + rule %r(LET\b), Keyword::Declaration + rule %r((?:true|false|null)\b)i, Keyword::Constant + rule %r((?:CURRENT|NEW|OLD)\b), Name::Builtin::Pseudo + rule Regexp.new(aqlBuiltinFunctionsPattern, 'i'), Name::Function + rule %r(") do + token Str::Double + push :double_quote + end + rule %r(') do + token Str::Single + push :single_quote + end + mixin :identifier + end + end +end diff --git a/drivers/dotnet-usage.md b/drivers/dotnet-usage.md index 69778da24c..3bc8444625 100644 --- a/drivers/dotnet-usage.md +++ b/drivers/dotnet-usage.md @@ -22,10 +22,11 @@ The options are passed as an instance of the `ApiClientSerializationOptions` cla In addition, the default options can be updated, which affect all subsequent operations that use these options. 
To set default options, set them on the serializer implementation itself. For example, if using the supplied `JsonNetApiClientSerialization`: -``` +```csharp var serializer = new JsonNetApiClientSerialization(); serializer.DefaultOptions.IgnoreNullValues = false; ``` + ## HTTP Request Headers APIs that support specifying HTTP request headers have an optional method argument to pass in header values. diff --git a/styles/pygments-vs.css b/styles/pygments-arango.css similarity index 60% rename from styles/pygments-vs.css rename to styles/pygments-arango.css index 10d8ba348d..fe64008643 100644 --- a/styles/pygments-vs.css +++ b/styles/pygments-arango.css @@ -1,8 +1,8 @@ .highlight .hll { background-color: #ffffcc } .highlight .c { color: #008000 } /* Comment */ -.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .err { background-color: #ffcccc; padding: 2px } /* Error */ .highlight .k { color: #0000ff } /* Keyword */ -.highlight .cm { color: #008000 } /* Comment.Multiline */ +.highlight .cm { color: #008000 } /* Comment.Multiline */ /* alt color: #8e908c */ .highlight .cp { color: #0000ff } /* Comment.Preproc */ .highlight .c1 { color: #008000 } /* Comment.Single */ .highlight .cs { color: #008000 } /* Comment.Special */ @@ -11,8 +11,8 @@ .highlight .gp { font-weight: bold } /* Generic.Prompt */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { font-weight: bold } /* Generic.Subheading */ -.highlight .kc { color: #0000ff } /* Keyword.Constant */ -.highlight .kd { color: #0000ff } /* Keyword.Declaration */ +.highlight .kc { color: #f5871f } /* Keyword.Constant */ +.highlight .kd { color: #0000ff } /* Keyword.Declaration */ /* alt color: #8959a8 */ .highlight .kn { color: #0000ff } /* Keyword.Namespace */ .highlight .kp { color: #0000ff } /* Keyword.Pseudo */ .highlight .kr { color: #0000ff } /* Keyword.Reserved */ @@ -20,15 +20,26 @@ .highlight .s { color: #a31515 } /* Literal.String */ .highlight .nc { color: #2b91af } /* 
Name.Class */ .highlight .no { color: #0000ff } /* Name.Constant */ +.highlight .nb { color: #0000ff } /* Name.Builtin */ +.highlight .np { color: #0000ff; font-style: italic } /* Name.Builtin.Pseudo */ +.highlight .nf { color: #3c4c72; } /* Name.Function */ +.highlight .nv { color: #8959a8 } /* Name.Variable */ +.highlight .vg { color: #8959a8 } /* Name.Variable.Global */ +.highlight .nx { color: #0000ff } /* Name.Other */ .highlight .ow { color: #0000ff } /* Operator.Word */ .highlight .sb { color: #a31515 } /* Literal.String.Backtick */ .highlight .sc { color: #a31515 } /* Literal.String.Char */ .highlight .sd { color: #a31515 } /* Literal.String.Doc */ -.highlight .s2 { color: #a31515 } /* Literal.String.Double */ +.highlight .s2 { color: #718c00 } /* Literal.String.Double */ .highlight .se { color: #a31515 } /* Literal.String.Escape */ .highlight .sh { color: #a31515 } /* Literal.String.Heredoc */ .highlight .si { color: #a31515 } /* Literal.String.Interpol */ .highlight .sx { color: #a31515 } /* Literal.String.Other */ .highlight .sr { color: #a31515 } /* Literal.String.Regex */ -.highlight .s1 { color: #a31515 } /* Literal.String.Single */ +.highlight .s1 { color: #718c00 } /* Literal.String.Single */ .highlight .ss { color: #a31515 } /* Literal.String.Symbol */ +.highlight .m { color: #f5871f; } /* Number */ +.highlight .mb { color: #f5871f; } /* Number.Bin */ +.highlight .mf { color: #f5871f; } /* Number.Float */ +.highlight .mh { color: #f5871f; } /* Number.Hex */ +.highlight .mi { color: #f5871f; } /* Number.Integer */ diff --git a/styles/site.css b/styles/site.css index bcaaf21015..cef86f3fec 100644 --- a/styles/site.css +++ b/styles/site.css @@ -80,34 +80,6 @@ h6:hover a.anchor-link, margin-top: 0; } -pre.highlight .s1 { - color: #718c00; -} - -pre.highlight .s2 { - color: #718c00; -} - -pre.highlight .kc { - color: #f5871f; -} - -pre.highlight .mi { - color: #f5871f; -} - -pre.highlight .cm { - color: #8e908c; -} - -pre.highlight .kd { - color: 
#8959a8; -} - -pre.highlight .nb { - color: #f5871f; -} - .page-inner li p { margin: 0; } diff --git a/styles/website.css b/styles/website.css index f7c385fbe4..6310f3fe22 100644 --- a/styles/website.css +++ b/styles/website.css @@ -111,10 +111,6 @@ div.example_show_button { font: 1em/1 anchorjs-icons; } -.highlight .err { - border: none; -} - /* Styling Hubspot pop-up */ .leadinModal.leadinModal-v3 .leadinModal-content { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;