author     Adam Ierymenko <adam.ierymenko@gmail.com>   2019-03-21 16:42:52 -0700
committer  Adam Ierymenko <adam.ierymenko@gmail.com>   2019-03-21 16:42:52 -0700
commit     e37eb0aa542ef8aee8532c5bfdde7f09ed343a28 (patch)
tree       c2900b4814847fb3249b41e7dcfe5ab6dbac9215 /ext
parent     130fa35bb1707ea232015c1a3672f0585632dea1 (diff)
download   infinitytier-e37eb0aa542ef8aee8532c5bfdde7f09ed343a28.tar.gz
           infinitytier-e37eb0aa542ef8aee8532c5bfdde7f09ed343a28.zip
More cleanup of old stuff no longer used.
Diffstat (limited to 'ext')
83 files changed, 0 insertions, 22794 deletions
diff --git a/ext/bin/tap-mac/tap.kext/Contents/Info.plist b/ext/bin/tap-mac/tap.kext/Contents/Info.plist deleted file mode 100644 index c20eefa5..00000000 --- a/ext/bin/tap-mac/tap.kext/Contents/Info.plist +++ /dev/null @@ -1,36 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> -<plist version="1.0"> -<dict> - <key>CFBundleDevelopmentRegion</key> - <string>English</string> - <key>CFBundleExecutable</key> - <string>tap</string> - <key>CFBundleIdentifier</key> - <string>com.zerotier.tap</string> - <key>CFBundleInfoDictionaryVersion</key> - <string>6.0</string> - <key>CFBundleName</key> - <string>tap</string> - <key>CFBundlePackageType</key> - <string>KEXT</string> - <key>CFBundleShortVersionString</key> - <string>20150118</string> - <key>CFBundleSignature</key> - <string>????</string> - <key>CFBundleVersion</key> - <string>1.0</string> - <key>OSBundleLibraries</key> - <dict> - <key>com.apple.kpi.mach</key> - <string>8.0</string> - <key>com.apple.kpi.bsd</key> - <string>8.0</string> - <key>com.apple.kpi.libkern</key> - <string>8.0</string> - <key>com.apple.kpi.unsupported</key> - <string>8.0</string> - </dict> -</dict> -</plist> - diff --git a/ext/bin/tap-mac/tap.kext/Contents/MacOS/tap b/ext/bin/tap-mac/tap.kext/Contents/MacOS/tap Binary files differdeleted file mode 100755 index 48bf9625..00000000 --- a/ext/bin/tap-mac/tap.kext/Contents/MacOS/tap +++ /dev/null diff --git a/ext/bin/tap-mac/tap.kext/Contents/_CodeSignature/CodeResources b/ext/bin/tap-mac/tap.kext/Contents/_CodeSignature/CodeResources deleted file mode 100644 index 0710b400..00000000 --- a/ext/bin/tap-mac/tap.kext/Contents/_CodeSignature/CodeResources +++ /dev/null @@ -1,105 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> -<plist version="1.0"> -<dict> - <key>files</key> - <dict/> - <key>files2</key> - <dict/> - <key>rules</key> - <dict> - <key>^Resources/</key> - <true/> - <key>^Resources/.*\.lproj/</key> - <dict> - <key>optional</key> - <true/> - <key>weight</key> - <real>1000</real> - </dict> - <key>^Resources/.*\.lproj/locversion.plist$</key> - <dict> - <key>omit</key> - <true/> - <key>weight</key> - <real>1100</real> - </dict> - <key>^version.plist$</key> - <true/> - </dict> - <key>rules2</key> - <dict> - <key>.*\.dSYM($|/)</key> - <dict> - <key>weight</key> - <real>11</real> - </dict> - <key>^(.*/)?\.DS_Store$</key> - <dict> - <key>omit</key> - <true/> - <key>weight</key> - <real>2000</real> - </dict> - <key>^(Frameworks|SharedFrameworks|PlugIns|Plug-ins|XPCServices|Helpers|MacOS|Library/(Automator|Spotlight|LoginItems))/</key> - <dict> - <key>nested</key> - <true/> - <key>weight</key> - <real>10</real> - </dict> - <key>^.*</key> - <true/> - <key>^Info\.plist$</key> - <dict> - <key>omit</key> - <true/> - <key>weight</key> - <real>20</real> - </dict> - <key>^PkgInfo$</key> - <dict> - <key>omit</key> - <true/> - <key>weight</key> - <real>20</real> - </dict> - <key>^Resources/</key> - <dict> - <key>weight</key> - <real>20</real> - </dict> - <key>^Resources/.*\.lproj/</key> - <dict> - <key>optional</key> - <true/> - <key>weight</key> - <real>1000</real> - </dict> - <key>^Resources/.*\.lproj/locversion.plist$</key> - <dict> - <key>omit</key> - <true/> - <key>weight</key> - <real>1100</real> - </dict> - <key>^[^/]+$</key> - <dict> - <key>nested</key> - <true/> - <key>weight</key> - <real>10</real> - </dict> - 
<key>^embedded\.provisionprofile$</key> - <dict> - <key>weight</key> - <real>20</real> - </dict> - <key>^version\.plist$</key> - <dict> - <key>weight</key> - <real>20</real> - </dict> - </dict> -</dict> -</plist> diff --git a/ext/librethinkdbxx/.travis.yml b/ext/librethinkdbxx/.travis.yml deleted file mode 100644 index b306a410..00000000 --- a/ext/librethinkdbxx/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -sudo: required -dist: trusty - -python: - - "3.4.3" - -addons: - rethinkdb: "2.3" - -script: - - make test diff --git a/ext/librethinkdbxx/COPYRIGHT b/ext/librethinkdbxx/COPYRIGHT deleted file mode 100644 index c25145d5..00000000 --- a/ext/librethinkdbxx/COPYRIGHT +++ /dev/null @@ -1,16 +0,0 @@ -RethinkDB Language Drivers - -Copyright 2010-2012 RethinkDB - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this product except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/ext/librethinkdbxx/Makefile b/ext/librethinkdbxx/Makefile deleted file mode 100644 index ba319c16..00000000 --- a/ext/librethinkdbxx/Makefile +++ /dev/null @@ -1,126 +0,0 @@ -# Customisable build settings - -CXX ?= clang++ -CXXFLAGS ?= -INCLUDE_PYTHON_DOCS ?= no -DEBUG ?= no -PYTHON ?= python3 - -# Required build settings - -ifneq (no,$(DEBUG)) - CXXFLAGS += -ggdb -else - CXXFLAGS += -O3 # -flto -endif - -CXXFLAGS += -std=c++11 -I'build/gen' -Wall -pthread -fPIC - -prefix ?= /usr -DESTDIR ?= - -.DELETE_ON_ERROR: -SHELL := /bin/bash - -modules := connection datum json term cursor types utils -headers := utils error exceptions types datum connection cursor term - -o_files := $(patsubst %, build/obj/%.o, $(modules)) -d_files := $(patsubst %, build/dep/%.d, $(modules)) - -skip_tests := regression/1133 regression/767 regression/1005 # python-only -skip_tests += arity # arity errors are compile-time -skip_tests += geo # geo types not implemented yet -skip_tests += limits # possibly broken tests: https://github.com/rethinkdb/rethinkdb/issues/5940 - -upstream_tests := \ - $(filter-out %.rb.%, \ - $(filter-out $(patsubst %,test/upstream/%%, $(skip_tests)), \ - $(filter test/upstream/$(test_filter)%, \ - $(shell find test/upstream -name '*.yaml' | egrep -v '.(rb|js).yaml$$')))) -upstream_tests_cc := $(patsubst %.yaml, build/tests/%.cc, $(upstream_tests)) -upstream_tests_o := $(patsubst %.cc, %.o, $(upstream_tests_cc)) - -.PRECIOUS: $(upstream_tests_cc) $(upstream_tests_o) - -default: build/librethinkdb++.a build/include/rethinkdb.h build/librethinkdb++.so - -all: default build/test - -build/librethinkdb++.a: $(o_files) - ar rcs $@ $^ - -build/librethinkdb++.so: $(o_files) - $(CXX) -o $@ $(CXXFLAGS) -shared $^ - -build/obj/%.o: src/%.cc build/gen/protocol_defs.h - @mkdir -p $(dir $@) - @mkdir -p $(dir build/dep/$*.d) - $(CXX) -o $@ $(CXXFLAGS) -c $< -MP -MQ $@ -MD -MF build/dep/$*.d - -build/gen/protocol_defs.h: reql/ql2.proto reql/gen.py | build/gen/. - $(PYTHON) reql/gen.py $< > $@ - -clean: - rm -rf build - -ifneq (no,$(INCLUDE_PYTHON_DOCS)) -build/include/rethinkdb.h: build/rethinkdb.nodocs.h reql/add_docs.py reql/python_docs.txt | build/include/. 
- $(PYTHON) reql/add_docs.py reql/python_docs.txt < $< > $@ -else -build/include/rethinkdb.h: build/rethinkdb.nodocs.h | build/include/. - cp $< $@ -endif - -build/rethinkdb.nodocs.h: build/gen/protocol_defs.h $(patsubst %, src/%.h, $(headers)) - ( echo "// Auto-generated file, built from $^"; \ - echo '#pragma once'; \ - cat $^ | \ - grep -v '^#pragma once' | \ - grep -v '^#include "'; \ - ) > $@ - -build/tests/%.cc: %.yaml test/yaml_to_cxx.py - @mkdir -p $(dir $@) - $(PYTHON) test/yaml_to_cxx.py $< > $@ - -build/tests/upstream_tests.cc: $(upstream_tests) test/gen_index_cxx.py FORCE | build/tests/. - @echo '$(PYTHON) test/gen_index_cxx.py $(wordlist 1,5,$(upstream_tests)) ... > $@' - @$(PYTHON) test/gen_index_cxx.py $(upstream_tests) > $@ - -build/tests/%.o: build/tests/%.cc build/include/rethinkdb.h test/testlib.h | build/tests/. - $(CXX) -o $@ $(CXXFLAGS) -isystem build/include -I test -c $< -Wno-unused-variable - -build/tests/%.o: test/%.cc test/testlib.h build/include/rethinkdb.h | build/tests/. - $(CXX) -o $@ $(CXXFLAGS) -isystem build/include -I test -c $< - -build/test: build/tests/testlib.o build/tests/test.o build/tests/upstream_tests.o $(upstream_tests_o) build/librethinkdb++.a - @echo $(CXX) -o $@ $(CXXFLAGS) $(wordlist 1,5,$^) ... - @$(CXX) -o $@ $(CXXFLAGS) build/librethinkdb++.a $^ - -.PHONY: test -test: build/test - build/test - -build/bench: build/tests/bench.o build/librethinkdb++.a - @$(CXX) -o $@ $(CXXFLAGS) -isystem build/include build/librethinkdb++.a $^ - -.PHONY: bench -bench: build/bench - build/bench - -.PHONY: install -install: build/librethinkdb++.a build/include/rethinkdb.h build/librethinkdb++.so - install -m755 -d $(DESTDIR)$(prefix)/lib - install -m755 -d $(DESTDIR)$(prefix)/include - install -m644 build/librethinkdb++.a $(DESTDIR)$(prefix)/lib/librethinkdb++.a - install -m644 build/librethinkdb++.so $(DESTDIR)$(prefix)/lib/librethinkdb++.so - install -m644 build/include/rethinkdb.h $(DESTDIR)$(prefix)/include/rethinkdb.h - -%/.: - mkdir -p $* - -.PHONY: FORCE -FORCE: - --include $(d_files) diff --git a/ext/librethinkdbxx/README.md b/ext/librethinkdbxx/README.md deleted file mode 100644 index 92fa9136..00000000 --- a/ext/librethinkdbxx/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# RethinkDB driver for C++ - -This driver is compatible with RethinkDB 2.0. It is based on the -official RethinkDB Python driver. - -* [RethinkDB server](http://rethinkdb.com/) -* [RethinkDB API docs](http://rethinkdb.com/api/python/) - -## Example - -``` -#include <memory> -#include <cstdio> -#include <rethinkdb.h> - -namespace R = RethinkDB; - -int main() { - std::unique_ptr<R::Connection> conn = R::connect("localhost", 28015); - R::Cursor cursor = R::table("users").filter(R::row["age"] > 14).run(*conn); - for (R::Datum& user : cursor) { - printf("%s\n", user.as_json().c_str()); - } -} -``` - -## Build - -Requires a modern C++ compiler. to build and install, run: - -``` -make -make install -``` - -Will build `include/rethinkdb.h`, `librethinkdb++.a` and -`librethinkdb++.so` into the `build/` directory. - -To include documentation from the Python driver in the header file, -pass the following argument to make. - -``` -make INCLUDE_PYTHON_DOCS=yes -``` - -To build in debug mode: - -``` -make DEBUG=yes -``` - -To install to a specific location: - -``` -make install prefix=/usr/local DESTDIR= -``` - -## Status - -Still in early stages of development. 
- -## Tests - -This driver is tested against the upstream ReQL tests from the -RethinkDB repo, which are programmatically translated from Python to -C++. As of 34dc13c, all tests pass: - -``` -$ make test -... -SUCCESS: 2053 tests passed -``` diff --git a/ext/librethinkdbxx/reql/add_docs.py b/ext/librethinkdbxx/reql/add_docs.py deleted file mode 100644 index 67f08df8..00000000 --- a/ext/librethinkdbxx/reql/add_docs.py +++ /dev/null @@ -1,80 +0,0 @@ -from sys import stdin, stderr, stdout, argv -from re import match, sub - -docs = {} - -for line in open(argv[1]): - res = match('^\t\(([^,]*), (.*)\),$', line) - if res: - fullname = res.group(1) - docs[fullname.split('.')[-1]] = eval(res.group(2)).decode('utf-8') - -translate_name = { - 'name': None, - 'delete_': 'delete', - 'union_': 'union', - 'operator[]': '__getitem__', - 'operator+': '__add__', - 'operator-': '__sub__', - 'operator*': '__mul__', - 'operator/': '__div__', - 'operator%': '__mod__', - 'operator&&': 'and_', - 'operator||': 'or_', - 'operator==': '__eq__', - 'operator!=': '__ne__', - 'operator>': '__gt__', - 'operator>=': '__ge__', - 'operator<': '__lt__', - 'operator<=': '__le__', - 'operator!': 'not_', - 'default_': 'default', - 'array': None, - 'desc': None, - 'asc': None, - 'maxval': None, - 'minval': None, - 'january': None, - 'february': None, - 'march': None, - 'april': None, - 'may': None, - 'june': None, - 'july': None, - 'august': None, - 'september': None, - 'october': None, - 'november': None, - 'december': None, - 'monday': None, - 'tuesday': None, - 'wednesday': None, - 'thursday': None, - 'friday': None, - 'saturday': None, - 'sunday': None, -} - -def print_docs(name, line): - py_name = translate_name.get(name, name) - if py_name in docs: - indent = match("^( *)", line).group(1) - stdout.write('\n') - # TODO: convert the examples to C++ - for line in docs[py_name].split('\n'): - stdout.write(indent + "// " + line + '\n') - elif py_name: - stderr.write('Warning: no docs for ' + py_name + ': ' + line) - -stdout.write('// Contains documentation copied as-is from the Python driver') - -for line in stdin: - res = match("^ *CO?[0-9_]+\(([^,)]+)|extern Query (\w+)|^ *// *(\$)doc\((\w+)\) *$", line) - if res: - name = res.group(1) or res.group(2) or res.group(4) - print_docs(name, line) - if not res.group(3): - stdout.write(line) - else: - stdout.write(line) - diff --git a/ext/librethinkdbxx/reql/gen.py b/ext/librethinkdbxx/reql/gen.py deleted file mode 100644 index 2b1fe9fc..00000000 --- a/ext/librethinkdbxx/reql/gen.py +++ /dev/null @@ -1,33 +0,0 @@ -from sys import argv -from re import sub, finditer, VERBOSE - -def gen(defs): - indent = 0 - enum = False - def p(s): print(" " * (indent * 4) + s) - for item in finditer(""" - (?P<type> message|enum) \\s+ (?P<name> \\w+) \\s* \\{ | - (?P<var> \\w+) \\s* = \\s* (?P<val> \\w+) \\s* ; | - \\} - """, defs, flags=VERBOSE): - if item.group(0) == "}": - indent = indent - 1 - p("};" if enum else "}") - enum = False; - elif item.group('type') == 'enum': - p("enum class %s {" % item.group('name')) - indent = indent + 1 - enum = True - elif item.group('type') == 'message': - p("namespace %s {" % item.group('name')) - indent = indent + 1 - enum = False - else: - if enum: - p("%s = %s," % (item.group('var'), item.group('val'))) - -print("// Auto-generated by reql/gen.py") -print("#pragma once") -print("namespace RethinkDB { namespace Protocol {") -gen(sub("//.*", "", open(argv[1]).read())) -print("} }") diff --git a/ext/librethinkdbxx/reql/python_docs.txt 
b/ext/librethinkdbxx/reql/python_docs.txt deleted file mode 100644 index 41a5c84e..00000000 --- a/ext/librethinkdbxx/reql/python_docs.txt +++ /dev/null @@ -1,189 +0,0 @@ -# This file was generated by _scripts/gen_python.py from the rethinkdb documentation in http://github.com/rethinkdb/docs -# hash: "3d13a937cfdacb7ffa3dab2ce3ebdf25bd3c192e" - -import rethinkdb - -docsSource = [ - - (rethinkdb.net.Connection.close, b'conn.close(noreply_wait=True)\n\nClose an open connection.\n\nClosing a connection normally waits until all outstanding requests have finished and then frees any open resources associated with the connection. By passing `False` to the `noreply_wait` optional argument, the connection will be closed immediately, possibly aborting any outstanding noreply writes.\n\nA noreply query is executed by passing the `noreply` option to the [run](http://rethinkdb.com/api/python/run/) command, indicating that `run()` should not wait for the query to complete before returning. You may also explicitly wait for a noreply query to complete by using the [noreply_wait](http://rethinkdb.com/api/python/noreply_wait) command.\n\n*Example* Close an open connection, waiting for noreply writes to finish.\n\n conn.close()\n\n*Example* Close an open connection immediately.\n\n conn.close(noreply_wait=False)\n'), - (rethinkdb.connect, b'r.connect(host="localhost", port=28015, db="test", auth_key="", timeout=20) -> connection\nr.connect(host) -> connection\n\nCreate a new connection to the database server. The keyword arguments are:\n\n- `host`: host of the RethinkDB instance. The default value is `localhost`.\n- `port`: the driver port, by default `28015`.\n- `db`: the database used if not explicitly specified in a query, by default `test`.\n- `auth_key`: the authentication key, by default the empty string.\n- `timeout`: timeout period in seconds for the connection to be opened (default `20`).\n\nIf the connection cannot be established, a `RqlDriverError` exception will be thrown.\n\nThe authentication key can be set from the RethinkDB command line tool. Once set, client connections must provide the key as an option to `run` in order to make the connection. For more information, read "Using the RethinkDB authentication system" in the documentation on [securing your cluster](http://rethinkdb.com/docs/security/).\n\n__Note:__ Currently, the Python driver is not thread-safe. Each thread or multiprocessing PID should be given its own connection object. (This is likely to change in a future release of RethinkDB; you can track issue [#2427](https://github.com/rethinkdb/rethinkdb/issues/2427) for progress.)\n\n*Example* Opens a connection using the default host and port but specifying the default database.\n\n conn = r.connect(db=\'marvel\')\n\n*Example* Opens a new connection to the database.\n\n conn = r.connect(host = \'localhost\',\n port = 28015,\n db = \'heroes\',\n auth_key = \'hunter2\')\n\n'), - (rethinkdb.net.Connection.noreply_wait, b'conn.noreply_wait()\n\n`noreply_wait` ensures that previous queries with the `noreply` flag have been processed\nby the server. Note that this guarantee only applies to queries run on the given connection.\n\n*Example* We have previously run queries with the `noreply` argument set to `True`. 
Now\nwait until the server has processed them.\n\n conn.noreply_wait()\n\n'), - (rethinkdb, b'r -> r\n\nThe top-level ReQL namespace.\n\n*Example* Setup your top-level namespace.\n\n import rethinkdb as r\n\n'), - (rethinkdb.net.Connection.reconnect, b'conn.reconnect(noreply_wait=True)\n\nClose and reopen a connection.\n\nClosing a connection normally waits until all outstanding requests have finished and then frees any open resources associated with the connection. By passing `False` to the `noreply_wait` optional argument, the connection will be closed immediately, possibly aborting any outstanding noreply writes.\n\nA noreply query is executed by passing the `noreply` option to the [run](http://rethinkdb.com/api/python/run/) command, indicating that `run()` should not wait for the query to complete before returning. You may also explicitly wait for a noreply query to complete by using the [noreply_wait](http://rethinkdb.com/api/python/noreply_wait) command.\n\n*Example* Cancel outstanding requests/queries that are no longer needed.\n\n conn.reconnect(noreply_wait=False)\n'), - (rethinkdb.net.Connection.repl, b"conn.repl()\n\nSet the default connection to make REPL use easier. Allows calling\n`.run()` on queries without specifying a connection.\n\n__Note:__ Avoid using `repl` in application code. RethinkDB connection objects are not thread-safe, and calls to `connect` from multiple threads may change the global connection object used by `repl`. Applications should specify connections explicitly.\n\n*Example* Set the default connection for the REPL, then call\n`run()` without specifying the connection.\n\n r.connect(db='marvel').repl()\n r.table('heroes').run()\n"), - (rethinkdb.ast.RqlQuery.run, b'query.run(conn, use_outdated=False, time_format=\'native\', profile=False, durability="hard") -> cursor\nquery.run(conn, use_outdated=False, time_format=\'native\', profile=False, durability="hard") -> object\n\nRun a query on a connection, returning either a single JSON result or\na cursor, depending on the query.\n\nThe optional arguments are:\n\n- `use_outdated`: whether or not outdated reads are OK (default: `False`).\n- `time_format`: what format to return times in (default: `\'native\'`).\n Set this to `\'raw\'` if you want times returned as JSON objects for exporting.\n- `profile`: whether or not to return a profile of the query\'s\n execution (default: `False`).\n- `durability`: possible values are `\'hard\'` and `\'soft\'`. In soft durability mode RethinkDB\nwill acknowledge the write immediately after receiving it, but before the write has\nbeen committed to disk.\n- `group_format`: what format to return `grouped_data` and `grouped_streams` in (default: `\'native\'`).\n Set this to `\'raw\'` if you want the raw pseudotype.\n- `noreply`: set to `True` to not receive the result object or cursor and return immediately.\n- `db`: the database to run this query against as a string. The default is the database specified in the `db` parameter to [connect](http://rethinkdb.com/api/python/connect/) (which defaults to `test`). The database may also be specified with the [db](http://rethinkdb.com/api/python/db/) command.\n- `array_limit`: the maximum numbers of array elements that can be returned by a query (default: 100,000). This affects all ReQL commands that return arrays. Note that it has no effect on the size of arrays being _written_ to the database; those always have an upper limit of 100,000 elements.\n- `binary_format`: what format to return binary data in (default: `\'native\'`). 
Set this to `\'raw\'` if you want the raw pseudotype.\n- `min_batch_rows`: minimum number of rows to wait for before batching a result set (default: 8). This is an integer.\n- `max_batch_rows`: maximum number of rows to wait for before batching a result set (default: unlimited). This is an integer.\n- `max_batch_bytes`: maximum number of bytes to wait for before batching a result set (default: 1024). This is an integer.\n- `max_batch_seconds`: maximum number of seconds to wait before batching a result set (default: 0.5). This is a float (not an integer) and may be specified to the microsecond.\n- `first_batch_scaledown_factor`: factor to scale the other parameters down by on the first batch (default: 4). For example, with this set to 8 and `max_batch_rows` set to 80, on the first batch `max_batch_rows` will be adjusted to 10 (80 / 8). This allows the first batch to return faster.\n\n*Example* Run a query on the connection `conn` and print out every\nrow in the result.\n\n for doc in r.table(\'marvel\').run(conn):\n print doc\n\n*Example* If you are OK with potentially out of date data from all\nthe tables involved in this query and want potentially faster reads,\npass a flag allowing out of date data in an options object. Settings\nfor individual tables will supercede this global setting for all\ntables in the query.\n\n r.table(\'marvel\').run(conn, use_outdated=True)\n\n*Example* If you just want to send a write and forget about it, you\ncan set `noreply` to true in the options. In this case `run` will\nreturn immediately.\n\n r.table(\'marvel\').run(conn, noreply=True)\n\n*Example* If you want to specify whether to wait for a write to be\nwritten to disk (overriding the table\'s default settings), you can set\n`durability` to `\'hard\'` or `\'soft\'` in the options.\n\n r.table(\'marvel\')\n .insert({ \'superhero\': \'Iron Man\', \'superpower\': \'Arc Reactor\' })\n .run(conn, noreply=True, durability=\'soft\')\n\n*Example* If you do not want a time object to be converted to a\nnative date object, you can pass a `time_format` flag to prevent it\n(valid flags are "raw" and "native"). This query returns an object\nwith two fields (`epoch_time` and `$reql_type$`) instead of a native date\nobject.\n\n r.now().run(conn, time_format="raw")\n\n*Example* Specify the database to use for the query.\n\n for doc in r.table(\'marvel\').run(conn, db=\'heroes\'):\n print doc\n\nThis is equivalent to using the `db` command to specify the database:\n\n r.db(\'heroes\').table(\'marvel\').run(conn) ...\n\n*Example* Change the batching parameters for this query.\n\n r.table(\'marvel\').run(conn, max_batch_rows=16, max_batch_bytes=2048)\n'), - (rethinkdb.set_loop_type, b'r.set_loop_type(string)\n\nSet an asynchronous event loop model. Currently, the only event loop model RethinkDB supports is `"tornado"`, for use with the [Tornado web framework](http://www.tornadoweb.org). 
After setting the event loop to `"tornado"`, the [connect](http://rethinkdb.com/api/python/connect) and [run](http://rethinkdb.com/api/python/run) commands will return Tornado `Future` objects.\n\n*Example* Read a table\'s data using Tornado.\n\n r.set_loop_type("tornado")\n conn = r.connect(host=\'localhost\', port=28015)\n \n @gen.coroutine\n def use_cursor(conn):\n # Print every row in the table.\n cursor = yield r.table(\'test\').order_by(index="id").run(yield conn)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n print(item)\n\nFor a longer discussion with Tornado examples, see the documentation article on [Asynchronous connections][ac].\n\n[ac]: /docs/async-connections/\n'), - (rethinkdb.net.Connection.use, b"conn.use(db_name)\n\nChange the default database on this connection.\n\n*Example* Change the default database so that we don't need to\nspecify the database when referencing a table.\n\n conn.use('marvel')\n r.table('heroes').run(conn) # refers to r.db('marvel').table('heroes')\n"), - (rethinkdb.ast.Table.config, b'table.config() -> selection<object>\ndatabase.config() -> selection<object>\n\nQuery (read and/or update) the configurations for individual tables or databases.\n\nThe `config` command is a shorthand way to access the `table_config` or `db_config` [System tables](http://rethinkdb.com/docs/system-tables/). It will return the single row from the system that corresponds to the database or table configuration, as if [get](http://rethinkdb.com/api/python/get) had been called on the system table with the UUID of the database or table in question.\n\n*Example* Get the configuration for the `users` table.\n\n r.table(\'users\').config().run(conn)\n \n {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "users",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {"primary_replica": "a", "replicas": ["a", "b"]},\n {"primary_replica": "d", "replicas": ["c", "d"]}\n ],\n "write_acks": "majority",\n "durability": "hard"\n }\n\n*Example* Change the write acknowledgement requirement of the `users` table.\n\n r.table(\'users\').config().update({\'write_acks\': \'single\'}).run(conn)\n'), - (rethinkdb.ast.Table.rebalance, b'table.rebalance() -> object\ndatabase.rebalance() -> object\n\nRebalances the shards of a table. When called on a database, all the tables in that database will be rebalanced.\n\nThe `rebalance` command operates by measuring the distribution of primary keys within a table and picking split points that will give each shard approximately the same number of documents. It won\'t change the number of shards within a table, or change any other configuration aspect for the table or the database.\n\nA table will lose availability temporarily after `rebalance` is called; use the [wait](http://rethinkdb.com/api/python/wait) command to wait for the table to become available again, or [status](http://rethinkdb.com/api/python/status) to check if the table is available for writing.\n\nRethinkDB automatically rebalances tables when the number of shards are increased, and as long as your documents have evenly distributed primary keys—such as the default UUIDs—it is rarely necessary to call `rebalance` manually. 
Cases where `rebalance` may need to be called include:\n\n* Tables with unevenly distributed primary keys, such as incrementing integers\n* Changing a table\'s primary key type\n* Increasing the number of shards on an empty table, then using non-UUID primary keys in that table\n\nThe [web UI](http://rethinkdb.com/docs/administration-tools/) (and the [info](http://rethinkdb.com/api/python/info) command) can be used to tell you when a table\'s shards need to be rebalanced.\n\nThe return value of `rebalance` is an object with two fields:\n\n* `rebalanced`: the number of tables rebalanced.\n* `status_changes`: a list of new and old table status values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `rebalance` was executed. \n * `new_val`: The table\'s `status` value after `rebalance` was executed. (This value will almost always indicate the table is unavailable.)\n\nSee the [status](http://rethinkdb.com/api/python/status) command for an explanation of the objects returned in the `old_val` and `new_val` fields.\n\n*Example* Rebalance a table.\n\n r.table(\'superheroes\').rebalance().run(conn)\n \n {\n "rebalanced": 1,\n "status_changes": [\n {\n "old_val": {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": True,\n "ready_for_outdated_reads": True,\n "ready_for_reads": True,\n "ready_for_writes": True\n }\n },\n "new_val": {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "transitioning"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "transitioning"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": False,\n "ready_for_outdated_reads": False,\n "ready_for_reads": False,\n "ready_for_writes": False\n }\n }\n \n }\n ]\n }\n'), - (rethinkdb.ast.Table.reconfigure, b'table.reconfigure(shards=<s>, replicas=<r>[, primary_replica_tag=<t>, dry_run=False]) -> object\ndatabase.reconfigure(shards=<s>, replicas=<r>[, primary_replica_tag=<t>, dry_run=False]) -> object\n\nReconfigure a table\'s sharding and replication.\n\n* `shards`: the number of shards, an integer from 1-32. Required.\n* `replicas`: either an integer or a mapping object. Required.\n * If `replicas` is an integer, it specifies the number of replicas per shard. Specifying more replicas than there are servers will return an error.\n * If `replicas` is an object, it specifies key-value pairs of server tags and the number of replicas to assign to those servers: `{"tag1": 2, "tag2": 4, "tag3": 2, ...}`. For more information about server tags, read [Administration tools](http://rethinkdb.com/docs/administration-tools/).\n* `primary_replica_tag`: the primary server specified by its server tag. Required if `replicas` is an object; the tag must be in the object. 
This must *not* be specified if `replicas` is an integer.\n* `dry_run`: if `True` the generated configuration will not be applied to the table, only returned.\n\nThe return value of `reconfigure` is an object with three fields:\n\n* `reconfigured`: the number of tables reconfigured. This will be `0` if `dry_run` is `True`.\n* `config_changes`: a list of new and old table configuration values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [config](http://rethinkdb.com/api/python/config) value before `reconfigure` was executed. \n * `new_val`: The table\'s `config` value after `reconfigure` was executed.\n* `status_changes`: a list of new and old table status values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `reconfigure` was executed. \n * `new_val`: The table\'s `status` value after `reconfigure` was executed.\n\nFor `config_changes` and `status_changes`, see the [config](http://rethinkdb.com/api/python/config) and [status](http://rethinkdb.com/api/python/status) commands for an explanation of the objects returned in the `old_val` and `new_val` fields.\n\nA table will lose availability temporarily after `reconfigure` is called; use the [table_status](http://rethinkdb.com/api/python/table_status) command to determine when the table is available again.\n\n**Note:** Whenever you call `reconfigure`, the write durability will be set to `hard` and the write acknowledgments will be set to `majority`; these can be changed by using the `config` command on the table.\n\nIf `reconfigure` is called on a database, all the tables in the database will have their configurations affected. The return value will be an array of the objects described above, one per table.\n\nRead [Sharding and replication](http://rethinkdb.com/docs/sharding-and-replication/) for a complete discussion of the subject, including advanced topics.\n\n*Example* Reconfigure a table.\n\n r.table(\'superheroes\').reconfigure(shards=2, replicas=1).run(conn)\n \n {\n "reconfigured": 1,\n "config_changes": [\n {\n "new_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {"primary_replica": "jeeves", "replicas": ["jeeves"]},\n {"primary_replica": "alfred", "replicas": ["alfred"]}\n ],\n "write_acks": "majority",\n "durability": "hard"\n },\n "old_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {"primary_replica": "alfred", "replicas": ["alfred"]}\n ],\n "write_acks": "majority",\n "durability": "hard"\n }\n }\n ],\n "status_changes": [\n {\n "new_val": (status object),\n "old_val": (status object)\n }\n ]\n }\n\n*Example* Reconfigure a table, specifying replicas by server tags.\n\n r.table(\'superheroes\').reconfigure(shards=2, replicas={\'wooster\': 1, \'wayne\': 1}, primary_replica_tag=\'wooster\').run(conn)\n \n {\n "reconfigured": 1,\n "config_changes": [\n {\n "new_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {"primary_replica": "jeeves", "replicas": ["jeeves", "alfred"]},\n {"primary_replica": "jeeves", "replicas": ["jeeves", "alfred"]}\n ],\n "write_acks": "majority",\n "durability": "hard"\n },\n "old_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n 
"primary_key": "id",\n "shards": [\n {"primary_replica": "alfred", "replicas": ["alfred"]}\n ],\n "write_acks": "majority",\n "durability": "hard"\n }\n }\n ],\n "status_changes": [\n {\n "new_val": (status object),\n "old_val": (status object)\n }\n ]\n }\n'), - (rethinkdb.ast.Table.status, b'table.status() -> selection<object>\n\nReturn the status of a table.\n\nThe return value is an object providing information about the table\'s shards, replicas and replica readiness states. For a more complete discussion of the object fields, read about the `table_status` table in [System tables](http://rethinkdb.com/docs/system-tables/).\n\n* `db`: database name.\n* `name`: table name.\n* `id`: table UUID.\n* `shards`: an array of objects, one for each shard, with the following keys per object:\n * `primary_replica`: name of the shard\'s primary server.\n * `replicas`: an array of objects showing the status of each replica, with the following keys:\n * `server`: name of the replica server.\n * `state`: one of `ready`, `disconnected`, `backfilling_data`, `offloading_data`, `erasing_data`, `looking_for_primary_replica` or `transitioning`.\n* `status`: an object with the following boolean keys:\n * `all_replicas_ready`: `True` if all backfills have finished.\n * `ready_for_outdated_reads`: `True` if the table is ready for read queries with the `use_outdated` flag set to `True`.\n * `ready_for_reads`: `True` if the table is ready for read queries with current data (with the `use_outdated` flag set to `False` or unspecified).\n * `ready_for_writes`: `True` if the table is ready for write queries.\n\n*Example* Get a table\'s status.\n\n r.table(\'superheroes\').status().run(conn)\n \n {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": True,\n "ready_for_outdated_reads": True,\n "ready_for_reads": True,\n "ready_for_writes": True\n }\n }\n'), - (rethinkdb.ast.Table.wait, b'table.wait([wait_for=\'ready_for_writes\', timeout=<sec>]) -> object\ndatabase.wait([wait_for=\'ready_for_writes\', timeout=<sec>]) -> object\nr.wait([wait_for=\'ready_for_writes\', timeout=<sec>]) -> object\n\nWait for a table or all the tables in a database to be ready. A table may be temporarily unavailable after creation, rebalancing or reconfiguring. The `wait` command blocks until the given table (or database) is fully up to date.\n\nThe `wait` command takes two optional arguments:\n\n* `wait_for`: a string indicating a table [status](http://rethinkdb.com/api/python/status) to wait on before returning, one of `ready_for_outdated_reads`, `ready_for_reads`, `ready_for_writes`, or `all_replicas_ready`. The default is `ready_for_writes`. \n* `timeout`: a number indicating maximum time to wait for in seconds before returning. The default is no timeout.\n\nThe return value is an object consisting of two key/value pairs:\n\n* `ready`: an integer indicating the number of tables waited for. It will always be `1` when `wait` is called on a table, and the total number of tables when called on a database.\n* `status_changes`: a list with one entry for each of the tables. 
Each member of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `wait` was executed. \n * `new_val`: The table\'s `status` value after `wait` finished.\n\nSee [status](http://rethinkdb.com/api/python/status) and [System tables](http://rethinkdb.com/docs/system-tables/) for a description of the fields within `status_changes`.\n\nIf `wait` is called with no table or database specified (the `r.wait()` form), it will wait on all the tables in the default database (set with the [connect](http://rethinkdb.com/api/python/connect/) command\'s `db` parameter, which defaults to `test`).\n\n*Example* Wait on a table to be ready.\n\n r.table(\'superheroes\').wait().run(conn)\n \n {\n "ready": 1,\n "status_changes": [\n \t{\n \t "old_val": {\n \t\t"db": "database",\n \t\t"id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n \t\t"name": "superheroes",\n \t\t"shards": [\n \t\t {\n \t\t\t"primary_replica": None,\n \t\t\t"replicas": [\n \t\t\t {\n \t\t\t\t"server": "jeeves",\n \t\t\t\t"state": "ready"\n \t\t\t }\n \t\t\t]\n \t\t },\n \t\t {\n \t\t\t"primary_replica": None,\n \t\t\t"replicas": [\n \t\t\t {\n \t\t\t\t"server": "jeeves",\n \t\t\t\t"state": "ready"\n \t\t\t }\n \t\t\t]\n \t\t }\n \t\t],\n \t\t"status": {\n \t\t "all_replicas_ready": True,\n \t\t "ready_for_outdated_reads": True,\n \t\t "ready_for_reads": True,\n \t\t "ready_for_writes": True\n \t\t}\n \t },\n \t "new_val": {\n \t\t"db": "database",\n \t\t"id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n \t\t"name": "superheroes",\n \t\t"shards": [\n \t\t {\n \t\t\t"primary_replica": None,\n \t\t\t"replicas": [\n \t\t\t {\n \t\t\t\t"server": "jeeves",\n \t\t\t\t"state": "ready"\n \t\t\t }\n \t\t\t]\n \t\t },\n \t\t {\n \t\t\t"primary_replica": None,\n \t\t\t"replicas": [\n \t\t\t {\n \t\t\t\t"server": "jeeves",\n \t\t\t\t"state": "ready"\n \t\t\t }\n \t\t\t]\n \t\t }\n \t\t],\n \t\t"status": {\n \t\t "all_replicas_ready": True,\n \t\t "ready_for_outdated_reads": True,\n \t\t "ready_for_reads": True,\n \t\t "ready_for_writes": True\n \t\t}\n \t }\n \t}\n ]\n }\n'), - (rethinkdb.ast.RqlQuery.avg, b"sequence.avg([field_or_function]) -> number\n\nAverages all the elements of a sequence. If called with a field name,\naverages all the values of that field in the sequence, skipping\nelements of the sequence that lack that field. If called with a\nfunction, calls that function on every element of the sequence and\naverages the results, skipping elements of the sequence where that\nfunction returns `None` or a non-existence error.\n\nProduces a non-existence error when called on an empty sequence. You\ncan handle this case with `default`.\n\n*Example* What's the average of 3, 5, and 7?\n\n r.expr([3, 5, 7]).avg().run(conn)\n\n*Example* What's the average number of points scored in a game?\n\n r.table('games').avg('points').run(conn)\n\n*Example* What's the average number of points scored in a game,\ncounting bonus points?\n\n r.table('games').avg(lambda game:\n game['points'] + game['bonus_points']\n ).run(conn)\n\n*Example* What's the average number of points scored in a game?\n(But return `None` instead of raising an error if there are no games where\npoints have been scored.)\n\n r.table('games').avg('points').default(None).run(conn)\n"), - (rethinkdb.ast.RqlQuery.contains, b"sequence.contains(value|predicate[, value|predicate, ...]) -> bool\n\nWhen called with values, returns `True` if a sequence contains all the\nspecified values. 
When called with predicate functions, returns `True`\nif for each predicate there exists at least one element of the stream\nwhere that predicate returns `True`.\n\nValues and predicates may be mixed freely in the argument list.\n\n*Example* Has Iron Man ever fought Superman?\n\n r.table('marvel').get('ironman')['opponents'].contains('superman').run(conn)\n\n*Example* Has Iron Man ever defeated Superman in battle?\n\n r.table('marvel').get('ironman')['battles'].contains(lambda battle:\n (battle['winner'] == 'ironman') & (battle['loser'] == 'superman')\n ).run(conn)\n\n*Example* Use `contains` with a predicate function to simulate an `or`. Return the Marvel superheroes who live in Detroit, Chicago or Hoboken.\n\n r.table('marvel').filter(\n lambda hero: r.expr(['Detroit', 'Chicago', 'Hoboken']).contains(hero['city'])\n ).run(conn)\n"), - (rethinkdb.ast.RqlQuery.count, b"sequence.count([value_or_predicate]) -> number\nbinary.count() -> number\n\nCounts the number of elements in a sequence. If called with a value,\ncounts the number of times that value occurs in the sequence. If\ncalled with a predicate function, counts the number of elements in the\nsequence where that function returns `True`.\n\nIf `count` is called on a [binary](http://rethinkdb.com/api/python/binary) object, it will return the size of the object in bytes.\n\n*Example* Count the number of users.\n\n r.table('users').count().run(conn)\n\n*Example* Count the number of 18 year old users.\n\n r.table('users')['age'].count(18).run(conn)\n\n*Example* Count the number of users over 18.\n\n r.table('users')['age'].count(lambda age: age > 18).run(conn)\n\n r.table('users').count(lambda user: user['age'] > 18).run(conn)\n"), - (rethinkdb.ast.RqlQuery.distinct, b"sequence.distinct() -> array\ntable.distinct([index=<indexname>]) -> stream\n\nRemoves duplicate elements from a sequence.\n\nThe `distinct` command can be called on any sequence or table with an index.\n\n{% infobox %}\nWhile `distinct` can be called on a table without an index, the only effect will be to convert the table into a stream; the content of the stream will not be affected.\n{% endinfobox %}\n\n*Example* Which unique villains have been vanquished by Marvel heroes?\n\n r.table('marvel').concat_map(\n lambda hero: hero['villain_list']).distinct().run(conn)\n\n*Example* Topics in a table of messages have a secondary index on them, and more than one message can have the same topic. What are the unique topics in the table?\n\n r.table('messages').distinct(index='topics').run(conn)\n\nThe above structure is functionally identical to:\n\n r.table('messages')['topics'].distinct().run(conn)\n\nHowever, the first form (passing the index as an argument to `distinct`) is faster, and won't run into array limit issues since it's returning a stream.\n"), - (rethinkdb.ast.RqlQuery.group, b'sequence.group(field_or_function..., [index=\'index_name\', multi=False]) -> grouped_stream\n\nTakes a stream and partitions it into multiple groups based on the\nfields or functions provided.\n\nWith the `multi` flag single documents can be assigned to multiple groups, similar to the behavior of [multi-indexes](http://rethinkdb.com/docs/secondary-indexes/python). When `multi` is `True` and the grouping value is an array, documents will be placed in each group that corresponds to the elements of the array. 
If the array is empty the row will be ignored.\n\n*Example* Grouping games by player.\n\nSuppose that the table `games` has the following data:\n\n [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ]\n\nGrouping games by player can be done with:\n\n > r.table(\'games\').group(\'player\').run(conn)\n \n {\n "Alice": [\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ],\n "Bob": [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n\nCommands chained after `group` will be called on each of these grouped\nsub-streams, producing grouped data.\n\n*Example* What is each player\'s best game?\n\n > r.table(\'games\').group(\'player\').max(\'points\').run(conn)\n \n {\n "Alice": {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n "Bob": {"id": 2, "player": "Bob", "points": 15, "type": "ranked"}\n }\n\nCommands chained onto grouped data will operate on each grouped datum,\nproducing more grouped data.\n\n*Example* What is the maximum number of points scored by each player?\n\n > r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].run(conn)\n \n {\n "Alice": 7,\n "Bob": 15\n }\n\nYou can also group by more than one field.\n\n*Example* What is the maximum number of points scored by each\nplayer for each game type?\n\n > r.table(\'games\').group(\'player\', \'type\').max(\'points\')[\'points\'].run(conn)\n \n {\n ("Alice", "free"): 7,\n ("Bob", "free"): 10,\n ("Bob", "ranked"): 15\n }\n\nYou can also group by a function.\n\n*Example* What is the maximum number of points scored by each\nplayer for each game type?\n\n > r.table(\'games\')\n .group(lambda game:\n game.pluck(\'player\', \'type\')\n ).max(\'points\')[\'points\'].run(conn)\n \n {\n frozenset([(\'player\', \'Alice\'), (\'type\', \'free\')]): 7,\n frozenset([(\'player\', \'Bob\'), (\'type\', \'free\')]): 10,\n frozenset([(\'player\', \'Bob\'), (\'type\', \'ranked\')]): 15,\n }\n\nUsing a function, you can also group by date on a ReQL [date field](http://rethinkdb.com/docs/dates-and-times/javascript/).\n\n*Example* How many matches have been played this year by month?\n\n > r.table(\'matches\').group(\n lambda match: [match[\'date\'].year(), match[\'date\'].month()]\n ).count().run(conn)\n \n {\n (2014, 2): 2,\n (2014, 3): 2,\n (2014, 4): 1,\n (2014, 5): 3\n }\n\nYou can also group on an index (primary key or secondary).\n\n*Example* What is the maximum number of points scored by game type?\n\n > r.table(\'games\').group(index=\'type\').max(\'points\')[\'points\'].run(conn)\n \n {\n "free": 10,\n "ranked": 15\n }\n\nSuppose that the table `games2` has the following data:\n\n [\n { \'id\': 1, \'matches\': {\'a\': [1, 2, 3], \'b\': [4, 5, 6]} },\n { \'id\': 2, \'matches\': {\'b\': [100], \'c\': [7, 8, 9]} },\n { \'id\': 3, \'matches\': {\'a\': [10, 20], \'c\': [70, 80]} }\n ]\n\nUsing the `multi` option we can group data by match A, B or C.\n\n > r.table(\'games2\').group(r.row[\'matches\'].keys(), multi=True).run(conn)\n \n [\n {\n \'group\': \'a\',\n \'reduction\': [ <id 1>, <id 3> ]\n },\n {\n \'group\': \'b\',\n \'reduction\': [ <id 1>, <id 2> ]\n },\n {\n \'group\': \'c\',\n \'reduction\': [ <id 2>, <id 3> ]\n }\n ]\n\n(The full result set is abbreviated in the figure; `<id 
1>, <id 2>` and `<id 3>` would be the entire documents matching those keys.)\n\n*Example* Use [map](http://rethinkdb.com/api/python/map) and [sum](http://rethinkdb.com/api/python/sum) to get the total points scored for each match.\n\n r.table(\'games2\').group(r.row[\'matches\'].keys(), multi=True).ungroup().map(\n lambda doc: { \'match\': doc[\'group\'], \'total\': doc[\'reduction\'].sum(\n lambda set: set[\'matches\'][doc[\'group\']].sum()\n )}).run(conn)\n \n [\n { \'match\': \'a\', \'total\': 36 },\n { \'match\': \'b\', \'total\': 115 },\n { \'match\': \'c\', \'total\': 174 }\n ]\n\nThe inner `sum` adds the scores by match within each document; the outer `sum` adds those results together for a total across all the documents.\n\nIf you want to operate on all the groups rather than operating on each\ngroup (e.g. if you want to order the groups by their reduction), you\ncan use [ungroup](http://rethinkdb.com/api/python/ungroup/) to turn a grouped stream or\ngrouped data into an array of objects representing the groups.\n\n*Example* Ungrouping grouped data.\n\n > r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].ungroup().run(conn)\n \n [\n {\n "group": "Alice",\n "reduction": 7\n },\n {\n "group": "Bob",\n "reduction": 15\n }\n ]\n\nUngrouping is useful e.g. for ordering grouped data, or for inserting\ngrouped data into a table.\n\n*Example* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\n > r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].ungroup().order_by(\n r.desc(\'reduction\')).run(conn)\n \n [\n {\n "group": "Bob",\n "reduction": 15\n },\n {\n "group": "Alice",\n "reduction": 7\n }\n ]\n\nWhen grouped data are returned to the client, they are transformed\ninto a client-specific native type. (Something similar is done with\n[times](http://rethinkdb.com/docs/dates-and-times/).) In Python, grouped data are\ntransformed into a `dictionary`. If the group value is an `array`, the\nkey is converted to a `tuple`. If the group value is a `dictionary`,\nit will be converted to a `frozenset`.\n\nIf you instead want to receive the raw\npseudotype from the server (e.g. if you\'re planning to serialize the\nresult as JSON), you can specify `group_format: \'raw\'` as an optional\nargument to `run`:\n\n*Example* Get back the raw `GROUPED_DATA` pseudotype.\n\n > r.table(\'games\').group(\'player\').avg(\'points\').run(conn, group_format=\'raw\')\n \n {\n "$reql_type$": "GROUPED_DATA",\n "data": [\n ["Alice", 4.5],\n ["Bob", 12.5]\n ]\n }\n\nNot passing the `group_format` flag would return:\n\n {\n "Alice": 4.5,\n "Bob": 12.5\n }\n\nYou might also want to use the [ungroup](http://rethinkdb.com/api/python/ungroup/)\ncommand (see above), which will turn the grouped data into an array of\nobjects on the server.\n\nIf you run a query that returns a grouped stream, it will be\nautomatically converted to grouped data before being sent back to you\n(there is currently no efficient way to stream groups from RethinkDB).\nThis grouped data is subject to the array size limit (see [run](http://rethinkdb.com/api/python/run)).\n\nIn general, operations on grouped streams will be efficiently\ndistributed, and operations on grouped data won\'t be. You can figure\nout what you\'re working with by putting `type_of` on the end of your\nquery. 
Below are efficient and inefficient examples.\n\n*Example* Efficient operation.\n\n # r.table(\'games\').group(\'player\').type_of().run(conn)\n # Returns "GROUPED_STREAM"\n r.table(\'games\').group(\'player\').min(\'points\').run(conn) # EFFICIENT\n\n*Example* Inefficient operation.\n\n # r.table(\'games\').group(\'player\').order_by(\'score\').type_of().run(conn)\n # Returns "GROUPED_DATA"\n r.table(\'games\').group(\'player\').order_by(\'score\').nth(0).run(conn) # INEFFICIENT\n\nWhat does it mean to be inefficient here? When operating on grouped\ndata rather than a grouped stream, *all* of the data has to be\navailable on the node processing the query. This means that the\noperation will only use one server\'s resources, and will require\nmemory proportional to the size of the grouped data it\'s operating\non. (In the case of the `order_by` in the inefficient example, that\nmeans memory proportional **to the size of the table**.) The array\nlimit is also enforced for grouped data, so the `order_by` example\nwould fail for tables with more than 100,000 rows unless you used the `array_limit` option with `run`.\n\n*Example* What is the maximum number of points scored by each\nplayer in free games?\n\n > r.table(\'games\').filter(lambda game:\n game[\'type\'] = \'free\'\n ).group(\'player\').max(\'points\')[\'points\'].run(conn)\n \n {\n "Alice": 7,\n "Bob": 10\n }\n\n*Example* What is each player\'s highest even and odd score?\n\n > r.table(\'games\')\n .group(\'name\', lambda game:\n game[\'points\'] % 2\n ).max(\'points\')[\'points\'].run(conn)\n \n {\n ("Alice", 1): 7,\n ("Bob", 0): 10,\n ("Bob", 1): 15\n }\n'), - (rethinkdb.ast.RqlQuery.max, b"sequence.max(field_or_function) -> element\nsequence.max(index='index') -> element\n\nFinds the maximum element of a sequence. The `max` command can be called with:\n\n* a **field name**, to return the element of the sequence with the largest value in that field;\n* an **index** (the primary key or a secondary index), to return the element of the sequence with the largest value in that index;\n* a **function**, to apply the function to every element within the sequence and return the element which returns the largest value from the function, ignoring any elements where the function produces a non-existence error.\n\nFor more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nCalling `max` on an empty sequence will throw a non-existence error; this can be handled using the [default](http://rethinkdb.com/api/python/default/) command.\n\n*Example* Return the maximum value in the list `[3, 5, 7]`.\n\n r.expr([3, 5, 7]).max().run(conn)\n\n*Example* Return the user who has scored the most points.\n\n r.table('users').max('points').run(conn)\n\n*Example* The same as above, but using a secondary index on the `points` field.\n\n r.table('users').max(index='points').run(conn)\n\n*Example* Return the user who has scored the most points, adding in bonus points from a separate field using a function.\n\n r.table('users').max(lambda user:\n user['points'] + user['bonus_points']\n ).run(conn)\n\n*Example* Return the highest number of points any user has ever scored. 
This returns the value of that `points` field, not a document.\n\n r.table('users').max('points')['points'].run(conn)\n\n*Example* Return the user who has scored the most points, but add a default `None` return value to prevent an error if no user has ever scored points.\n\n r.table('users').max('points').default(None).run(conn)\n"), - (rethinkdb.ast.RqlQuery.min, b"sequence.min(field_or_function) -> element\nsequence.min(index='index') -> element\n\nFinds the minimum element of a sequence. The `min` command can be called with:\n\n* a **field name**, to return the element of the sequence with the smallest value in that field;\n* an **index** (the primary key or a secondary index), to return the element of the sequence with the smallest value in that index;\n* a **function**, to apply the function to every element within the sequence and return the element which returns the smallest value from the function, ignoring any elements where the function produces a non-existence error.\n\nFor more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nCalling `min` on an empty sequence will throw a non-existence error; this can be handled using the [default](http://rethinkdb.com/api/python/default/) command.\n\n*Example* Return the minimum value in the list `[3, 5, 7]`.\n\n r.expr([3, 5, 7]).min().run(conn)\n\n*Example* Return the user who has scored the fewest points.\n\n r.table('users').min('points').run(conn)\n\n*Example* The same as above, but using a secondary index on the `points` field.\n\n r.table('users').min(index='points').run(conn)\n\n*Example* Return the user who has scored the fewest points, adding in bonus points from a separate field using a function.\n\n r.table('users').min(lambda user:\n user['points'] + user['bonus_points']\n ).run(conn)\n\n*Example* Return the smallest number of points any user has ever scored. This returns the value of that `points` field, not a document.\n\n r.table('users').min('points')['points'].run(conn)\n\n*Example* Return the user who has scored the fewest points, but add a default `None` return value to prevent an error if no user has ever scored points.\n\n r.table('users').min('points').default(None).run(conn)\n"), - (rethinkdb.ast.RqlQuery.reduce, b'sequence.reduce(reduction_function) -> value\n\nProduce a single value from a sequence through repeated application of a reduction\nfunction. \nThe reduction function can be called on:\n\n- two elements of the sequence\n- one element of the sequence and one result of a previous reduction\n- two results of previous reductions\n\nThe reduction function can be called on the results of two previous reductions because the\n`reduce` command is distributed and parallelized across shards and CPU cores. A common\nmistaken when using the `reduce` command is to suppose that the reduction is executed\nfrom left to right. Read the [map-reduce in RethinkDB](http://rethinkdb.com/docs/map-reduce/) article to\nsee an example.\n\nIf the sequence is empty, the server will produce a `RqlRuntimeError` that can be\ncaught with `default`. 
\nIf the sequence has only one element, the first element will be returned.\n\n*Example* Return the number of documents in the table `posts`.\n\n r.table("posts").map(lambda doc: 1)\n .reduce(lambda left, right: left+right)\n .default(0).run(conn)\n\nA shorter way to execute this query is to use [count](http://rethinkdb.com/api/python/count).\n\n*Example* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the number of comments for all posts.\n\n r.table("posts").map(lambda doc:\n doc["comments"].count()\n ).reduce(lambda left, right:\n left+right\n ).default(0).run(conn)\n\n*Example* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the maximum number of comments per post.\n\n r.table("posts").map(lambda doc:\n doc["comments"].count()\n ).reduce(lambda left, right:\n r.branch(\n left > right,\n left,\n right\n )\n ).default(0).run(conn)\n\nA shorter way to execute this query is to use [max](http://rethinkdb.com/api/python/max).\n'), - (rethinkdb.ast.RqlQuery.sum, b"sequence.sum([field_or_function]) -> number\n\nSums all the elements of a sequence. If called with a field name,\nsums all the values of that field in the sequence, skipping elements\nof the sequence that lack that field. If called with a function,\ncalls that function on every element of the sequence and sums the\nresults, skipping elements of the sequence where that function returns\n`None` or a non-existence error.\n\nReturns `0` when called on an empty sequence.\n\n*Example* What's 3 + 5 + 7?\n\n r.expr([3, 5, 7]).sum().run(conn)\n\n*Example* How many points have been scored across all games?\n\n r.table('games').sum('points').run(conn)\n\n*Example* How many points have been scored across all games,\ncounting bonus points?\n\n r.table('games').sum(lambda game:\n game['points'] + game['bonus_points']\n ).run(conn)\n"), - (rethinkdb.ast.RqlQuery.ungroup, b'grouped_stream.ungroup() -> array\ngrouped_data.ungroup() -> array\n\nTakes a grouped stream or grouped data and turns it into an array of\nobjects representing the groups. Any commands chained after `ungroup`\nwill operate on this array, rather than operating on each group\nindividually. This is useful if you want to e.g. 
order the groups by\nthe value of their reduction.\n\nThe format of the array returned by `ungroup` is the same as the\ndefault native format of grouped data in the JavaScript driver and\ndata explorer.\n\n*Example* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\nSuppose that the table `games` has the following data:\n\n [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ]\n\nWe can use this query:\n\n r.table(\'games\')\n .group(\'player\').max(\'points\')[\'points\']\n .ungroup().order_by(r.desc(\'reduction\')).run(conn)\n\nResult: \n\n [\n {\n "group": "Bob",\n "reduction": 15\n },\n {\n "group": "Alice",\n "reduction": 7\n }\n ]\n\n*Example* Select one random player and all their games.\n\n r.table(\'games\').group(\'player\').ungroup().sample(1).run(conn)\n\nResult:\n\n [\n {\n "group": "Bob",\n "reduction": [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n ]\n\nNote that if you didn\'t call `ungroup`, you would instead select one\nrandom game from each player:\n\n r.table(\'games\').group(\'player\').sample(1).run(conn)\n\nResult:\n\n {\n "Alice": [\n {"id": 5, "player": "Alice", "points": 7, "type": "free"}\n ],\n "Bob": [\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n\n*Example* Types!\n\n r.table(\'games\').group(\'player\').type_of().run(conn) # Returns "GROUPED_STREAM"\n r.table(\'games\').group(\'player\').ungroup().type_of().run(conn) # Returns "ARRAY"\n r.table(\'games\').group(\'player\').avg(\'points\').run(conn) # Returns "GROUPED_DATA"\n r.table(\'games\').group(\'player\').avg(\'points\').ungroup().run(conn) #Returns "ARRAY"\n'), - (rethinkdb.args, b"r.args(array) -> special\n\n`r.args` is a special term that's used to splice an array of arguments\ninto another term. This is useful when you want to call a variadic\nterm such as `get_all` with a set of arguments produced at runtime.\n\nThis is analogous to unpacking argument lists in Python.\n\n*Example* Get Alice and Bob from the table `people`.\n\n r.table('people').get_all('Alice', 'Bob').run(conn)\n # or\n r.table('people').get_all(r.args(['Alice', 'Bob'])).run(conn)\n\n*Example* Get all of Alice's children from the table `people`.\n\n # r.table('people').get('Alice') returns {'id': 'Alice', 'children': ['Bob', 'Carol']}\n r.table('people').get_all(r.args(r.table('people').get('Alice')['children'])).run(conn)\n"), - (rethinkdb.binary, b'r.binary(data) -> binary\n\nEncapsulate binary data within a query.\n\nThe type of data `binary` accepts depends on the client language. In Python, it expects a parameter of `bytes` type. Using a `bytes` object within a query implies the use of `binary` and the ReQL driver will automatically perform the coercion (in Python 3 only).\n\nBinary objects returned to the client in JavaScript will also be of the `bytes` type. 
This can be changed with the `binary_format` option provided to [run](http://rethinkdb.com/api/python/run) to return "raw" objects.\n\nOnly a limited subset of ReQL commands may be chained after `binary`:\n\n* [coerce_to](http://rethinkdb.com/api/python/coerce_to/) can coerce `binary` objects to `string` types\n* [count](http://rethinkdb.com/api/python/count/) will return the number of bytes in the object\n* [slice](http://rethinkdb.com/api/python/slice/) will treat bytes like array indexes (i.e., `slice(10,20)` will return bytes 10–19)\n* [type_of](http://rethinkdb.com/api/python/type_of) returns `PTYPE<BINARY>`\n* [info](http://rethinkdb.com/api/python/info) will return information on a binary object.\n\n*Example* Save an avatar image to a existing user record.\n\n f = open(\'./default_avatar.png\', \'rb\')\n avatar_image = f.read()\n f.close()\n r.table(\'users\').get(100).update({\'avatar\': r.binary(avatar_image)}).run(conn)\n\n*Example* Get the size of an existing avatar image.\n\n r.table(\'users\').get(100)[\'avatar\'].count().run(conn)\n \n 14156\n\nRead more details about RethinkDB\'s binary object support: [Storing binary objects](http://rethinkdb.com/docs/storing-binary/).\n'), - (rethinkdb.branch, b'r.branch(test, true_branch, false_branch) -> any\n\nIf the `test` expression returns `False` or `None`, the `false_branch` will be evaluated.\nOtherwise, the `true_branch` will be evaluated.\n \nThe `branch` command is effectively an `if` renamed due to language constraints.\n\n*Example* Return heroes and superheroes.\n\n r.table(\'marvel\').map(\n r.branch(\n r.row[\'victories\'] > 100,\n r.row[\'name\'] + \' is a superhero\',\n r.row[\'name\'] + \' is a hero\'\n )\n ).run(conn)\n\nIf the documents in the table `marvel` are:\n\n [{\n "name": "Iron Man",\n "victories": 214\n },\n {\n "name": "Jubilee",\n "victories": 9\n }]\n\nThe results will be:\n\n [\n "Iron Man is a superhero",\n "Jubilee is a hero"\n ]\n'), - (rethinkdb.ast.RqlQuery.coerce_to, b"sequence.coerce_to('array') -> array\nvalue.coerce_to('string') -> string\nstring.coerce_to('number') -> number\narray.coerce_to('object') -> object\nobject.coerce_to('array') -> array\nbinary.coerce_to('string') -> string\nstring.coerce_to('binary') -> binary\n\nConvert a value of one type into another.\n\n* a sequence, selection or object can be coerced to an array\n* an array of key-value pairs can be coerced to an object\n* a string can be coerced to a number\n* any datum (single value) can be coerced to a string\n* a binary object can be coerced to a string and vice-versa\n\n*Example* Coerce a stream to an array to store its output in a field. (A stream cannot be stored in a field directly.)\n\n r.table('posts').map(lambda post: post.merge(\n { 'comments': r.table('comments').get_all(post['id'], index='post_id').coerce_to('array') }\n )).run(conn)\n\n*Example* Coerce an array of pairs into an object.\n\n r.expr([['name', 'Ironman'], ['victories', 2000]]).coerce_to('object').run(conn)\n\n__Note:__ To coerce a list of key-value pairs like `['name', 'Ironman', 'victories', 2000]` to an object, use the [object](http://rethinkdb.com/api/python/object) command.\n\n*Example* Coerce a number to a string.\n\n r.expr(1).coerce_to('string').run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.default, b'value.default(default_value) -> any\nsequence.default(default_value) -> any\n\nHandle non-existence errors. Tries to evaluate and return its first argument. 
If an\nerror related to the absence of a value is thrown in the process, or if its first\nargument returns `None`, returns its second argument. (Alternatively, the second argument\nmay be a function which will be called with either the text of the non-existence error\nor `None`.)\n\n*Example* Suppose we want to retrieve the titles and authors of the table `posts`.\nIn the case where the author field is missing or `None`, we want to retrieve the string\n`Anonymous`.\n\n r.table("posts").map(lambda post:\n {\n "title": post["title"],\n "author": post["author"].default("Anonymous")\n }\n ).run(conn)\n\nWe can rewrite the previous query with `r.branch` too.\n\n r.table("posts").map(lambda post:\n r.branch(\n post.has_fields("author"),\n {\n "title": post["title"],\n "author": post["author"]\n },\n {\n "title": post["title"],\n "author": "Anonymous" \n }\n )\n ).run(conn)\n\n*Example* The `default` command can be useful to filter documents too. Suppose\nwe want to retrieve all our users who are not grown-ups or whose age is unknown\n(i.e the field `age` is missing or equals `None`). We can do it with this query:\n\n r.table("users").filter(lambda user:\n (user["age"] < 18).default(True)\n ).run(conn)\n\nOne more way to write the previous query is to set the age to be `-1` when the\nfield is missing.\n\n r.table("users").filter(lambda user:\n user["age"].default(-1) < 18\n ).run(conn)\n\nOne last way to do the same query is to use `has_fields`.\n\n r.table("users").filter(lambda user:\n user.has_fields("age").not_() | (user["age"] < 18)\n ).run(conn)\n\nThe body of every `filter` is wrapped in an implicit `.default(False)`. You can overwrite\nthe value `False` by passing an option in filter, so the previous query can also be\nwritten like this.\n\n r.table("users").filter(\n lambda user: (user["age"] < 18).default(True),\n default=True\n ).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.do, b"any.do(function) -> any\nr.do([args]*, function) -> any\nany.do(expr) -> any\nr.do([args]*, expr) -> any\n\nCall an anonymous function using return values from other ReQL commands or queries as arguments.\n\nThe last argument to `do` (or, in some forms, the only argument) is an expression or an anonymous function which receives values from either the previous arguments or from prefixed commands chained before `do`. The `do` command is essentially a single-element [map](http://rethinkdb.com/api/python/map/), letting you map a function over just one document. This allows you to bind a query result to a local variable within the scope of `do`, letting you compute the result just once and reuse it in a complex expression or in a series of ReQL commands.\n\nArguments passed to the `do` function must be basic data types, and cannot be streams or selections. (Read about [ReQL data types](http://rethinkdb.com/docs/data-types/).) While the arguments will all be evaluated before the function is executed, they may be evaluated in any order, so their values should not be dependent on one another. 
The type of `do`'s result is the type of the value returned from the function or last expression.\n\n*Example* Compute a golfer's net score for a game.\n\n r.table('players').get('86be93eb-a112-48f5-a829-15b2cb49de1d').do(\n lambda player: player['gross_score'] - player['course_handicap']\n ).run(conn)\n\n*Example* Return the name of the best scoring player in a two-player golf match.\n\n r.do(r.table('players').get(id1), r.table('players').get(id2),\n (lambda player1, player2:\n r.branch(player1['gross_score'].lt(player2['gross_score']),\n player1, player2))\n ).run(conn)\n\nNote that `branch`, the ReQL conditional command, must be used instead of `if`. See the `branch` [documentation](http://rethinkdb.com/api/python/branch) for more.\n\n*Example* Take different actions based on the result of a ReQL [insert](http://rethinkdb.com/api/python/insert) command.\n\n new_data = {\n 'id': 100,\n 'name': 'Agatha',\n 'gross_score': 57,\n 'course_handicap': 4\n }\n r.table('players').insert(new_data).do(lambda doc:\n r.branch((doc['inserted'] != 0),\n r.table('log').insert({'time': r.now(), 'response': doc, 'result': 'ok'}),\n r.table('log').insert({'time': r.now(), 'response': doc, 'result': 'error'}))\n ).run(conn)\n"), - (rethinkdb.error, b"r.error(message) -> error\n\nThrow a runtime error. If called with no arguments inside the second argument to `default`, re-throw the current error.\n\n*Example* Iron Man can't possibly have lost a battle:\n\n r.table('marvel').get('IronMan').do(\n lambda ironman: r.branch(ironman['victories'] < ironman['battles'],\n r.error('impossible code path'),\n ironman)\n ).run(conn)\n\n"), - (rethinkdb.expr, b"r.expr(value) -> value\n\nConstruct a ReQL JSON object from a native object.\n\nIf the native object is of the `bytes` type, then `expr` will return a binary object. See [binary](http://rethinkdb.com/api/python/binary) for more information.\n\n*Example* Objects wrapped with expr can then be manipulated by ReQL API functions.\n\n r.expr({'a':'b'}).merge({'b':[1,2,3]}).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.for_each, b"sequence.for_each(write_query) -> object\n\nLoop over a sequence, evaluating the given write query for each element.\n\n*Example* Now that our heroes have defeated their villains, we can safely remove them from the villain table.\n\n r.table('marvel').for_each(\n lambda hero: r.table('villains').get(hero['villainDefeated']).delete()\n ).run(conn)\n\n"), - (rethinkdb.http, b'r.http(url[, options]) -> value\nr.http(url[, options]) -> stream\n\nRetrieve data from the specified URL over HTTP. The return type depends on the `result_format` option, which checks the `Content-Type` of the response by default.\n\n*Example* Perform an HTTP `GET` and store the result in a table.\n\n r.table(\'posts\').insert(r.http(\'http://httpbin.org/get\')).run(conn)\n\nSee [the tutorial](http://rethinkdb.com/docs/external-api-access/) on `r.http` for more examples on how to use this command.\n\n* `timeout`: Number of seconds to wait before timing out and aborting the operation. Default: 30.\n\n* `reattempts`: An integer giving the number of attempts to make in cast of connection errors or potentially-temporary HTTP errors. Default: 5.\n\n* `redirects`: An integer giving the number of redirects and location headers to follow. Default: 1.\n\n* `verify`: Verify the server\'s SSL certificate, specified as a boolean. Default: True.\n\n* `result_format`: The format the result should be returned in. 
The values can be `\'text\'` (always return as a string), `\'json\'` (parse the result as JSON, raising an error if the parsing fails), `\'jsonp\'` (parse the result as [padded JSON](http://www.json-p.org/)), `\'binary\'` (return a binary object), or `\'auto\'` . The default is `\'auto\'`.\n\n When `result_format` is `\'auto\'`, the response body will be parsed according to the `Content-Type` of the response:\n * `application/json`: parse as `\'json\'`\n * `application/json-p`, `text/json-p`, `text/javascript`: parse as `\'jsonp\'`\n * `audio/*`, `video/*`, `image/*`, `application/octet-stream`: return a binary object\n * Anything else: parse as `\'text\'`\n\n* `method`: HTTP method to use for the request. One of `GET`, `POST`, `PUT`, `PATCH`, `DELETE` or `HEAD`. Default: `GET`.\n\n* `auth`: Authentication information in the form of an object with key/value pairs indicating the authentication type (in the `type` key) and any required information. Types currently supported are `basic` and `digest` for HTTP Basic and HTTP Digest authentication respectively. If `type` is omitted, `basic` is assumed. Example:\n\n\t```py\n\tr.http(\'http://httpbin.org/basic-auth/fred/mxyzptlk\',\n auth={ \'type\': \'basic\', \'user\': \'fred\', \'pass\': \'mxyzptlk\' }).run(conn)\n\t```\n\n* `params`: URL parameters to append to the URL as encoded key/value pairs, specified as an object. For example, `{ \'query\': \'banana\', \'limit\': 2 }` will be appended as `?query=banana&limit=2`. Default: none.\n\n* `header`: Extra header lines to include. The value may be an array of strings or an object. Default: none.\n\n Unless specified otherwise, `r.http` will by default use the headers `Accept-Encoding: deflate=1;gzip=0.5` and `User-Agent: RethinkDB/VERSION`.\n\n* `data`: Data to send to the server on a `POST`, `PUT`, `PATCH`, or `DELETE` request.\n\n For `PUT`, `PATCH` and `DELETE` requests, the value will be serialized to JSON and placed in the request body, and the `Content-Type` will be set to `application/json`.\n\n\tFor `POST` requests, data may be either an object or a string. Objects will be written to the body as form-encoded key/value pairs (values must be numbers, strings, or `None`). Strings will be put directly into the body. If `data` is not a string or an object, an error will be thrown.\n\n If `data` is not specified, no data will be sent.\n\n`r.http` supports depagination, which will request multiple pages in a row and aggregate the results into a stream. The use of this feature is controlled by the optional arguments `page` and `page_limit`. 
Either none or both of these arguments must be provided.\n\n* `page`: This option may specify either a built-in pagination strategy (as a string), or a function to provide the next URL and/or `params` to request.\n\n At the moment, the only supported built-in is `\'link-next\'`, which is equivalent to `lambda info: info[\'header\'][\'link\'][\'rel="next"\'].default(None)`.\n\n *Example* Perform a GitHub search and collect up to 3 pages of results.\n\n ```py\n r.http("https://api.github.com/search/code?q=addClass+user:mozilla",\n page=\'link-next\', page_limit=3).run(conn)\n ```\n\n As a function, `page` takes one parameter, an object of the format:\n\n ```py\n {\n \'params\': object, # the URL parameters used in the last request\n \'header\': object, # the HTTP headers of the last response as key/value pairs\n \'body\': value # the body of the last response in the format specified by `result_format`\n }\n ```\n\n The `header` field will be a parsed version of the header with fields lowercased, like so:\n\n ```py\n {\n \'content-length\': \'1024\',\n \'content-type\': \'application/json\',\n \'date\': \'Thu, 1 Jan 1970 00:00:00 GMT\',\n \'link\': {\n \'rel="last"\': \'http://example.com/?page=34\',\n \'rel="next"\': \'http://example.com/?page=2\'\n }\n }\n ```\n\n The `page` function may return a string corresponding to the next URL to request, `None` indicating that there is no more to get, or an object of the format:\n\n ```py\n {\n \'url\': string, # the next URL to request, or None for no more pages\n \'params\': object # new URL parameters to use, will be merged with the previous request\'s params\n }\n ```\n\n* `page_limit`: An integer specifying the maximum number of requests to issue using the `page` functionality. This is to prevent overuse of API quotas, and must be specified with `page`.\n * `-1`: no limit\n * `0`: no requests will be made, an empty stream will be returned\n * `n`: `n` requests will be made\n\n# Examples\n\n*Example* Perform multiple requests with different parameters.\n\n r.expr([1, 2, 3]).map(lambda i: r.http(\'http://httpbin.org/get\',\n params={ \'user\': i })).run(conn)\n\n*Example* Perform a `PUT` request for each item in a table.\n\n r.table(\'data\').map(lambda row: r.http(\'http://httpbin.org/put\',\n method=\'PUT\', data=row)).run(conn)\n\n*Example* Perform a `POST` request with accompanying data.\n\nUsing form-encoded data:\n\n r.http(\'http://httpbin.org/post\',\n method=\'POST\',\n data={ \'player\': \'Bob\', \'game\': \'tic tac toe\' }).run(conn)\n\nUsing JSON data:\n\n r.http(\'http://httpbin.org/post\',\n method=\'POST\',\n data=r.expr(value).coerce_to(\'string\'),\n header={ \'Content-Type\': \'application/json\' }).run(conn)\n\n*Example* Perform depagination with a custom `page` function.\n\n r.http(\'example.com/pages\',\n page=lambda info: info[\'body\'][\'meta\'][\'next\'].default(None),\n page_limit=5).run(conn)\n\n# Learn more\n\nSee [the tutorial](http://rethinkdb.com/docs/external-api-access/) on `r.http` for more examples on how to use this command.\n'), - (rethinkdb.ast.RqlQuery.info, b"any.info() -> object\n\nGet information about a ReQL value.\n\n*Example* Get information about a table such as primary key, or cache size.\n\n r.table('marvel').info().run(conn)\n\n"), - (rethinkdb.js, b'r.js(js_string[, timeout=<number>]) -> value\n\nCreate a javascript expression.\n\n*Example* Concatenate two strings using JavaScript.\n\n`timeout` is the number of seconds before `r.js` times out. 
The default value is 5 seconds.\n\n{% infobox %}\nWhenever possible, you should use native ReQL commands rather than `r.js` for better performance.\n{% endinfobox %}\n\n r.js("\'str1\' + \'str2\'").run(conn)\n\n*Example* Select all documents where the \'magazines\' field is greater than 5 by running JavaScript on the server.\n\n r.table(\'marvel\').filter(\n r.js(\'(function (row) { return row.magazines.length > 5; })\')\n ).run(conn)\n\n*Example* You may also specify a timeout in seconds (defaults to 5).\n\n r.js(\'while(true) {}\', timeout=1.3).run(conn)\n\n'), - (rethinkdb.json, b'r.json(json_string) -> value\n\nParse a JSON string on the server.\n\n*Example* Send an array to the server\'\n\n r.json("[1,2,3]").run(conn)\n\n'), - (rethinkdb.range, b'r.range() -> stream\nr.range([start_value, ]end_value) -> stream\n\nGenerate a stream of sequential integers in a specified range. `range` takes 0, 1 or 2 arguments:\n\n* With no arguments, `range` returns an "infinite" stream from 0 up to and including the maximum integer value;\n* With one argument, `range` returns a stream from 0 up to but not including the end value;\n* With two arguments, `range` returns a stream from the start value up to but not including the end value.\n\nNote that the left bound (including the implied left bound of 0 in the 0- and 1-argument form) is always closed and the right bound is always open: the start value will always be included in the returned range and the end value will *not* be included in the returned range.\n\nAny specified arguments must be integers, or a `RqlRuntimeError` will be thrown. If the start value is equal or to higher than the end value, no error will be thrown but a zero-element stream will be returned.\n\n*Example* Return a four-element range of `[0, 1, 2, 3]`.\n\n > r.range(4).run(conn)\n \n [0, 1, 2, 3]\n\nYou can also use the [limit](http://rethinkdb.com/api/python/limit) command with the no-argument variant to achieve the same result in this case:\n\n > r.range().limit(4).run(conn)\n \n [0, 1, 2, 3]\n\n*Example* Return a range from -5 through 5.\n\n > r.range(-5, 6).run(conn)\n \n [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]\n'), - (rethinkdb.ast.RqlQuery.to_json_string, b'value.to_json_string() -> string\nvalue.to_json() -> string\n\nConvert a ReQL value or object to a JSON string. You may use either `to_json_string` or `to_json`.\n\n*Example* Get a ReQL document as a JSON string.\n\n > r.table(\'hero\').get(1).to_json()\n \n \'{"id": 1, "name": "Batman", "city": "Gotham", "powers": ["martial arts", "cinematic entrances"]}\'\n'), - (rethinkdb.ast.RqlQuery.to_json, b'value.to_json_string() -> string\nvalue.to_json() -> string\n\nConvert a ReQL value or object to a JSON string. You may use either `to_json_string` or `to_json`.\n\n*Example* Get a ReQL document as a JSON string.\n\n > r.table(\'hero\').get(1).to_json()\n \n \'{"id": 1, "name": "Batman", "city": "Gotham", "powers": ["martial arts", "cinematic entrances"]}\'\n'), - (rethinkdb.ast.RqlQuery.type_of, b'any.type_of() -> string\n\nGets the type of a value.\n\n*Example* Get the type of a string.\n\n r.expr("foo").type_of().run(conn)\n\n'), - (rethinkdb.uuid, b'r.uuid() -> string\n\nReturn a UUID (universally unique identifier), a string that can be used as a unique ID.\n\n*Example* Generate a UUID.\n\n > r.uuid().run(conn)\n \n 27961a0e-f4e8-4eb3-bf95-c5203e1d87b9\n'), - (rethinkdb.net.Cursor.close, b'cursor.close()\n\nClose a cursor. 
Closing a cursor cancels the corresponding query and frees the memory\nassociated with the open request.\n\n*Example* Close a cursor.\n\n cursor.close()\n'), - (rethinkdb.net.Cursor.next, b"cursor.next([wait=True])\n\nGet the next element in the cursor.\n\nThe optional `wait` argument specifies whether to wait for the next available element and how long to wait:\n\n* `True`: Wait indefinitely (the default).\n* `False`: Do not wait at all. If data is immediately available, it will be returned; if it is not available, a `RqlDriverError` will be raised.\n* number: Wait up the specified number of seconds for data to be available before raising `RqlDriverError`.\n\nThe behavior of `next` will be identical with `False`, `None` or the number `0`.\n\nCalling `next` the first time on a cursor provides the first element of the cursor. If the data set is exhausted (e.g., you have retrieved all the documents in a table), a `StopIteration` error will be raised when `next` is called.\n\n*Example* Retrieve the next element.\n\n cursor = r.table('superheroes').run(conn)\n doc = cursor.next()\n\n*Example* Retrieve the next element on a [changefeed](http://rethinkdb.com/docs/changefeeds/python), waiting up to five seconds.\n\n cursor = r.table('superheroes').changes().run(conn)\n doc = cursor.next(wait=5)\n\n__Note:__ RethinkDB sequences can be iterated through via the Python [Iterable][it] interface. The canonical way to retrieve all the results is to use a [for...in](../each/) loop or [list()](../to_array/).\n\n[it]: https://docs.python.org/3.4/library/stdtypes.html#iterator-types\n"), - (rethinkdb.ast.RqlQuery.date, b'time.date() -> time\n\nReturn a new time object only based on the day, month and year (ie. the same day at 00:00).\n\n*Example* Retrieve all the users whose birthday is today\n\n r.table("users").filter(lambda user:\n user["birthdate"].date() == r.now().date()\n ).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.day, b'time.day() -> number\n\nReturn the day of a time object as a number between 1 and 31.\n\n*Example* Return the users born on the 24th of any month.\n\n r.table("users").filter(\n r.row["birthdate"].day() == 24\n )\n\n'), - (rethinkdb.ast.RqlQuery.day_of_week, b'time.day_of_week() -> number\n\nReturn the day of week of a time object as a number between 1 and 7 (following ISO 8601 standard). For your convenience, the terms r.monday, r.tuesday etc. are defined and map to the appropriate integer.\n\n*Example* Return today\'s day of week.\n\n r.now().day_of_week().run(conn)\n\n*Example* Retrieve all the users who were born on a Tuesday.\n\n r.table("users").filter( lambda user:\n user["birthdate"].day_of_week().eq(r.tuesday)\n )\n\n'), - (rethinkdb.ast.RqlQuery.day_of_year, b'time.day_of_year() -> number\n\nReturn the day of the year of a time object as a number between 1 and 366 (following ISO 8601 standard).\n\n*Example* Retrieve all the users who were born the first day of a year.\n\n r.table("users").filter(\n r.row["birthdate"].day_of_year() == 1\n ).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.during, b'time.during(start_time, end_time[, left_bound="closed", right_bound="open"])\n -> bool\n\nReturn whether a time is between two other times. By default, this is inclusive of the start time and exclusive of the end time. 
Set `left_bound` and `right_bound` to explicitly include (`closed`) or exclude (`open`) that endpoint of the range.\n\n*Example* Retrieve all the posts that were posted between December 1st, 2013 (inclusive) and December 10th, 2013 (exclusive).\n\n r.table("posts").filter(\n r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"))\n ).run(conn)\n\n*Example* Retrieve all the posts that were posted between December 1st, 2013 (exclusive) and December 10th, 2013 (inclusive).\n\n r.table("posts").filter(\n r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"), left_bound="open", right_bound="closed")\n ).run(conn)\n\n'), - (rethinkdb.epoch_time, b'r.epoch_time(epoch_time) -> time\n\nCreate a time object based on seconds since epoch. The first argument is a double and\nwill be rounded to three decimal places (millisecond-precision).\n\n*Example* Update the birthdate of the user "John" to November 3rd, 1986.\n\n r.table("user").get("John").update({"birthdate": r.epoch_time(531360000)}).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.hours, b'time.hours() -> number\n\nReturn the hour in a time object as a number between 0 and 23.\n\n*Example* Return all the posts submitted after midnight and before 4am.\n\n r.table("posts").filter(lambda post:\n post["date"].hours() < 4\n ).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.in_timezone, b"time.in_timezone(timezone) -> time\n\nReturn a new time object with a different timezone. While the time stays the same, the results returned by methods such as hours() will change since they take the timezone into account. The timezone argument has to be of the ISO 8601 format.\n\n*Example* Hour of the day in San Francisco (UTC/GMT -8, without daylight saving time).\n\n r.now().in_timezone('-08:00').hours().run(conn)\n"), - (rethinkdb.iso8601, b'r.iso8601(iso8601Date[, default_timezone=\'\']) -> time\n\nCreate a time object based on an ISO 8601 date-time string (e.g. \'2013-01-01T01:01:01+00:00\'). We support all valid ISO 8601 formats except for week dates. If you pass an ISO 8601 date-time without a time zone, you must specify the time zone with the `default_timezone` argument. Read more about the ISO 8601 format at [Wikipedia](http://en.wikipedia.org/wiki/ISO_8601).\n\n*Example* Update the time of John\'s birth.\n\n r.table("user").get("John").update({"birth": r.iso8601(\'1986-11-03T08:30:00-07:00\')}).run(conn)\n'), - (rethinkdb.ast.RqlQuery.minutes, b'time.minutes() -> number\n\nReturn the minute in a time object as a number between 0 and 59.\n\n*Example* Return all the posts submitted during the first 10 minutes of every hour.\n\n r.table("posts").filter(lambda post:\n post["date"].minutes() < 10\n ).run(conn)\n'), - (rethinkdb.ast.RqlQuery.month, b'time.month() -> number\n\nReturn the month of a time object as a number between 1 and 12. For your convenience, the terms r.january, r.february etc. are defined and map to the appropriate integer.\n\n*Example* Retrieve all the users who were born in November.\n\n r.table("users").filter(\n r.row["birthdate"].month() == 11\n )\n\n*Example* Retrieve all the users who were born in November.\n\n r.table("users").filter(\n r.row["birthdate"].month() == r.november\n )\n\n'), - (rethinkdb.now, b'r.now() -> time\n\nReturn a time object representing the current time in UTC. 
The command now() is computed once when the server receives the query, so multiple instances of r.now() will always return the same time inside a query.\n\n*Example* Add a new user with the time at which he subscribed.\n\n r.table("users").insert({\n "name": "John",\n "subscription_date": r.now()\n }).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.seconds, b'time.seconds() -> number\n\nReturn the seconds in a time object as a number between 0 and 59.999 (double precision).\n\n*Example* Return the post submitted during the first 30 seconds of every minute.\n\n r.table("posts").filter(lambda post:\n post["date"].seconds() < 30\n ).run(conn)\n\n'), - (rethinkdb.time, b'r.time(year, month, day[, hour, minute, second], timezone)\n -> time\n\nCreate a time object for a specific time.\n\nA few restrictions exist on the arguments:\n\n- `year` is an integer between 1400 and 9,999.\n- `month` is an integer between 1 and 12.\n- `day` is an integer between 1 and 31.\n- `hour` is an integer.\n- `minutes` is an integer.\n- `seconds` is a double. Its value will be rounded to three decimal places\n(millisecond-precision).\n- `timezone` can be `\'Z\'` (for UTC) or a string with the format `\xc2\xb1[hh]:[mm]`.\n\n*Example* Update the birthdate of the user "John" to November 3rd, 1986 UTC.\n\n r.table("user").get("John").update({"birthdate": r.time(1986, 11, 3, \'Z\')}).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.time_of_day, b'time.time_of_day() -> number\n\nReturn the number of seconds elapsed since the beginning of the day stored in the time object.\n\n*Example* Retrieve posts that were submitted before noon.\n\n r.table("posts").filter(\n r.row["date"].time_of_day() <= 12*60*60\n ).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.timezone, b'time.timezone() -> string\n\nReturn the timezone of the time object.\n\n*Example* Return all the users in the "-07:00" timezone.\n\n r.table("users").filter(lambda user:\n user["subscriptionDate"].timezone() == "-07:00"\n )\n\n'), - (rethinkdb.ast.RqlQuery.to_epoch_time, b'time.to_epoch_time() -> number\n\nConvert a time object to its epoch time.\n\n*Example* Return the current time in seconds since the Unix Epoch with millisecond-precision.\n\n r.now().to_epoch_time()\n\n'), - (rethinkdb.ast.RqlQuery.to_iso8601, b'time.to_iso8601() -> string\n\nConvert a time object to a string in ISO 8601 format.\n\n*Example* Return the current ISO 8601 time.\n\n > r.now().to_iso8601().run(conn)\n \n "2015-04-20T18:37:52.690+00:00"\n\n'), - (rethinkdb.ast.RqlQuery.year, b'time.year() -> number\n\nReturn the year of a time object.\n\n*Example* Retrieve all the users born in 1986.\n\n r.table("users").filter(lambda user:\n user["birthdate"].year() == 1986\n ).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.append, b"array.append(value) -> array\n\nAppend a value to an array.\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].append('newBoots').run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.__getitem__, b"sequence[attr] -> sequence\nsingleSelection[attr] -> value\nobject[attr] -> value\narray[index] -> value\n\nGet a single field from an object. 
If called on a sequence, gets that field from every object in the sequence, skipping objects that lack it.\n\n*Example* What was Iron Man's first appearance in a comic?\n\n r.table('marvel').get('IronMan')['firstAppearance'].run(conn)\n\nThe `[]` command also accepts integer arguments as array offsets, like the [nth](http://rethinkdb.com/api/python/nth) command.\n\n*Example* Get the fourth element in a sequence. (The first element is position `0`, so the fourth element is position `3`.)\n\n r.expr([10, 20, 30, 40, 50])[3]\n \n 40\n"), - (rethinkdb.ast.RqlQuery.change_at, b'array.change_at(index, value) -> array\n\nChange a value in an array at a given index. Returns the modified array.\n\n*Example* Bruce Banner hulks out.\n\n r.expr(["Iron Man", "Bruce", "Spider-Man"]).change_at(1, "Hulk").run(conn)\n'), - (rethinkdb.ast.RqlQuery.delete_at, b"array.delete_at(index [,endIndex]) -> array\n\nRemove one or more elements from an array at a given index. Returns the modified array. (Note: `delete_at` operates on arrays, not documents; to delete documents, see the [delete](http://rethinkdb.com/api/python/delete) command.)\n\nIf only `index` is specified, `delete_at` removes the element at that index. If both `index` and `end_index` are specified, `delete_at` removes the range of elements between `index` and `end_index`, inclusive of `index` but not inclusive of `end_index`.\n\nIf `end_index` is specified, it must not be less than `index`. Both `index` and `end_index` must be within the array's bounds (i.e., if the array has 10 elements, an `index` or `end_index` of 10 or higher is invalid).\n\nBy using a negative `index` you can delete from the end of the array. `-1` is the last element in the array, `-2` is the second-to-last element, and so on. You may specify a negative `end_index`, although just as with a positive value, this will not be inclusive. 
The range `(2,-1)` specifies the third element through the next-to-last element.\n\n*Example* Delete the second element of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(1).run(conn)\n \n ['a', 'c', 'd', 'e', 'f']\n\n*Example* Delete the second and third elements of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(1,3).run(conn)\n \n ['a', 'd', 'e', 'f']\n\n*Example* Delete the next-to-last element of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(-2).run(conn)\n \n ['a', 'b', 'c', 'd', 'f']\n\n*Example* Delete a comment on a post.\n\nGiven a post document such as:\n\n{\n id: '4cf47834-b6f9-438f-9dec-74087e84eb63',\n title: 'Post title',\n author: 'Bob',\n comments: [\n { author: 'Agatha', text: 'Comment 1' },\n { author: 'Fred', text: 'Comment 2' }\n ]\n}\n\nThe second comment can be deleted by using `update` and `delete_at` together.\n\n r.table('posts').get('4cf47834-b6f9-438f-9dec-74087e84eb63').update(\n lambda post: { 'comments': post['comments'].delete_at(1) }\n ).run(conn)\n"), - (rethinkdb.ast.RqlQuery.difference, b"array.difference(array) -> array\n\nRemove the elements of one array from another array.\n\n*Example* Retrieve Iron Man's equipment list without boots.\n\n r.table('marvel').get('IronMan')['equipment'].difference(['Boots']).run(conn)\n\n*Example* Remove Iron Man's boots from his equipment.\n\n r.table('marvel').get('IronMan')[:equipment].update(lambda doc:\n {'equipment': doc['equipment'].difference(['Boots'])}\n ).run(conn)\n"), - (rethinkdb.ast.RqlQuery.get_field, b"sequence.get_field(attr) -> sequence\nsingleSelection.get_field(attr) -> value\nobject.get_field(attr) -> value\n\nGet a single field from an object. If called on a sequence, gets that field from every\nobject in the sequence, skipping objects that lack it.\n\n*Example* What was Iron Man's first appearance in a comic?\n\n r.table('marvel').get('IronMan').get_field('firstAppearance').run(conn)\n"), - (rethinkdb.ast.RqlQuery.has_fields, b'sequence.has_fields([selector1, selector2...]) -> stream\narray.has_fields([selector1, selector2...]) -> array\nobject.has_fields([selector1, selector2...]) -> boolean\n\nTest if an object has one or more fields. An object has a field if it has that key and the key has a non-null value. For instance, the object `{\'a\': 1,\'b\': 2,\'c\': null}` has the fields `a` and `b`.\n\nWhen applied to a single object, `has_fields` returns `true` if the object has the fields and `false` if it does not. When applied to a sequence, it will return a new sequence (an array or stream) containing the elements that have the specified fields.\n\n*Example* Return the players who have won games.\n\n r.table(\'players\').has_fields(\'games_won\').run(conn)\n\n*Example* Return the players who have *not* won games. To do this, use `has_fields` with [not](http://rethinkdb.com/api/python/not), wrapped with [filter](http://rethinkdb.com/api/python/filter).\n\n r.table(\'players\').filter(~r.row.has_fields(\'games_won\')).run(conn)\n\n*Example* Test if a specific player has won any games.\n\n r.table(\'players\').get(\n \'b5ec9714-837e-400c-aa74-dbd35c9a7c4c\').has_fields(\'games_won\').run(conn)\n\n**Nested Fields**\n\n`has_fields` lets you test for nested fields in objects. 
If the value of a field is itself a set of key/value pairs, you can test for the presence of specific keys.\n\n*Example* In the `players` table, the `games_won` field contains one or more fields for kinds of games won:\n\n {\n \'games_won\': {\n \'playoffs\': 2,\n \'championships\': 1\n }\n }\n\nReturn players who have the "championships" field.\n\n r.table(\'players\').has_fields({\'games_won\': {\'championships\': true}}).run(conn)\n\nNote that `true` in the example above is testing for the existence of `championships` as a field, not testing to see if the value of the `championships` field is set to `true`. There\'s a more convenient shorthand form available. (See [pluck](http://rethinkdb.com/api/python/pluck) for more details on this.)\n\n r.table(\'players\').has_fields({\'games_won\': \'championships\'}).run(conn)\n'), - (rethinkdb.ast.RqlQuery.insert_at, b'array.insert_at(index, value) -> array\n\nInsert a value in to an array at a given index. Returns the modified array.\n\n*Example* Hulk decides to join the avengers.\n\n r.expr(["Iron Man", "Spider-Man"]).insert_at(1, "Hulk").run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.keys, b"singleSelection.keys() -> array\nobject.keys() -> array\n\nReturn an array containing all of the object's keys.\n\n*Example* Get all the keys of a row.\n\n r.table('marvel').get('ironman').keys().run(conn)\n\n"), - (rethinkdb.literal, b'r.literal(object) -> special\n\nReplace an object in a field instead of merging it with an existing object in a `merge` or `update` operation. = Using `literal` with no arguments in a `merge` or `update` operation will remove the corresponding field.\n\n*Example* Replace one nested document with another rather than merging the fields.\n\nAssume your users table has this structure:\n\n [\n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 18,\n "city": "Dallas"\n }\n } \n ...\n ]\n\nUsing `update` to modify the `data` field will normally merge the nested documents:\n\n r.table(\'users\').get(1).update({ \'data\': { \'age\': 19, \'job\': \'Engineer\' } }).run(conn)\n \n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 19,\n "city": "Dallas",\n "job": "Engineer"\n }\n } \n\nThat will preserve `city` and other existing fields. But to replace the entire `data` document with a new object, use `literal`:\n\n r.table(\'users\').get(1).update({ \'data\': r.literal({ \'age\': 19, \'job\': \'Engineer\' }) }).run(conn)\n \n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 19,\n "job": "Engineer"\n }\n } \n\n*Example* Use `literal` to remove a field from a document.\n\n r.table(\'users\').get(1).merge({ "data": r.literal() }).run(conn)\n \n {\n "id": 1,\n "name": "Alice"\n }\n'), - (rethinkdb.ast.RqlQuery.merge, b'singleSelection.merge(object|function[, object|function, ...]) -> object\nobject.merge(object|function[, object|function, ...]) -> object\nsequence.merge(object|function[, object|function, ...]) -> stream\narray.merge(object|function[, object|function, ...]) -> array\n\nMerge two or more objects together to construct a new object with properties from all. 
When there is a conflict between field names, preference is given to fields in the rightmost object in the argument list `merge` also accepts a subquery function that returns an object, which will be used similarly to a [map](http://rethinkdb.com/api/python/map/) function.\n\n*Example* Equip Thor for battle.\n\n r.table(\'marvel\').get(\'thor\').merge(\n r.table(\'equipment\').get(\'hammer\'),\n r.table(\'equipment\').get(\'pimento_sandwich\')\n ).run(conn)\n\n*Example* Equip every hero for battle, using a subquery function to retrieve their weapons.\n\n r.table(\'marvel\').merge(lambda hero:\n { \'weapons\': r.table(\'weapons\').get(hero[\'weapon_id\']) }\n ).run(conn)\n\n*Example* Use `merge` to join each blog post with its comments.\n\nNote that the sequence being merged—in this example, the comments—must be coerced from a selection to an array. Without `coerce_to` the operation will throw an error ("Expected type DATUM but found SELECTION").\n\n r.table(\'posts\').merge(lambda post:\n { \'comments\': r.table(\'comments\').get_all(post[\'id\'],\n index=\'post_id\').coerce_to(\'array\') }\n ).run(conn)\n\n*Example* Merge can be used recursively to modify object within objects.\n\n r.expr({\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10, \'cooldown\' : 20}}}).merge(\n {\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10}}}\n ).run(conn)\n\n*Example* To replace a nested object with another object you can use the literal keyword.\n\n r.expr({\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10, \'cooldown\' : 20}}}).merge(\n {\'weapons\' : r.literal({\'repulsor rays\' : {\'dmg\' : 3, \'cooldown\' : 0}})}\n ).run(conn)\n\n*Example* Literal can be used to remove keys from an object as well.\n\n r.expr({\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10, \'cooldown\' : 20}}}).merge(\n {\'weapons\' : {\'spectacular graviton beam\' : r.literal()}}\n ).run(conn)\n\n'), - (rethinkdb.object, b'r.object([key, value,]...) -> object\n\nCreates an object from a list of key-value pairs, where the keys must\nbe strings. 
`r.object(A, B, C, D)` is equivalent to\n`r.expr([[A, B], [C, D]]).coerce_to(\'OBJECT\')`.\n\n*Example* Create a simple object.\n\n > r.object(\'id\', 5, \'data\', [\'foo\', \'bar\']).run(conn)\n {\'data\': ["foo", "bar"], \'id\': 5}\n'), - (rethinkdb.ast.RqlQuery.pluck, b"sequence.pluck([selector1, selector2...]) -> stream\narray.pluck([selector1, selector2...]) -> array\nobject.pluck([selector1, selector2...]) -> object\nsingleSelection.pluck([selector1, selector2...]) -> object\n\nPlucks out one or more attributes from either an object or a sequence of objects\n(projection).\n\n*Example* We just need information about IronMan's reactor and not the rest of the\ndocument.\n\n r.table('marvel').get('IronMan').pluck('reactorState', 'reactorPower').run(conn)\n\n*Example* For the hero beauty contest we only care about certain qualities.\n\n r.table('marvel').pluck('beauty', 'muscleTone', 'charm').run(conn)\n\n*Example* Pluck can also be used on nested objects.\n\n r.table('marvel').pluck({'abilities' : {'damage' : True, 'mana_cost' : True}, 'weapons' : True}).run(conn)\n\n*Example* The nested syntax can quickly become overly verbose so there's a shorthand\nfor it.\n\n r.table('marvel').pluck({'abilities' : ['damage', 'mana_cost']}, 'weapons').run(conn)\n\nFor more information read the [nested field documentation](http://rethinkdb.com/docs/nested-fields/).\n"), - (rethinkdb.ast.RqlQuery.prepend, b"array.prepend(value) -> array\n\nPrepend a value to an array.\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].prepend('newBoots').run(conn)\n"), - (rethinkdb.row, b"r.row -> value\n\nReturns the currently visited document. Note that `row` does not work within subqueries to access nested documents; you should use anonymous functions to access those documents instead. 
(See the last example.)\n\n*Example* Get all users whose age is greater than 5.\n\n r.table('users').filter(r.row['age'] > 5).run(conn)\n\n*Example* Access the attribute 'child' of an embedded document.\n\n r.table('users').filter(r.row['embedded_doc']['child'] > 5).run(conn)\n\n*Example* Add 1 to every element of an array.\n\n r.expr([1, 2, 3]).map(r.row + 1).run(conn)\n\n*Example* For nested queries, use functions instead of `row`.\n\n r.table('users').filter(\n lambda doc: doc['name'] == r.table('prizes').get('winner')\n ).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.set_difference, b"array.set_difference(array) -> array\n\nRemove the elements of one array from another and return them as a set (an array with\ndistinct values).\n\n*Example* Check which pieces of equipment Iron Man has, excluding a fixed list.\n\n r.table('marvel').get('IronMan')['equipment'].set_difference(['newBoots', 'arc_reactor']).run(conn)\n"), - (rethinkdb.ast.RqlQuery.set_insert, b"array.set_insert(value) -> array\n\nAdd a value to an array and return it as a set (an array with distinct values).\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].set_insert('newBoots').run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.set_intersection, b"array.set_intersection(array) -> array\n\nIntersect two arrays returning values that occur in both of them as a set (an array with\ndistinct values).\n\n*Example* Check which pieces of equipment Iron Man has from a fixed list.\n\n r.table('marvel').get('IronMan')['equipment'].set_intersection(['newBoots', 'arc_reactor']).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.set_union, b"array.set_union(array) -> array\n\nAdd several values to an array and return it as a set (an array with distinct values).\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots and an arc reactor.\n\n r.table('marvel').get('IronMan')['equipment'].set_union(['newBoots', 'arc_reactor']).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.splice_at, b'array.splice_at(index, array) -> array\n\nInsert several values into an array at a given index. 
Returns the modified array.\n\n*Example* Hulk and Thor decide to join the avengers.\n\n r.expr(["Iron Man", "Spider-Man"]).splice_at(1, ["Hulk", "Thor"]).run(conn)\n'), - (rethinkdb.ast.RqlQuery.without, b"sequence.without([selector1, selector2...]) -> stream\narray.without([selector1, selector2...]) -> array\nsingleSelection.without([selector1, selector2...]) -> object\nobject.without([selector1, selector2...]) -> object\n\nThe opposite of pluck; takes an object or a sequence of objects, and returns them with\nthe specified paths removed.\n\n*Example* Since we don't need it for this computation we'll save bandwidth and leave\nout the list of IronMan's romantic conquests.\n\n r.table('marvel').get('IronMan').without('personalVictoriesList').run(conn)\n\n*Example* Without their prized weapons, our enemies will quickly be vanquished.\n\n r.table('enemies').without('weapons').run(conn)\n\n*Example* Nested objects can be used to remove the damage subfield from the weapons and abilities fields.\n\n r.table('marvel').without({'weapons' : {'damage' : True}, 'abilities' : {'damage' : True}}).run(conn)\n\n*Example* The nested syntax can quickly become overly verbose so there's a shorthand for it.\n\n r.table('marvel').without({'weapons' : 'damage', 'abilities' : 'damage'}).run(conn)\n\n"), - (rethinkdb.circle, b"r.circle([longitude, latitude], radius[, num_vertices=32, geo_system='WGS84', unit='m', fill=True]) -> geometry\nr.circle(point, radius[, {num_vertices=32, geo_system='WGS84', unit='m', fill=True]) -> geometry\n\nConstruct a circular line or polygon. A circle in RethinkDB is a polygon or line *approximating* a circle of a given radius around a given center, consisting of a specified number of vertices (default 32).\n\nThe center may be specified either by two floating point numbers, the latitude (−90 to 90) and longitude (−180 to 180) of the point on a perfect sphere (see [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system), or by a point object. The radius is a floating point number whose units are meters by default, although that may be changed with the `unit` argument.\n\nOptional arguments available with `circle` are:\n\n* `num_vertices`: the number of vertices in the polygon or line. Defaults to 32.\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n* `unit`: Unit for the radius distance. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n* `fill`: if `True` (the default) the circle is filled, creating a polygon; if `False` the circle is unfilled (creating a line).\n\n*Example* Define a circle.\n\n r.table('geo').insert({\n 'id': 300,\n 'name': 'Hayes Valley',\n 'neighborhood': r.circle([-122.423246,37.779388], 1000)\n }).run(conn)\n"), - (rethinkdb.ast.RqlQuery.distance, b"geometry.distance(geometry[, geo_system='WGS84', unit='m']) -> number\n\nCompute the distance between a point and another geometry object. At least one of the geometry objects specified must be a point.\n\nOptional arguments available with `distance` are:\n\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n* `unit`: Unit to return the distance in. 
Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n\nIf one of the objects is a polygon or a line, the point will be projected onto the line or polygon assuming a perfect sphere model before the distance is computed (using the model specified with `geo_system`). As a consequence, if the polygon or line is extremely large compared to Earth's radius and the distance is being computed with the default WGS84 model, the results of `distance` should be considered approximate due to the deviation between the ellipsoid and spherical models.\n\n*Example* Compute the distance between two points on the Earth in kilometers.\n\n > point1 = r.point(-122.423246,37.779388)\n > point2 = r.point(-117.220406,32.719464)\n > r.distance(point1, point2, unit='km').run(conn)\n \n 734.1252496021841\n"), - (rethinkdb.ast.RqlQuery.fill, b"line.fill() -> polygon\n\nConvert a Line object into a Polygon object. If the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\nIf the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them. You cannot directly construct a polygon with holes in it using `polygon`, but you can use [polygon_sub](http://rethinkdb.com/api/python/polygon_sub) to use a second polygon within the interior of the first to define a hole.\n\n*Example* Create a line object and then convert it to a polygon.\n\n r.table('geo').insert({\n 'id': 201,\n 'rectangle': r.line(\n [-122.423246,37.779388],\n [-122.423246,37.329898],\n [-121.886420,37.329898],\n [-121.886420,37.779388]\n )\n }).run(conn)\n \n r.table('geo').get(201).update({\n 'rectangle': r.row('rectangle').fill()\n }).run(conn)\n"), - (rethinkdb.geojson, b"r.geojson(geojson) -> geometry\n\nConvert a [GeoJSON][] object to a ReQL geometry object.\n\n[GeoJSON]: http://geojson.org\n\nRethinkDB only allows conversion of GeoJSON objects which have ReQL equivalents: Point, LineString, and Polygon. MultiPoint, MultiLineString, and MultiPolygon are not supported. (You could, however, store multiple points, lines and polygons in an array and use a geospatial multi index with them.)\n\nOnly longitude/latitude coordinates are supported. GeoJSON objects that use Cartesian coordinates, specify an altitude, or specify their own coordinate reference system will be rejected.\n\n*Example* Convert a GeoJSON object to a ReQL geometry object.\n\n geo_json = {\n 'type': 'Point',\n 'coordinates': [ -122.423246, 37.779388 ]\n }\n r.table('geo').insert({\n 'id': 'sfo',\n 'name': 'San Francisco',\n 'location': r.geojson(geo_json)\n }).run(conn)\n"), - (rethinkdb.ast.Table.get_intersecting, b"table.get_intersecting(geometry, index='indexname') -> selection<stream>\n\nGet all documents where the given geometry object intersects the geometry object of the requested geospatial index.\n\nThe `index` argument is mandatory. This command returns the same results as `table.filter(r.row('index').intersects(geometry))`. 
The total number of results is limited to the array size limit which defaults to 100,000, but can be changed with the `array_limit` option to [run](http://rethinkdb.com/api/python/run).\n\n*Example* Which of the locations in a list of parks intersect `circle1`?\n\n circle1 = r.circle([-117.220406,32.719464], 10, unit='mi')\n r.table('parks').get_intersecting(circle1, index='area').run(conn)\n"), - (rethinkdb.ast.Table.get_nearest, b"table.get_nearest(point, index='indexname'[, max_results=100, max_dist=100000, unit='m', geo_system='WGS84']) -> array\n\nGet all documents where the specified geospatial index is within a certain distance of the specified point (default 100 kilometers).\n\nThe `index` argument is mandatory. Optional arguments are:\n\n* `max_results`: the maximum number of results to return (default 100).\n* `unit`: Unit for the distance. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n* `max_dist`: the maximum distance from an object to the specified point (default 100 km).\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n\nThe return value will be an array of two-item objects with the keys `dist` and `doc`, set to the distance between the specified point and the document (in the units specified with `unit`, defaulting to meters) and the document itself, respectively.\n\n*Example* Return a list of enemy hideouts within 5000 meters of the secret base.\n\n secret_base = r.point(-122.422876,37.777128)\n r.table('hideouts').get_nearest(secret_base, index='location',\n max_dist=5000).run(conn)\n"), - (rethinkdb.ast.RqlQuery.includes, b"sequence.includes(geometry) -> sequence\ngeometry.includes(geometry) -> bool\n\nTests whether a geometry object is completely contained within another. When applied to a sequence of geometry objects, `includes` acts as a [filter](http://rethinkdb.com/api/python/filter), returning a sequence of objects from the sequence that include the argument.\n\n*Example* Is `point2` included within a 2000-meter circle around `point1`?\n\n > point1 = r.point(-117.220406,32.719464)\n > point2 = r.point(-117.206201,32.725186)\n > r.circle(point1, 2000).includes(point2).run(conn)\n \n True\n\n*Example* Which of the locations in a list of parks include `circle1`?\n\n circle1 = r.circle([-117.220406,32.719464], 10, unit='mi')\n r.table('parks')['area'].includes(circle1).run(conn)\n"), - (rethinkdb.ast.RqlQuery.intersects, b"sequence.intersects(geometry) -> sequence\ngeometry.intersects(geometry) -> bool\n\nTests whether two geometry objects intersect with one another. When applied to a sequence of geometry objects, `intersects` acts as a [filter](http://rethinkdb.com/api/python/filter), returning a sequence of objects from the sequence that intersect with the argument.\n\n*Example* Is `point2` within a 2000-meter circle around `point1`?\n\n > point1 = r.point(-117.220406,32.719464)\n > point2 = r.point(-117.206201,32.725186)\n > r.circle(point1, 2000).intersects(point2).run(conn)\n \n True\n\n*Example* Which of the locations in a list of parks intersect `circle1`?\n\n circle1 = r.circle([-117.220406,32.719464], 10, unit='mi')\n r.table('parks')('area').intersects(circle1).run(conn)\n"), - (rethinkdb.line, b"r.line([lon1, lat1], [lon2, lat2], ...) -> line\nr.line(point1, point2, ...) 
-> line\n\nConstruct a geometry object of type Line. The line can be specified in one of two ways:\n\n* Two or more two-item arrays, specifying latitude and longitude numbers of the line's vertices;\n* Two or more [Point](http://rethinkdb.com/api/python/point) objects specifying the line's vertices.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\n*Example* Define a line.\n\n r.table('geo').insert({\n 'id': 101,\n 'route': r.line([-122.423246,37.779388], [-121.886420,37.329898])\n }).run(conn)\n"), - (rethinkdb.point, b"r.point(longitude, latitude) -> point\n\nConstruct a geometry object of type Point. The point is specified by two floating point numbers, the longitude (−180 to 180) and latitude (−90 to 90) of the point on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\n*Example* Define a point.\n\n r.table('geo').insert({\n 'id': 1,\n 'name': 'San Francisco',\n 'location': r.point(-122.423246,37.779388)\n }).run(conn)\n"), - (rethinkdb.polygon, b"r.polygon([lon1, lat1], [lon2, lat2], ...) -> polygon\nr.polygon(point1, point2, ...) -> polygon\n\nConstruct a geometry object of type Polygon. The Polygon can be specified in one of two ways:\n\n* Three or more two-item arrays, specifying latitude and longitude numbers of the polygon's vertices;\n* Three or more [Point](http://rethinkdb.com/api/python/point) objects specifying the polygon's vertices.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\nIf the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them. You cannot directly construct a polygon with holes in it using `polygon`, but you can use [polygon_sub](http://rethinkdb.com/api/python/polygon_sub) to use a second polygon within the interior of the first to define a hole.\n\n*Example* Define a polygon.\n\n r.table('geo').insert({\n 'id': 101,\n 'rectangle': r.polygon(\n [-122.423246,37.779388],\n [-122.423246,37.329898],\n [-121.886420,37.329898],\n [-121.886420,37.779388]\n )\n }).run(conn)\n"), - (rethinkdb.ast.RqlQuery.polygon_sub, b'polygon1.polygon_sub(polygon2) -> polygon\n\nUse `polygon2` to "punch out" a hole in `polygon1`. 
`polygon2` must be completely contained within `polygon1` and must have no holes itself (it must not be the output of `polygon_sub` itself).\n\n*Example* Define a polygon with a hole punched in it.\n\n    outer_polygon = r.polygon(\n        [-122.4,37.7],\n        [-122.4,37.3],\n        [-121.8,37.3],\n        [-121.8,37.7]\n    )\n    inner_polygon = r.polygon(\n        [-122.3,37.4],\n        [-122.3,37.6],\n        [-122.0,37.6],\n        [-122.0,37.4]\n    )\n    outer_polygon.polygon_sub(inner_polygon).run(conn)\n'), - (rethinkdb.ast.RqlQuery.to_geojson, b"geometry.to_geojson() -> object\n\nConvert a ReQL geometry object to a [GeoJSON][] object.\n\n[GeoJSON]: http://geojson.org\n\n*Example* Convert a ReQL geometry object to a GeoJSON object.\n\n    > r.table('geo').get('sfo')['location'].to_geojson().run(conn)\n    \n    {\n        'type': 'Point',\n        'coordinates': [ -122.423246, 37.779388 ]\n    }\n"), - (rethinkdb.ast.RqlQuery.eq_join, b'sequence.eq_join(left_field, right_table[, index=\'id\']) -> sequence\n\nJoin tables using a field on the left-hand sequence matching primary keys or secondary indexes on the right-hand table. `eq_join` is more efficient than other ReQL join types, and operates much faster. Documents in the result set consist of pairs of left-hand and right-hand documents, matched when the field on the left-hand side exists and is non-null and an entry with that field\'s value exists in the specified index on the right-hand side.\n\nThe result set of `eq_join` is a stream or array of objects. Each object in the returned set will be an object of the form `{ left: <left-document>, right: <right-document> }`, where the values of `left` and `right` will be the joined documents. Use the [zip](http://rethinkdb.com/api/python/zip/) command to merge the `left` and `right` fields together.\n\n**Example:** Match players with the games they\'ve played against one another.\n\nThe players table contains these documents:\n\n    [\n        { \'id\': 1, \'player\': \'George\', \'gameId\': 1 },\n        { \'id\': 2, \'player\': \'Agatha\', \'gameId\': 3 },\n        { \'id\': 3, \'player\': \'Fred\', \'gameId\': 2 },\n        { \'id\': 4, \'player\': \'Marie\', \'gameId\': 2 },\n        { \'id\': 5, \'player\': \'Earnest\', \'gameId\': 1 },\n        { \'id\': 6, \'player\': \'Beth\', \'gameId\': 3 }\n    ]\n\nThe games table contains these documents:\n\n    [\n        { \'id\': 1, \'field\': \'Little Delving\' },\n        { \'id\': 2, \'field\': \'Rushock Bog\' },\n        { \'id\': 3, \'field\': \'Bucklebury\' }\n    ]\n\nJoin these tables using `gameId` on the player table and `id` on the games table:\n\n    r.table(\'players\').eq_join(\'gameId\', r.table(\'games\')).run(conn)\n\nThis will return a result set such as the following:\n\n    [\n        {\n            "left" : { "gameId" : 3, "id" : 2, "player" : "Agatha" },\n            "right" : { "id" : 3, "field" : "Bucklebury" }\n        },\n        {\n            "left" : { "gameId" : 2, "id" : 3, "player" : "Fred" },\n            "right" : { "id" : 2, "field" : "Rushock Bog" }\n        },\n        ...\n    ]\n\nWhat you likely want is the result of using `zip` with that. 
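In its simplest form that is just `zip` chained directly onto the join (a minimal sketch over the same two tables):\n\n    # sketch: same \'players\' and \'games\' tables as above\n    r.table(\'players\').eq_join(\'gameId\', r.table(\'games\')).zip().run(conn)\n\n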
For clarity, we\'ll use `without` to drop the `id` field from the games table (it conflicts with the `id` field for the players and it\'s redundant anyway), and we\'ll order it by the games.\n\n    r.table(\'players\').eq_join(\'gameId\', r.table(\'games\')).without({\'right\': "id"}).zip().order_by(\'gameId\').run(conn)\n    \n    [\n        { "field": "Little Delving", "gameId": 1, "id": 5, "player": "Earnest" },\n        { "field": "Little Delving", "gameId": 1, "id": 1, "player": "George" },\n        { "field": "Rushock Bog", "gameId": 2, "id": 3, "player": "Fred" },\n        { "field": "Rushock Bog", "gameId": 2, "id": 4, "player": "Marie" },\n        { "field": "Bucklebury", "gameId": 3, "id": 6, "player": "Beth" },\n        { "field": "Bucklebury", "gameId": 3, "id": 2, "player": "Agatha" }\n    ]\n\nFor more information, see [Table joins in RethinkDB](http://rethinkdb.com/docs/table-joins/).\n\n**Example:** Use a secondary index on the right table rather than the primary key. If players have a secondary index on their cities, we can get a list of arenas with players in the same area.\n\n    r.table(\'players\').eq_join(\'city_id\', r.table(\'arenas\'), index=\'city_id\').run(conn)\n\n**Example:** Use a nested key as the join field. Suppose the documents in the players table were structured like this:\n\n    { \'id\': 1, \'player\': \'George\', \'game\': {\'id\': 1} },\n    { \'id\': 2, \'player\': \'Agatha\', \'game\': {\'id\': 3} },\n    ...\n\nSimply specify the field using the `row` command instead of a string.\n\n    r.table(\'players\').eq_join(r.row[\'game\'][\'id\'], r.table(\'games\')).without({\'right\': \'id\'}).zip().run(conn)\n    \n    [\n        { "field": "Little Delving", "game": { "id": 1 }, "id": 5, "player": "Earnest" },\n        { "field": "Little Delving", "game": { "id": 1 }, "id": 1, "player": "George" },\n        ...\n    ]\n\n**Example:** Use a function instead of a field to join on a more complicated expression. Suppose the players have lists of favorite games ranked in order in a field such as `"favorites": [3, 2, 1]`. Get a list of players and their top favorite:\n\n    r.table(\'players3\').eq_join(\n        lambda player: player[\'favorites\'].nth(0),\n        r.table(\'games\')\n    ).without([{\'left\': [\'favorites\', \'game_id\', \'id\']}, {\'right\': \'id\'}]).zip()\n\nResult:\n\n    [\n    \t{ "field": "Rushock Bog", "name": "Fred" },\n    \t{ "field": "Little Delving", "name": "George" },\n    \t...\n    ]\n'), - (rethinkdb.ast.RqlQuery.inner_join, b"sequence.inner_join(other_sequence, predicate) -> stream\narray.inner_join(other_sequence, predicate) -> array\n\nReturns an inner join of two sequences. The returned sequence represents an intersection of the left-hand sequence and the right-hand sequence: each row of the left-hand sequence will be compared with each row of the right-hand sequence to find all pairs of rows which satisfy the predicate. Each matched pair of rows of both sequences are combined into a result row. In most cases, you will want to follow the join with [zip](http://rethinkdb.com/api/python/zip) to combine the left and right results.\n\nNote that `inner_join` is slower and much less efficient than using [eq_join](http://rethinkdb.com/api/python/eq_join/) or [concat_map](http://rethinkdb.com/api/python/concat_map/) with [get_all](http://rethinkdb.com/api/python/get_all/). 
You should avoid using `inner_join` in commands when possible.\n\n*Example* Return a list of all matchups between Marvel and DC heroes in which the DC hero could beat the Marvel hero in a fight.\n\n r.table('marvel').inner_join(r.table('dc'),\n lambda marvel_row, dc_row: marvel_row['strength'] < dc_row['strength']\n ).zip().run(conn)\n\n(Compare this to an [outer_join](http://rethinkdb.com/api/python/outer_join) with the same inputs and predicate, which would return a list of *all* Marvel heroes along with any DC heroes with a higher strength.)"), - (rethinkdb.ast.RqlQuery.outer_join, b"sequence.outer_join(other_sequence, predicate) -> stream\narray.outer_join(other_sequence, predicate) -> array\n\nReturns a left outer join of two sequences. The returned sequence represents a union of the left-hand sequence and the right-hand sequence: all documents in the left-hand sequence will be returned, each matched with a document in the right-hand sequence if one satisfies the predicate condition. In most cases, you will want to follow the join with [zip](http://rethinkdb.com/api/python/zip) to combine the left and right results.\n\nNote that `outer_join` is slower and much less efficient than using [concat_map](http://rethinkdb.com/api/python/concat_map/) with [get_all](http://rethinkdb.com/api/python/get_all). You should avoid using `outer_join` in commands when possible.\n\n*Example* Return a list of all Marvel heroes, paired with any DC heroes who could beat them in a fight.\n\n r.table('marvel').outer_join(r.table('dc'),\n lambda marvel_row, dc_row: marvel_row['strength'] < dc_row['strength']\n ).zip().run(conn)\n\n(Compare this to an [inner_join](http://rethinkdb.com/api/python/inner_join) with the same inputs and predicate, which would return a list only of the matchups in which the DC hero has the higher strength.)\n"), - (rethinkdb.ast.RqlQuery.zip, b"stream.zip() -> stream\narray.zip() -> array\n\nUsed to 'zip' up the result of a join by merging the 'right' fields into 'left' fields of each member of the sequence.\n\n*Example* 'zips up' the sequence by merging the left and right fields produced by a join.\n\n r.table('marvel').eq_join('main_dc_collaborator', r.table('dc')).zip().run(conn)\n"), - (rethinkdb.db_create, b'r.db_create(db_name) -> object\n\nCreate a database. A RethinkDB database is a collection of tables, similar to\nrelational databases.\n\nIf successful, the command returns an object with two fields:\n\n* `dbs_created`: always `1`.\n* `config_changes`: a list containing one object with two fields, `old_val` and `new_val`:\n * `old_val`: always `None`.\n * `new_val`: the database\'s new [config](http://rethinkdb.com/api/python/config) value.\n\nIf a database with the same name already exists, the command throws `RqlRuntimeError`.\n\nNote: Only alphanumeric characters and underscores are valid for the database name.\n\n*Example* Create a database named \'superheroes\'.\n\n r.db_create(\'superheroes\').run(conn)\n \n {\n "config_changes": [\n {\n "new_val": {\n "id": "e4689cfc-e903-4532-a0e6-2d6797a43f07",\n "name": "superheroes"\n },\n "old_val": None\n }\n ],\n "dbs_created": 1\n }\n\n'), - (rethinkdb.db_drop, b'r.db_drop(db_name) -> object\n\nDrop a database. 
The database, all its tables, and corresponding data will be deleted.\n\nIf successful, the command returns an object with two fields:\n\n* `dbs_dropped`: always `1`.\n* `tables_dropped`: the number of tables in the dropped database.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: the database\'s original [config](http://rethinkdb.com/api/python/config) value.\n * `new_val`: always `None`.\n\nIf the given database does not exist, the command throws `RqlRuntimeError`.\n\n*Example* Drop a database named \'superheroes\'.\n\n r.db_drop(\'superheroes\').run(conn)\n \n {\n "config_changes": [\n {\n "old_val": {\n "id": "e4689cfc-e903-4532-a0e6-2d6797a43f07",\n "name": "superheroes"\n },\n "new_val": None\n }\n ],\n "tables_dropped": 3,\n "dbs_dropped": 1\n }\n\n'), - (rethinkdb.db_list, b'r.db_list() -> array\n\nList all database names in the system. The result is a list of strings.\n\n*Example* List all databases.\n\n r.db_list().run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.changes, b'stream.changes(squash=True, include_states=False) -> stream\nsingleSelection.changes(squash=True, include_states=False) -> stream\n\nReturn an infinite stream of objects representing changes to a query.\n\nThe `squash` optional argument controls how `changes` batches change notifications:\n\n* `True`: When multiple changes to the same document occur before a batch of notifications is sent, the changes are "squashed" into one change. The client receives a notification that will bring it fully up to date with the server. This is the default.\n* `False`: All changes will be sent to the client verbatim.\n* `n`: A numeric value (floating point). Similar to `True`, but the server will wait `n` seconds to respond in order to squash as many changes together as possible, reducing network traffic.\n\nIf the `include_states` optional argument is `True`, the changefeed stream will include special status documents consisting of the field `state` and a string indicating a change in the feed\'s state. These documents can occur at any point in the feed between the notification documents described below. There are currently two states:\n\n* `{"state": "initializing"}` indicates the following documents represent initial values on the feed rather than changes. This will be the first document of a feed that returns initial values.\n* `{"state": "ready"}` indicates the following documents represent changes. This will be the first document of a feed that does *not* return initial values; otherwise, it will indicate the initial values have all been sent.\n\nPoint changefeeds will always return initial values and have an `initializing` state; feeds that return changes on unfiltered tables will never return initial values. Feeds that return changes on more complex queries may or may not return return initial values, depending on the kind of aggregation. Read the article on [Changefeeds in RethinkDB][cfr] for a more detailed discussion. 
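As a small sketch of the numeric `squash` form described above, the following changefeed asks the server to wait up to five seconds and squash the changes from that window into a single notification (the table name is the one used in the examples below, and the five-second window is arbitrary):\n\n    # sketch: \'games\' is the example table; 5.0 is an arbitrary batching window in seconds\n    feed = r.table(\'games\').changes(squash=5.0).run(conn)\n\n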
If `include_states` is `True` on a changefeed that does not return initial values, the first document on the feed will be `{"state": "ready"}`.\n\n[cfr]: /docs/changefeeds/python/\n\nIf `include_states` is `False` (the default), the status documents will not be sent on the feed.\n\nIf the table becomes unavailable, the changefeed will be disconnected, and a runtime exception will be thrown by the driver.\n\nChangefeed notifications take the form of a two-field object:\n\n {\n "old_val": <document before change>,\n "new_val": <document after change>\n }\n\nThe first notification object in the changefeed stream will contain the query\'s initial value in `new_val` and have no `old_val` field. When a document is deleted, `new_val` will be `None`; when a document is inserted, `old_val` will be `None`.\n\nCertain document transformation commands can be chained before changefeeds. For more information, read the [discussion of changefeeds][cfr] in the "Query language" documentation.\n\nThe server will buffer up to 100,000 elements. If the buffer limit is hit, early changes will be discarded, and the client will receive an object of the form `{"error": "Changefeed cache over array size limit, skipped X elements."}` where `X` is the number of elements skipped.\n\nCommands that operate on streams (such as `filter` or `map`) can usually be chained after `changes`. However, since the stream produced by `changes` has no ending, commands that need to consume the entire stream before returning (such as `reduce` or `count`) cannot.\n\nIt\'s a good idea to open changefeeds on their own connection. If you don\'t, other queries run on the same connection will experience unpredictable latency spikes while the connection blocks on more changes.\n\n*Example* Subscribe to the changes on a table.\n\nStart monitoring the changefeed in one client:\n\n for change in r.table(\'games\').changes().run(conn):\n print change\n\nAs these queries are performed in a second client, the first client would receive and print the following objects:\n\n > r.table(\'games\').insert({\'id\': 1}).run(conn)\n {\'old_val\': None, \'new_val\': {\'id\': 1}}\n \n > r.table(\'games\').get(1).update({\'player1\': \'Bob\'}).run(conn)\n {\'old_val\': {\'id\': 1}, \'new_val\': {\'id\': 1, \'player1\': \'Bob\'}}\n \n > r.table(\'games\').get(1).replace({\'id\': 1, \'player1\': \'Bob\', \'player2\': \'Alice\'}).run(conn)\n {\'old_val\': {\'id\': 1, \'player1\': \'Bob\'},\n \'new_val\': {\'id\': 1, \'player1\': \'Bob\', \'player2\': \'Alice\'}}\n \n > r.table(\'games\').get(1).delete().run(conn)\n {\'old_val\': {\'id\': 1, \'player1\': \'Bob\', \'player2\': \'Alice\'}, \'new_val\': None}\n \n > r.table_drop(\'games\').run(conn)\n RqlRuntimeError: Changefeed aborted (table unavailable)\n\n*Example* Return all the changes that increase a player\'s score.\n\n r.table(\'test\').changes().filter(\n r.row[\'new_val\'][\'score\'] > r.row[\'old_val\'][\'score\']\n ).run(conn)\n\n*Example* Return all the changes to Bob\'s score.\n\n # Note that this will have to look at and discard all the changes to\n # rows besides Bob\'s. 
There is currently no way to filter with an index\n    # on changefeeds.\n    r.table(\'test\').changes().filter(r.row[\'new_val\'][\'name\'].eq(\'Bob\')).run(conn)\n\n*Example* Return all the inserts on a table.\n\n    r.table(\'test\').changes().filter(r.row[\'old_val\'].eq(None)).run(conn)\n\n*Example* Return all the changes to game 1, with state notifications.\n\n    r.table(\'games\').get(1).changes(include_states=True).run(conn)\n    \n    # result returned on changefeed\n    {"state": "initializing"}\n    {"new_val": {"id": 1, "score": 12, "arena": "Hobbiton Field"}}\n    {"state": "ready"}\n    {\n    \t"old_val": {"id": 1, "score": 12, "arena": "Hobbiton Field"},\n    \t"new_val": {"id": 1, "score": 14, "arena": "Hobbiton Field"}\n    }\n    {\n    \t"old_val": {"id": 1, "score": 14, "arena": "Hobbiton Field"},\n    \t"new_val": {"id": 1, "score": 17, "arena": "Hobbiton Field", "winner": "Frodo"}\n    }\n\n*Example* Return all the changes to the top 10 games. This assumes the presence of a `score` secondary index on the `games` table.\n\n    r.table(\'games\').order_by(index=r.desc(\'score\')).limit(10).changes().run(conn)\n'), - (rethinkdb.ast.Table.index_create, b'table.index_create(index_name[, index_function][, multi=False, geo=False]) -> object\n\nCreate a new secondary index on a table. Secondary indexes improve the speed of many read queries at the slight cost of increased storage space and decreased write performance. For more information about secondary indexes, read the article "[Using secondary indexes in RethinkDB](http://rethinkdb.com/docs/secondary-indexes/)."\n\nRethinkDB supports different types of secondary indexes:\n\n- *Simple indexes* based on the value of a single field.\n- *Compound indexes* based on multiple fields.\n- *Multi indexes* based on arrays of values.\n- *Geospatial indexes* based on indexes of geometry objects, created when the `geo` optional argument is true.\n- Indexes based on *arbitrary expressions*.\n\nThe `index_function` can be an anonymous function or a binary representation obtained from the `function` field of [index_status](http://rethinkdb.com/api/python/index_status).\n\nIf successful, `index_create` will return an object of the form `{"created": 1}`. If an index by that name already exists on the table, a `RqlRuntimeError` will be thrown.\n\n*Example* Create a simple index based on the field `post_id`.\n\n    r.table(\'comments\').index_create(\'post_id\').run(conn)\n\n*Example* Create a simple index based on the nested field `author > name`.\n\n    r.table(\'comments\').index_create(\'author_name\', r.row["author"]["name"]).run(conn)\n\n*Example* Create a geospatial index based on the field `location`.\n\n    r.table(\'places\').index_create(\'location\', geo=True).run(conn)\n\nA geospatial index field should contain only geometry objects. It will work with geometry ReQL terms ([get_intersecting](http://rethinkdb.com/api/python/get_intersecting/) and [get_nearest](http://rethinkdb.com/api/python/get_nearest/)) as well as index-specific terms ([index_status](http://rethinkdb.com/api/python/index_status), [index_wait](http://rethinkdb.com/api/python/index_wait), [index_drop](http://rethinkdb.com/api/python/index_drop) and [index_list](http://rethinkdb.com/api/python/index_list)). 
Using terms that rely on non-geometric ordering such as [get_all](http://rethinkdb.com/api/python/get_all/), [order_by](http://rethinkdb.com/api/python/order_by/) and [between](http://rethinkdb.com/api/python/order_by/) will result in an error.\n\n*Example* Create a compound index based on the fields `post_id` and `date`.\n\n r.table(\'comments\').index_create(\'post_and_date\', [r.row["post_id"], r.row["date"]]).run(conn)\n\n*Example* Create a multi index based on the field `authors`.\n\n r.table(\'posts\').index_create(\'authors\', multi=True).run(conn)\n\n*Example* Create a geospatial multi index based on the field `towers`.\n\n r.table(\'networks\').index_create(\'towers\', geo=True, multi=True).run(conn)\n\n*Example* Create an index based on an arbitrary expression.\n\n r.table(\'posts\').index_create(\'authors\', lambda doc:\n r.branch(\n doc.has_fields("updated_at"),\n doc["updated_at"],\n doc["created_at"]\n )\n ).run(conn)\n\n*Example* Create a new secondary index based on an existing one.\n\n index = r.table(\'posts\').index_status(\'authors\').nth(0)[\'function\'].run(conn)\n r.table(\'new_posts\').index_create(\'authors\', index).run(conn)\n\n*Example* Rebuild an outdated secondary index on a table.\n\n old_index = r.table(\'posts\').index_status(\'old_index\').nth(0)[\'function\'].run(conn)\n r.table(\'posts\').index_create(\'new_index\', old_index).run(conn)\n r.table(\'posts\').index_wait(\'new_index\').run(conn)\n r.table(\'posts\').index_rename(\'new_index\', \'old_index\', overwrite=True).run(conn)\n'), - (rethinkdb.ast.Table.index_drop, b"table.index_drop(index_name) -> object\n\nDelete a previously created secondary index of this table.\n\n*Example* Drop a secondary index named 'code_name'.\n\n r.table('dc').index_drop('code_name').run(conn)\n\n"), - (rethinkdb.ast.Table.index_list, b"table.index_list() -> array\n\nList all the secondary indexes of this table.\n\n*Example* List the available secondary indexes for this table.\n\n r.table('marvel').index_list().run(conn)\n"), - (rethinkdb.ast.Table.index_rename, b"table.index_rename(old_index_name, new_index_name[, overwrite=False]) -> object\n\nRename an existing secondary index on a table. If the optional argument `overwrite` is specified as `True`, a previously existing index with the new name will be deleted and the index will be renamed. 
If `overwrite` is `False` (the default) an error will be raised if the new index name already exists.\n\nThe return value on success will be an object of the format `{'renamed': 1}`, or `{'renamed': 0}` if the old and new names are the same.\n\nAn error will be raised if the old index name does not exist, if the new index name is already in use and `overwrite` is `False`, or if either the old or new index name are the same as the primary key field name.\n\n*Example* Rename an index on the comments table.\n\n r.table('comments').index_rename('post_id', 'message_id').run(conn)\n"), - (rethinkdb.ast.Table.index_status, b'table.index_status([, index...]) -> array\n\nGet the status of the specified indexes on this table, or the status\nof all indexes on this table if no indexes are specified.\n\nThe result is an array where for each index, there will be an object like this one:\n\n {\n "index": <index_name>,\n "ready": True,\n "function": <binary>,\n "multi": <bool>,\n "outdated": <bool>\n }\n\nor this one:\n\n {\n "index": <index_name>,\n "ready": False,\n "blocks_processed": <int>,\n "blocks_total": <int>,\n "function": <binary>,\n "multi": <bool>,\n "outdated": <bool>\n }\n\nThe `multi` field will be `true` or `false` depending on whether this index was created as a multi index (see [index_create](http://rethinkdb.com/api/python/index_create/) for details). The `outdated` field will be true if the index is outdated in the current version of RethinkDB and needs to be rebuilt.\n\nThe `function` field is a binary object containing an opaque representation of the secondary index (including the `multi` argument if specified). It can be passed as the second argument to [index_create](http://rethinkdb.com/api/python/index_create/) to create a new index with the same function; see `index_create` for more information.\n\n*Example* Get the status of all the indexes on `test`:\n\n r.table(\'test\').index_status().run(conn)\n\n*Example* Get the status of the `timestamp` index:\n\n r.table(\'test\').index_status(\'timestamp\').run(conn)\n\n*Example* Save the binary representation of the index:\n\n func = r.table(\'test\').index_status(\'timestamp\').nth(0)[\'function\'].run(conn)\n'), - (rethinkdb.ast.Table.index_wait, b'table.index_wait([, index...]) -> array\n\nWait for the specified indexes on this table to be ready, or for all\nindexes on this table to be ready if no indexes are specified.\n\nThe result is an array containing one object for each table index:\n\n {\n "index": <index_name>,\n "ready": True,\n "function": <binary>,\n "multi": <bool>,\n "geo": <bool>,\n "outdated": <bool>\n }\n\nSee the [index_status](http://rethinkdb.com/api/python/index_status) documentation for a description of the field values.\n\n*Example* Wait for all indexes on the table `test` to be ready:\n\n r.table(\'test\').index_wait().run(conn)\n\n*Example* Wait for the index `timestamp` to be ready:\n\n r.table(\'test\').index_wait(\'timestamp\').run(conn)\n'), - (rethinkdb.ast.DB.table_create, b'db.table_create(table_name[, options]) -> object\n\nCreate a table. 
A RethinkDB table is a collection of JSON documents.\n\nIf successful, the command returns an object with two fields:\n\n* `tables_created`: always `1`.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: always `None`.\n * `new_val`: the table\'s new [config](http://rethinkdb.com/api/python/config) value.\n\nIf a table with the same name already exists, the command throws `RqlRuntimeError`.\n\nNote: Only alphanumeric characters and underscores are valid for the table name.\n\nWhen creating a table you can specify the following options:\n\n* `primary_key`: the name of the primary key. The default primary key is `id`.\n* `durability`: if set to `soft`, writes will be acknowledged by the server immediately and flushed to disk in the background. The default is `hard`: acknowledgment of writes happens after data has been written to disk.\n* `shards`: the number of shards, an integer from 1-32. Defaults to `1`.\n* `replicas`: either an integer or a mapping object. Defaults to `1`.\n * If `replicas` is an integer, it specifies the number of replicas per shard. Specifying more replicas than there are servers will return an error.\n * If `replicas` is an object, it specifies key-value pairs of server tags and the number of replicas to assign to those servers: `{\'tag1\': 2, \'tag2\': 4, \'tag3\': 2, ...}`.\n* `primary_replica_tag`: the primary server specified by its server tag. Required if `replicas` is an object; the tag must be in the object. This must *not* be specified if `replicas` is an integer.\n\nThe [data type](http://rethinkdb.com/docs/data-types/) of a primary key is usually a string (like a UUID) or a number, but it can also be a time, binary object, boolean or an array. It cannot be an object.\n\n*Example* Create a table named \'dc_universe\' with the default settings.\n\n r.db(\'test\').table_create(\'dc_universe\').run(conn)\n \n {\n "config_changes": [\n {\n "new_val": {\n "db": "test",\n "durability": "hard",\n "id": "20ea60d4-3b76-4817-8828-98a236df0297",\n "name": "dc_universe",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "rethinkdb_srv1",\n "replicas": [\n "rethinkdb_srv1",\n "rethinkdb_srv2"\n ]\n }\n ],\n "write_acks": "majority"\n },\n "old_val": None\n }\n ],\n "tables_created": 1\n }\n\n*Example* Create a table named \'dc_universe\' using the field \'name\' as primary key.\n\n r.db(\'test\').table_create(\'dc_universe\', primary_key=\'name\').run(conn)\n\n*Example* Create a table set up for two shards and three replicas per shard. This requires three available servers.\n\n r.db(\'test\').table_create(\'dc_universe\', shards=2, replicas=3).run(conn)\n\nRead [Sharding and replication](http://rethinkdb.com/docs/sharding-and-replication/) for a complete discussion of the subject, including advanced topics.\n'), - (rethinkdb.ast.DB.table_drop, b'db.table_drop(table_name) -> object\n\nDrop a table. 
The table and all its data will be deleted.\n\nIf successful, the command returns an object with two fields:\n\n* `tables_dropped`: always `1`.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: the dropped table\'s [config](http://rethinkdb.com/api/python/config) value.\n * `new_val`: always `None`.\n\nIf the given table does not exist in the database, the command throws `RqlRuntimeError`.\n\n*Example* Drop a table named \'dc_universe\'.\n\n r.db(\'test\').table_drop(\'dc_universe\').run(conn)\n \n {\n "config_changes": [\n {\n "old_val": {\n "db": "test",\n "durability": "hard",\n "id": "20ea60d4-3b76-4817-8828-98a236df0297",\n "name": "dc_universe",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "rethinkdb_srv1",\n "replicas": [\n "rethinkdb_srv1",\n "rethinkdb_srv2"\n ]\n }\n ],\n "write_acks": "majority"\n },\n "new_val": None\n }\n ],\n "tables_dropped": 1\n }\n'), - (rethinkdb.ast.DB.table_list, b"db.table_list() -> array\n\nList all table names in a database. The result is a list of strings.\n\n*Example* List all tables of the 'test' database.\n\n r.db('test').table_list().run(conn)\n \n"), - (rethinkdb.ast.RqlQuery.__add__, b'number + number -> number\nstring + string -> string\narray + array -> array\ntime + number -> time\n\nSum two numbers, concatenate two strings, or concatenate 2 arrays.\n\n*Example* It\'s as easy as 2 + 2 = 4.\n\n > (r.expr(2) + 2).run(conn)\n \n 4\n\n*Example* Strings can be concatenated too.\n\n > (r.expr("foo") + "bar").run(conn)\n \n "foobar"\n\n*Example* Arrays can be concatenated too.\n\n > (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n \n [\'foo\', \'bar\', \'buzz\']\n\n*Example* Create a date one year from now.\n\n r.now() + 365*24*60*60\n\n*Example* Use [args](http://rethinkdb.com/api/python/args) with `add` to sum multiple values.\n\n > r.add(r.args([10, 20, 30])).run(conn)\n \n 60\n\n*Example* Concatenate an array of strings with `args`.\n\n > r.add(r.args([\'foo\', \'bar\', \'buzz\'])).run(conn)\n \n "foobarbuzz"\n'), - (rethinkdb.add, b'number + number -> number\nstring + string -> string\narray + array -> array\ntime + number -> time\n\nSum two numbers, concatenate two strings, or concatenate 2 arrays.\n\n*Example* It\'s as easy as 2 + 2 = 4.\n\n > (r.expr(2) + 2).run(conn)\n \n 4\n\n*Example* Strings can be concatenated too.\n\n > (r.expr("foo") + "bar").run(conn)\n \n "foobar"\n\n*Example* Arrays can be concatenated too.\n\n > (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n \n [\'foo\', \'bar\', \'buzz\']\n\n*Example* Create a date one year from now.\n\n r.now() + 365*24*60*60\n\n*Example* Use [args](http://rethinkdb.com/api/python/args) with `add` to sum multiple values.\n\n > r.add(r.args([10, 20, 30])).run(conn)\n \n 60\n\n*Example* Concatenate an array of strings with `args`.\n\n > r.add(r.args([\'foo\', \'bar\', \'buzz\'])).run(conn)\n \n "foobarbuzz"\n'), - (rethinkdb.ast.RqlQuery.__and__, b'bool & bool -> bool\nr.and_(bool, bool) -> bool\nbool.and_(bool) -> bool\n\nCompute the logical "and" of two or more values. The `and_` command can be used as an infix operator after its first argument (`r.expr(True).and_(False)`) or given all of its arguments as parameters (`r.and_(True, False)`). 
The standard Python and operator, `&`, may also be used with ReQL.\n\n*Example* Return whether both `a` and `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) & b).run(conn)\n \n False\n*Example* Return whether all of `x`, `y` and `z` evaluate to true.\n\n > x = True\n > y = True\n > z = True\n > r.and_(x, y, z).run(conn)\n \n True\n'), - (rethinkdb.and_, b'bool & bool -> bool\nr.and_(bool, bool) -> bool\nbool.and_(bool) -> bool\n\nCompute the logical "and" of two or more values. The `and_` command can be used as an infix operator after its first argument (`r.expr(True).and_(False)`) or given all of its arguments as parameters (`r.and_(True, False)`). The standard Python and operator, `&`, may also be used with ReQL.\n\n*Example* Return whether both `a` and `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) & b).run(conn)\n \n False\n*Example* Return whether all of `x`, `y` and `z` evaluate to true.\n\n > x = True\n > y = True\n > z = True\n > r.and_(x, y, z).run(conn)\n \n True\n'), - (rethinkdb.ast.RqlQuery.__div__, b"number / number -> number\n\nDivide two numbers.\n\n*Example* It's as easy as 2 / 2 = 1.\n\n (r.expr(2) / 2).run(conn)\n"), - (rethinkdb.div, b"number / number -> number\n\nDivide two numbers.\n\n*Example* It's as easy as 2 / 2 = 1.\n\n (r.expr(2) / 2).run(conn)\n"), - (rethinkdb.ast.RqlQuery.__eq__, b'value == value -> bool\nvalue.eq(value) -> bool\n\nTest if two values are equal.\n\n*Example* Does 2 equal 2?\n\n (r.expr(2) == 2).run(conn)\n r.expr(2).eq(2).run(conn)\n'), - (rethinkdb.ast.RqlQuery.eq, b'value == value -> bool\nvalue.eq(value) -> bool\n\nTest if two values are equal.\n\n*Example* Does 2 equal 2?\n\n (r.expr(2) == 2).run(conn)\n r.expr(2).eq(2).run(conn)\n'), - (rethinkdb.ast.RqlQuery.__ge__, b'value >= value -> bool\nvalue.ge(value) -> bool\n\nTest if the first value is greater than or equal to other.\n\n*Example* Is 2 greater than or equal to 2?\n\n (r.expr(2) >= 2).run(conn)\n r.expr(2).ge(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.ge, b'value >= value -> bool\nvalue.ge(value) -> bool\n\nTest if the first value is greater than or equal to other.\n\n*Example* Is 2 greater than or equal to 2?\n\n (r.expr(2) >= 2).run(conn)\n r.expr(2).ge(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.__gt__, b'value > value -> bool\nvalue.gt(value) -> bool\n\nTest if the first value is greater than other.\n\n*Example* Is 2 greater than 2?\n\n (r.expr(2) > 2).run(conn)\n r.expr(2).gt(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.gt, b'value > value -> bool\nvalue.gt(value) -> bool\n\nTest if the first value is greater than other.\n\n*Example* Is 2 greater than 2?\n\n (r.expr(2) > 2).run(conn)\n r.expr(2).gt(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.__le__, b'value <= value -> bool\nvalue.le(value) -> bool\n\nTest if the first value is less than or equal to other.\n\n*Example* Is 2 less than or equal to 2?\n\n (r.expr(2) <= 2).run(conn)\n r.expr(2).le(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.le, b'value <= value -> bool\nvalue.le(value) -> bool\n\nTest if the first value is less than or equal to other.\n\n*Example* Is 2 less than or equal to 2?\n\n (r.expr(2) <= 2).run(conn)\n r.expr(2).le(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.__lt__, b'value < value -> bool\nvalue.lt(value) -> bool\n\nTest if the first value is less than other.\n\n*Example* Is 2 less than 2?\n\n (r.expr(2) < 2).run(conn)\n r.expr(2).lt(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.lt, b'value < value -> bool\nvalue.lt(value) -> bool\n\nTest if the first value 
is less than other.\n\n*Example* Is 2 less than 2?\n\n (r.expr(2) < 2).run(conn)\n r.expr(2).lt(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.__mod__, b"number % number -> number\n\nFind the remainder when dividing two numbers.\n\n*Example* It's as easy as 2 % 2 = 0.\n\n (r.expr(2) % 2).run(conn)\n\n`\n"), - (rethinkdb.mod, b"number % number -> number\n\nFind the remainder when dividing two numbers.\n\n*Example* It's as easy as 2 % 2 = 0.\n\n (r.expr(2) % 2).run(conn)\n\n`\n"), - (rethinkdb.ast.RqlQuery.__mul__, b'number * number -> number\narray * number -> array\n\nMultiply two numbers, or make a periodic array.\n\n*Example* It\'s as easy as 2 * 2 = 4.\n\n (r.expr(2) * 2).run(conn)\n\n*Example* Arrays can be multiplied by numbers as well.\n\n (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n'), - (rethinkdb.mul, b'number * number -> number\narray * number -> array\n\nMultiply two numbers, or make a periodic array.\n\n*Example* It\'s as easy as 2 * 2 = 4.\n\n (r.expr(2) * 2).run(conn)\n\n*Example* Arrays can be multiplied by numbers as well.\n\n (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.__ne__, b'value != value -> bool\nvalue.ne(value) -> bool\n\nTest if two values are not equal.\n\n*Example* Does 2 not equal 2?\n\n (r.expr(2) != 2).run(conn)\n r.expr(2).ne(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.ne, b'value != value -> bool\nvalue.ne(value) -> bool\n\nTest if two values are not equal.\n\n*Example* Does 2 not equal 2?\n\n (r.expr(2) != 2).run(conn)\n r.expr(2).ne(2).run(conn)\n\n'), - (rethinkdb.ast.RqlQuery.__invert__, b'bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a "flag" field.\n\n r.table(\'users\').filter(\n lambda users: (~users.has_fields(\'flag\'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table(\'users\').filter(\n lambda users: r.not_(users.has_fields(\'flag\'))\n ).run(conn)\n'), - (rethinkdb.ast.RqlQuery.not_, b'bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. 
(`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a "flag" field.\n\n r.table(\'users\').filter(\n lambda users: (~users.has_fields(\'flag\'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table(\'users\').filter(\n lambda users: r.not_(users.has_fields(\'flag\'))\n ).run(conn)\n'), - (rethinkdb.not_, b'bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a "flag" field.\n\n r.table(\'users\').filter(\n lambda users: (~users.has_fields(\'flag\'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table(\'users\').filter(\n lambda users: r.not_(users.has_fields(\'flag\'))\n ).run(conn)\n'), - (rethinkdb.ast.RqlQuery.__or__, b'bool | bool -> bool\nbool.or_(bool[, bool, ...]) -> bool\nr.or_(bool, bool) -> bool\n\nCompute the logical "or" of two or more values. The `or_` command can be used as an infix operator after its first argument (`r.expr(True).or_(False)`) or given all of its arguments as parameters (`r.or_(True, False)`). The standard Python or operator, `|`, may also be used with ReQL.\n\n*Example* Return whether either `a` or `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) | b).run(conn)\n \n True\n\n*Example* Return whether any of `x`, `y` or `z` evaluate to true.\n\n > x = False\n > y = False\n > z = False\n > r.or_(x, y, z).run(conn)\n \n False\n\n__Note:__ When using `or` inside a `filter` predicate to test the values of fields that may not exist on the documents being tested, you should use the `default` command with those fields so they explicitly return `False`.\n\n r.table(\'posts\').filter(lambda post:\n post[\'category\'].default(\'foo\').eq(\'article\').or(\n post[\'genre\'].default(\'foo\').eq(\'mystery\'))\n ).run(conn)\n'), - (rethinkdb.or_, b'bool | bool -> bool\nbool.or_(bool[, bool, ...]) -> bool\nr.or_(bool, bool) -> bool\n\nCompute the logical "or" of two or more values. The `or_` command can be used as an infix operator after its first argument (`r.expr(True).or_(False)`) or given all of its arguments as parameters (`r.or_(True, False)`). 
The standard Python or operator, `|`, may also be used with ReQL.\n\n*Example* Return whether either `a` or `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) | b).run(conn)\n \n True\n\n*Example* Return whether any of `x`, `y` or `z` evaluate to true.\n\n > x = False\n > y = False\n > z = False\n > r.or_(x, y, z).run(conn)\n \n False\n\n__Note:__ When using `or` inside a `filter` predicate to test the values of fields that may not exist on the documents being tested, you should use the `default` command with those fields so they explicitly return `False`.\n\n r.table(\'posts\').filter(lambda post:\n post[\'category\'].default(\'foo\').eq(\'article\').or(\n post[\'genre\'].default(\'foo\').eq(\'mystery\'))\n ).run(conn)\n'), - (rethinkdb.random, b"r.random() -> number\nr.random(number[, number], float=True) -> number\nr.random(integer[, integer]) -> integer\n\nGenerate a random number between given (or implied) bounds. `random` takes zero, one or two arguments.\n\n- With __zero__ arguments, the result will be a floating-point number in the range `[0,1)` (from 0 up to but not including 1).\n- With __one__ argument _x,_ the result will be in the range `[0,x)`, and will be integer unless `float=True` is given as an option. Specifying a floating point number without the `float` option will raise an error.\n- With __two__ arguments _x_ and _y,_ the result will be in the range `[x,y)`, and will be integer unless `float=True` is given as an option. If _x_ and _y_ are equal an error will occur, unless the floating-point option has been specified, in which case _x_ will be returned. Specifying a floating point number without the `float` option will raise an error.\n\nNote: The last argument given will always be the 'open' side of the range, but when\ngenerating a floating-point number, the 'open' side may be less than the 'closed' side.\n\n*Example* Generate a random number in the range `[0,1)`\n\n r.random().run(conn)\n\n*Example* Generate a random integer in the range `[0,100)`\n\n r.random(100).run(conn)\n r.random(0, 100).run(conn)\n\n*Example* Generate a random number in the range `(-2.24,1.59]`\n\n r.random(1.59, -2.24, float=True).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.__sub__, b"number - number -> number\ntime - time -> number\ntime - number -> time\n\nSubtract two numbers.\n\n*Example* It's as easy as 2 - 2 = 0.\n\n (r.expr(2) - 2).run(conn)\n\n*Example* Create a date one year ago today.\n\n r.now() - 365*24*60*60\n\n*Example* Retrieve how many seconds elapsed between today and date\n\n r.now() - date\n\n"), - (rethinkdb.sub, b"number - number -> number\ntime - time -> number\ntime - number -> time\n\nSubtract two numbers.\n\n*Example* It's as easy as 2 - 2 = 0.\n\n (r.expr(2) - 2).run(conn)\n\n*Example* Create a date one year ago today.\n\n r.now() - 365*24*60*60\n\n*Example* Retrieve how many seconds elapsed between today and date\n\n r.now() - date\n\n"), - (rethinkdb.ast.Table.between, b'table.between(lower_key, upper_key[, index=\'id\', left_bound=\'closed\', right_bound=\'open\'])\n -> selection\n\nGet all documents between two keys. Accepts three optional arguments: `index`, `left_bound`, and `right_bound`. If `index` is set to the name of a secondary index, `between` will return all documents where that index\'s value is in the specified range (it uses the primary key by default). 
`left_bound` or `right_bound` may be set to `open` or `closed` to indicate whether or not to include that endpoint of the range (by default, `left_bound` is closed and `right_bound` is open).\n\nYou may also use the special constants `r.minval` and `r.maxval` for boundaries, which represent "less than any index key" and "more than any index key" respectively. For instance, if you use `r.minval` as the lower key, then `between` will return all documents whose primary keys (or indexes) are less than the specified upper key.\n\nNote that compound indexes are sorted using [lexicographical order][lo]. Take the following range as an example:\n\n\t[[1, "c"] ... [5, "e"]]\n\nThis range includes all compound keys:\n\n* whose first item is 1 and second item is equal or greater than "c";\n* whose first item is between 1 and 5, *regardless of the value of the second item*;\n* whose first item is 5 and second item is less than or equal to "e".\n\n[lo]: https://en.wikipedia.org/wiki/Lexicographical_order\n\n*Example* Find all users with primary key >= 10 and < 20 (a normal half-open interval).\n\n r.table(\'marvel\').between(10, 20).run(conn)\n\n*Example* Find all users with primary key >= 10 and <= 20 (an interval closed on both sides).\n\n r.table(\'marvel\').between(10, 20, right_bound=\'closed\').run(conn)\n\n*Example* Find all users with primary key < 20.\n\n r.table(\'marvel\').between(r.minval, 20).run(conn)\n\n*Example* Find all users with primary key > 10.\n\n r.table(\'marvel\').between(10, r.maxval, left_bound=\'open\').run(conn)\n\n*Example* Between can be used on secondary indexes too. Just pass an optional index argument giving the secondary index to query.\n\n r.table(\'dc\').between(\'dark_knight\', \'man_of_steel\', index=\'code_name\').run(conn)\n\n*Example* Get all users whose full name is between "John Smith" and "Wade Welles."\n\n r.table("users").between(["Smith", "John"], ["Welles", "Wade"],\n index="full_name").run(conn)\n\n*Example* Subscribe to a [changefeed](http://rethinkdb.com/docs/changefeeds/javascript) of teams ranked in the top 10.\n\n changes = r.table("teams").between(1, 11, index="rank").changes().run(conn)\n\n__Note:__ Between works with secondary indexes on date fields, but will not work with unindexed date fields. To test whether a date value is between two other dates, use the [during](http://rethinkdb.com/api/python/during) command, not `between`.\n\nSecondary indexes can be used in extremely powerful ways with `between` and other commands; read the full article on [secondary indexes](http://rethinkdb.com/docs/secondary-indexes) for examples using boolean operations, `contains` and more.\n\n__Note:__ RethinkDB uses byte-wise ordering for `between` and does not support Unicode collations; non-ASCII characters will be sorted by UTF-8 codepoint.\n\n__Note:__ If you chain `between` after [order_by](http://rethinkdb.com/api/python/order_by), the `between` command must use the index specified in `order_by`, and will default to that index. 
Trying to specify another index will result in a `RqlRuntimeError`.\n'), - (rethinkdb.db, b"r.db(db_name) -> db\n\nReference a database.\n\n*Example* Before we can query a table we have to select the correct database.\n\n r.db('heroes').table('marvel').run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.filter, b'selection.filter(predicate[, default=False]) -> selection\nstream.filter(predicate[, default=False]) -> stream\narray.filter(predicate[, default=False]) -> array\n\nReturn all the elements in a sequence for which the given predicate is true. The return value of `filter` will be the same as the input (sequence, stream, or array). Documents can be filtered in a variety of ways—ranges, nested values, boolean conditions, and the results of anonymous functions.\n\nBy default, `filter` will silently skip documents with missing fields: if the predicate tries to access a field that doesn\'t exist (for instance, the predicate `{\'age\': 30}` applied to a document with no `age` field), that document will not be returned in the result set, and no error will be generated. This behavior can be changed with the `default` optional argument.\n\n* If `default` is set to `True`, documents with missing fields will be returned rather than skipped.\n* If `default` is set to `r.error()`, an `RqlRuntimeError` will be thrown when a document with a missing field is tested.\n* If `default` is set to `False` (the default), documents with missing fields will be skipped.\n\n*Example* Get all users who are 30 years old.\n\n r.table(\'users\').filter({\'age\': 30}).run(conn)\n\nThe predicate `{\'age\': 30}` selects documents in the `users` table with an `age` field whose value is `30`. Documents with an `age` field set to any other value *or* with no `age` field present are skipped.\n\nWhile the `{\'field\': value}` style of predicate is useful for exact matches, a more general way to write a predicate is to use the [row](http://rethinkdb.com/api/python/row) command with a comparison operator such as [eq](http://rethinkdb.com/api/python/eq) (`==`) or [gt](http://rethinkdb.com/api/python/gt) (`>`), or to use a lambda function that returns `True` or `False`.\n\n r.table(\'users\').filter(r.row["age"] == 30).run(conn)\n\nIn this case, the predicate `r.row["age"] == 30` returns `True` if the field `age` is equal to 30. You can write this predicate as a lambda function instead:\n\n r.table(\'users\').filter(lambda user:\n user["age"] == 30\n ).run(conn)\n\nPredicates to `filter` are evaluated on the server, and must use ReQL expressions. Some Python comparison operators are overloaded by the RethinkDB driver and will be translated to ReQL, such as `==`, `<`/`>` and `|`/`&` (note the single character form, rather than `||`/`&&`).\n\nAlso, predicates must evaluate document fields. 
They cannot evaluate [secondary indexes](http://rethinkdb.com/docs/secondary-indexes/).\n\n*Example* Get all users who are more than 18 years old.\n\n r.table("users").filter(r.row["age"] > 18).run(conn)\n\n*Example* Get all users who are less than 18 years old and more than 13 years old.\n\n r.table("users").filter((r.row["age"] < 18) & (r.row["age"] > 13)).run(conn)\n\n*Example* Get all users who are more than 18 years old or have their parental consent.\n\n r.table("users").filter(\n (r.row["age"] >= 18) | (r.row["hasParentalConsent"])).run(conn)\n\n*Example* Retrieve all users who subscribed between January 1st, 2012\n(included) and January 1st, 2013 (excluded).\n\n r.table("users").filter(\n lambda user: user["subscription_date"].during(\n r.time(2012, 1, 1, \'Z\'), r.time(2013, 1, 1, \'Z\'))\n ).run(conn)\n\n*Example* Retrieve all users who have a gmail account (whose field `email` ends with `@gmail.com`).\n\n r.table("users").filter(\n lambda user: user["email"].match("@gmail.com$")\n ).run(conn)\n\n*Example* Filter based on the presence of a value in an array.\n\nGiven this schema for the `users` table:\n\n {\n "name": <type \'str\'>\n "places_visited": [<type \'str\'>]\n }\n\nRetrieve all users whose field `places_visited` contains `France`.\n\n r.table("users").filter(lambda user:\n user["places_visited"].contains("France")\n ).run(conn)\n\n*Example* Filter based on nested fields.\n\nGiven this schema for the `users` table:\n\n {\n "id": <type \'str\'>\n "name": {\n "first": <type \'str\'>,\n "middle": <type \'str\'>,\n "last": <type \'str\'>\n }\n }\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), with any middle name.\n\n r.table("users").filter({\n "name": {\n "first": "William",\n "last": "Adama"\n }\n }).run(conn)\n\nIf you want an exact match for a field that is an object, you will have to use `r.literal`.\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), and who do not have a middle name.\n\n r.table("users").filter(r.literal({\n "name": {\n "first": "William",\n "last": "Adama"\n }\n })).run(conn)\n\nYou may rewrite these with lambda functions.\n\n r.table("users").filter(\n lambda user:\n (user["name"]["first"] == "William")\n & (user["name"]["last"] == "Adama")\n ).run(conn)\n\n r.table("users").filter(lambda user:\n user["name"] == {\n "first": "William",\n "last": "Adama"\n }\n ).run(conn)\n\nBy default, documents missing fields tested by the `filter` predicate are skipped. In the previous examples, users without an `age` field are not returned. By passing the optional `default` argument to `filter`, you can change this behavior.\n\n*Example* Get all users less than 18 years old or whose `age` field is missing.\n\n r.table("users").filter(r.row["age"] < 18, default=True).run(conn)\n\n*Example* Get all users more than 18 years old. 
Throw an error if a\ndocument is missing the field `age`.\n\n    r.table("users").filter(r.row["age"] > 18, default=r.error()).run(conn)\n\n*Example* Get all users who have given their phone number (all the documents whose field `phone_number` exists and is not `None`).\n\n    r.table(\'users\').filter(\n        lambda user: user.has_fields(\'phone_number\')\n    ).run(conn)\n\n*Example* Get all users with an "editor" role or an "admin" privilege.\n\n    r.table(\'users\').filter(\n        lambda user: (user[\'role\'] == \'editor\').default(False) |\n            (user[\'privilege\'] == \'admin\').default(False)\n    ).run(conn)\n\nInstead of using the `default` optional argument to `filter`, we have to use default values on the fields within the `or` clause. Why? If the field on the left side of the `or` clause is missing from a document—in this case, if the user doesn\'t have a `role` field—the predicate will generate an error, and will return `False` (or the value the `default` argument is set to) without evaluating the right side of the `or`. By using `.default(False)` on the fields, each side of the `or` will evaluate to either the field\'s value or `False` if the field doesn\'t exist.\n'), - (rethinkdb.ast.Table.get, b"table.get(key) -> singleRowSelection\n\nGet a document by primary key.\n\nIf no document exists with that primary key, `get` will return `None`.\n\n*Example* Find a document by UUID.\n\n    r.table('posts').get('a9849eef-7176-4411-935b-79a6e3c56a74').run(conn)\n\n*Example* Find a document and merge another document with it.\n\n    r.table('heroes').get(3).merge(\n        { 'powers': ['invisibility', 'speed'] }\n    ).run(conn)\n\n*Example* Subscribe to a document's [changefeed](http://rethinkdb.com/docs/changefeeds/python).\n\n    changes = r.table('heroes').get(3).changes().run(conn)\n"), - (rethinkdb.ast.Table.get_all, b"table.get_all(key1[, key2...], [, index='id']) -> selection\n\nGet all documents where the given value matches the value of the requested index.\n\n*Example* Secondary index keys are not guaranteed to be unique so we cannot query via [get](http://rethinkdb.com/api/python/get/) when using a secondary index.\n\n    r.table('marvel').get_all('man_of_steel', index='code_name').run(conn)\n\n*Example* Without an index argument, we default to the primary index. While `get` will either return the document or `None` when no document with such a primary key value exists, this will return either a one or zero length stream.\n\n    r.table('dc').get_all('superman').run(conn)\n\n*Example* You can get multiple documents in a single call to `get_all`.\n\n    r.table('dc').get_all('superman', 'ant man').run(conn)\n\n*Example* You can use [args](http://rethinkdb.com/api/python/args/) with `get_all` to retrieve multiple documents whose keys are in a list. This uses `get_all` to get a list of female superheroes, coerces that to an array, and then gets a list of villains who have those superheroes as enemies.\n\n    r.do(\n        r.table('heroes').get_all('f', index='gender')['id'].coerce_to('array'), \n        lambda heroines: r.table('villains').get_all(r.args(heroines))\n    ).run(conn)\n\nSecondary indexes can be used in extremely powerful ways with `get_all` and other commands; read the full article on [secondary indexes](http://rethinkdb.com/docs/secondary-indexes) for examples using boolean operations, `contains` and more.\n"), - (rethinkdb.ast.DB.table, b"db.table(name[, use_outdated=False, identifier_format='name']) -> table\n\nReturn all documents in a table. 
Other commands may be chained after `table` to return a subset of documents (such as `get` and `filter`) or perform further processing.\n\n*Example* Return all documents in the table 'marvel' of the default database.\n\n r.table('marvel').run(conn)\n\n*Example* Return all documents in the table 'marvel' of the database 'heroes'.\n\n r.db('heroes').table('marvel').run(conn)\n\nThere are two optional arguments.\n\n* `use_outdated`: if `True`, this allows potentially out-of-date data to be returned, with potentially faster reads. It also allows you to perform reads from a secondary replica if a primary has failed. Default `False`.\n* `identifier_format`: possible values are `name` and `uuid`, with a default of `name`. If set to `uuid`, then [system tables](http://rethinkdb.com/docs/system-tables/) will refer to servers, databases and tables by UUID rather than name. (This only has an effect when used with system tables.)\n\n*Example* Allow potentially out-of-date data in exchange for faster reads.\n\n r.db('heroes').table('marvel', use_outdated=True).run(conn)\n"), - (rethinkdb.ast.RqlQuery.downcase, b'string.downcase() -> string\n\nLowercases a string.\n\n*Example*\n\n > r.expr("Sentence about LaTeX.").downcase().run(conn)\n "sentence about latex."\n\n__Note:__ `upcase` and `downcase` only affect ASCII characters.\n'), - (rethinkdb.ast.RqlQuery.match, b'string.match(regexp) -> None/object\n\nMatches against a regular expression. If there is a match, returns an object with the fields:\n\n- `str`: The matched string\n- `start`: The matched string\'s start\n- `end`: The matched string\'s end\n- `groups`: The capture groups defined with parentheses\n\nIf no match is found, returns `None`.\n\nAccepts RE2 syntax\n([https://code.google.com/p/re2/wiki/Syntax](https://code.google.com/p/re2/wiki/Syntax)).\nYou can enable case-insensitive matching by prefixing the regular expression with\n`(?i)`. See the linked RE2 documentation for more flags.\n\nThe `match` command does not support backreferences.\n\n*Example* Get all users whose name starts with "A". 
Because `None` evaluates to `false` in\n`filter`, you can just use the result of `match` for the predicate.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("^A")\n ).run(conn)\n\n*Example* Get all users whose name ends with "n".\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("n$")\n ).run(conn)\n\n*Example* Get all users whose name has "li" in it\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("li")\n ).run(conn)\n\n*Example* Get all users whose name is "John" with a case-insensitive search.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("(?i)^john$")\n ).run(conn)\n\n*Example* Get all users whose name is composed of only characters between "a" and "z".\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("(?i)^[a-z]+$")\n ).run(conn)\n\n*Example* Get all users where the zipcode is a string of 5 digits.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'zipcode\'].match("\\d{5}")\n ).run(conn)\n\n*Example* Retrieve the domain of a basic email\n\n r.expr("name@domain.com").match(".*@(.*)").run(conn)\n\nResult:\n\n {\n "start": 0,\n "end": 20,\n "str": "name@domain.com",\n "groups":[\n {\n "end": 17,\n "start": 7,\n "str": "domain.com"\n }\n ]\n }\n\nYou can then retrieve only the domain with the [\\[\\]](http://rethinkdb.com/api/python/get_field) selector.\n\n r.expr("name@domain.com").match(".*@(.*)")["groups"][0]["str"].run(conn)\n\nReturns `\'domain.com\'`\n\n*Example* Fail to parse out the domain and returns `None`.\n\n r.expr("name[at]domain.com").match(".*@(.*)").run(conn)\n'), - (rethinkdb.ast.RqlQuery.split, b'string.split([separator, [max_splits]]) -> array\n\nSplits a string into substrings. Splits on whitespace when called\nwith no arguments. When called with a separator, splits on that\nseparator. When called with a separator and a maximum number of\nsplits, splits on that separator at most `max_splits` times. (Can be\ncalled with `None` as the separator if you want to split on whitespace\nwhile still specifying `max_splits`.)\n\nMimics the behavior of Python\'s `string.split` in edge cases, except\nfor splitting on the empty string, which instead produces an array of\nsingle-character strings.\n\n*Example* Split on whitespace.\n\n > r.expr("foo bar bax").split().run(conn)\n ["foo", "bar", "bax"]\n\n*Example* Split the entries in a CSV file.\n\n > r.expr("12,37,,22,").split(",").run(conn)\n ["12", "37", "", "22", ""]\n\n*Example* Split a string into characters.\n\n > r.expr("mlucy").split("").run(conn)\n ["m", "l", "u", "c", "y"]\n\n*Example* Split the entries in a CSV file, but only at most 3\ntimes.\n\n > r.expr("12,37,,22,").split(",", 3).run(conn)\n ["12", "37", "", "22,"]\n\n*Example* Split on whitespace at most once (i.e. get the first word).\n\n > r.expr("foo bar bax").split(None, 1).run(conn)\n ["foo", "bar bax"]\n'), - (rethinkdb.ast.RqlQuery.upcase, b'string.upcase() -> string\n\nUppercases a string.\n\n*Example*\n\n > r.expr("Sentence about LaTeX.").upcase().run(conn)\n "SENTENCE ABOUT LATEX."\n\n__Note:__ `upcase` and `downcase` only affect ASCII characters.\n'), - (rethinkdb.ast.RqlQuery.concat_map, b'stream.concat_map(mapping_function) -> stream\narray.concat_map(mapping_function) -> array\n\nConcatenate one or more elements into a single sequence using a mapping function.\n\n`concat_map` works in a similar fashion to `map`, applying the given function to each element in a sequence, but it will always return a single sequence. 
If the mapping function returns a sequence, `map` would produce a sequence of sequences:\n\n r.expr([1, 2, 3]).map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n [[1, 2], [2, 4], [3, 6]]\n\nWhereas `concat_map` with the same mapping function would merge those sequences into one:\n\n r.expr([1, 2, 3]).concat_map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n [1, 2, 2, 4, 3, 6]\n\nThe return value, array or stream, will be the same type as the input.\n\n*Example* Construct a sequence of all monsters defeated by Marvel heroes. The field "defeatedMonsters" is an array of one or more monster names.\n\n r.table(\'marvel\').concat_map(lambda hero: hero[\'defeatedMonsters\']).run(conn)\n\n*Example* Simulate an [eq_join](http://rethinkdb.com/api/python/eq_join/) using `concat_map`. (This is how ReQL joins are implemented internally.)\n\n r.table(\'posts\').concat_map(\n lambda post: r.table(\'comments\').get_all(\n post[\'id\'], index=\'post_id\'\n ).map(\n lambda comment: { \'left\': post, \'right\': comment}\n )\n ).run(conn)\n'), - (rethinkdb.ast.RqlQuery.is_empty, b"sequence.is_empty() -> bool\n\nTest if a sequence is empty.\n\n*Example* Are there any documents in the marvel table?\n\n r.table('marvel').is_empty().run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.limit, b"sequence.limit(n) -> stream\narray.limit(n) -> array\n\nEnd the sequence after the given number of elements.\n\n*Example* Only so many can fit in our Pantheon of heroes.\n\n r.table('marvel').order_by('belovedness').limit(10).run(conn)\n"), - (rethinkdb.ast.RqlQuery.map, b"sequence1.map([sequence2, ...], mapping_function) -> stream\narray1.map([sequence2, ...], mapping_function) -> array\nr.map(sequence1[, sequence2, ...], mapping_function) -> stream\nr.map(array1[, array2, ...], mapping_function) -> array\n\nTransform each element of one or more sequences by applying a mapping function to them. If `map` is run with two or more sequences, it will iterate for as many items as there are in the shortest sequence.\n\nNote that `map` can only be applied to sequences, not single values. If you wish to apply a function to a single value/selection (including an array), use the [do](http://rethinkdb.com/api/python/do) command.\n\n*Example* Return the first five squares.\n\n > r.expr([1, 2, 3, 4, 5]).map(lambda val: (val * val)).run(conn)\n \n [1, 4, 9, 16, 25]\n\n*Example* Sum the elements of three sequences.\n\n > sequence1 = [100, 200, 300, 400]\n > sequence2 = [10, 20, 30, 40]\n > sequence3 = [1, 2, 3, 4]\n > r.map(sequence1, sequence2, sequence3,\n lambda val1, val2, val3: (val1 + val2 + val3)).run(conn)\n \n [111, 222, 333, 444]\n\n*Example* Rename a field when retrieving documents using `map` and `merge`.\n\nThis example renames the field `id` to `user_id` when retrieving documents from the table `users`.\n\n r.table('users').map(\n lambda doc: doc.merge({'user_id': doc['id']}).without('id')).run(conn)\n\nNote that in this case, [row](http://rethinkdb.com/api/python/row) may be used as an alternative to writing an anonymous function, as it returns the same value as the function parameter receives:\n\n r.table('users').map(\n r.row.merge({'user_id': r.row['id']}).without('id')).run(conn)\n\n*Example* Assign every superhero an archenemy.\n\n r.table('heroes').map(r.table('villains'),\n lambda hero, villain: hero.merge({'villain': villain})).run(conn)\n"), - (rethinkdb.ast.RqlQuery.nth, b"sequence.nth(index) -> object\nselection.nth(index) -> selection<object>\n\nGet the *nth* element of a sequence, counting from zero. 
If the argument is negative, count from the last element.\n\nIn Python, you can use `[]` with an integer as a shorthand for `nth`.\n\n*Example* Select the second element in the array.\n\n r.expr([1,2,3]).nth(1).run(conn)\n r.expr([1,2,3])[1].run(conn)\n\n*Example* Select the bronze medalist from the competitors.\n\n r.table('players').order_by(index=r.desc('score')).nth(3).run(conn)\n\n*Example* Select the last place competitor.\n\n r.table('players').order_by(index=r.desc('score')).nth(-1).run(conn)\n"), - (rethinkdb.ast.RqlQuery.offsets_of, b"sequence.offsets_of(datum | predicate) -> array\n\nGet the indexes of an element in a sequence. If the argument is a predicate, get the indexes of all elements matching it.\n\n*Example* Find the position of the letter 'c'.\n\n r.expr(['a','b','c']).offsets_of('c').run(conn)\n\n*Example* Find the popularity ranking of invisible heroes.\n\n r.table('marvel').union(r.table('dc')).order_by('popularity').offsets_of(\n r.row['superpowers'].contains('invisibility')\n ).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.order_by, b'table.order_by([key1...], index=index_name) -> selection<stream>\nselection.order_by(key1, [key2...]) -> selection<array>\nsequence.order_by(key1, [key2...]) -> array\n\nSort the sequence by document values of the given key(s). To specify\nthe ordering, wrap the attribute with either `r.asc` or `r.desc`\n(defaults to ascending).\n\n__Note:__ RethinkDB uses byte-wise ordering for `orderBy` and does not support Unicode collations; non-ASCII characters will be sorted by UTF-8 codepoint. For more information on RethinkDB\'s sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nSorting without an index requires the server to hold the sequence in\nmemory, and is limited to 100,000 documents (or the setting of the `arrayLimit` option for [run](http://rethinkdb.com/api/python/run)). Sorting with an index can\nbe done on arbitrarily large tables, or after a `between` command\nusing the same index.\n\n*Example* Order all the posts using the index `date`. \n\n r.table(\'posts\').order_by(index=\'date\').run(conn)\n\nThe index must either be the primary key or have been previously created with [index_create](http://rethinkdb.com/api/python/index_create/).\n\n r.table(\'posts\').index_create(\'date\').run(conn)\n\nYou can also select a descending ordering:\n\n r.table(\'posts\').order_by(index=r.desc(\'date\')).run(conn, callback)\n\n*Example* Order a sequence without an index.\n\n r.table(\'posts\').get(1)[\'comments\'].order_by(\'date\')\n\nYou can also select a descending ordering:\n\n r.table(\'posts\').get(1)[\'comments\'].order_by(r.desc(\'date\'))\n\nIf you\'re doing ad-hoc analysis and know your table won\'t have more then 100,000\nelements (or you\'ve changed the setting of the `arrayLimit` option for [run](http://rethinkdb.com/api/python/run)) you can run `order_by` without an index:\n\n r.table(\'small_table\').order_by(\'date\')\n\n*Example* You can efficiently order using multiple fields by using a\n[compound index](http://www.rethinkdb.com/docs/secondary-indexes/python/).\n\nOrder by date and title.\n\n r.table(\'posts\').order_by(index=\'date_and_title\').run(conn)\n\nThe index must have been previously created with [index_create](http://rethinkdb.com/api/python/index_create/).\n\n r.table(\'posts\').index_create(\'date_and_title\', lambda post:\n [post["date"], post["title"]]).run(conn)\n\n_Note_: You cannot specify multiple orders in a compound index. 
See [issue #2306](https://github.com/rethinkdb/rethinkdb/issues/2306)\nto track progress.\n\n*Example* If you have a sequence with fewer documents than the `array_limit`, you can order it\nby multiple fields without an index.\n\n r.table(\'small_table\').order_by(\'date\', r.desc(\'title\'))\n\n*Example* Notice that an index ordering always has highest\nprecedence. The following query orders posts by date, and if multiple\nposts were published on the same date, they will be ordered by title.\n\n r.table(\'post\').order_by(\'title\', index=\'date\').run(conn)\n*Example* You can use [nested field](http://rethinkdb.com/docs/cookbook/python/#filtering-based-on-nested-fields) syntax to sort on fields from subdocuments. (You can also create indexes on nested fields using this syntax with `index_create`.)\n\n r.table(\'user\').order_by(lambda user: user[\'group\'][\'id\']).run(conn)\n\n*Example* You can efficiently order data on arbitrary expressions using indexes.\n\n r.table(\'posts\').order_by(index=\'votes\').run(conn)\n\nThe index must have been previously created with [index_create](http://rethinkdb.com/api/ruby/index_create/).\n\n r.table(\'posts\').index_create(\'votes\', lambda post:\n post["upvotes"]-post["downvotes"]\n ).run(conn)\n\n*Example* If you have a sequence with fewer documents than the `array_limit`, you can order it with an arbitrary function directly.\n\n r.table(\'small_table\').order_by(lambda doc:\n doc[\'upvotes\']-doc[\'downvotes\']\n );\n\nYou can also select a descending ordering:\n\n r.table(\'small_table\').order_by(r.desc(lambda doc:\n doc[\'upvotes\']-doc[\'downvotes\']\n ));\n\n*Example* Ordering after a `between` command can be done as long as the same index is being used.\n\n r.table("posts").between(r.time(2013, 1, 1, \'+00:00\'), r.time(2013, 1, 1, \'+00:00\'), index=\'date\')\n .order_by(index=\'date\').run(conn);\n\n'), - (rethinkdb.ast.RqlQuery.sample, b"sequence.sample(number) -> selection\nstream.sample(number) -> array\narray.sample(number) -> array\n\nSelect a given number of elements from a sequence with uniform random distribution. Selection is done without replacement.\n\nIf the sequence has less than the requested number of elements (i.e., calling `sample(10)` on a sequence with only five elements), `sample` will return the entire sequence in a random order.\n\n*Example* Select 3 random heroes.\n\n r.table('marvel').sample(3).run(conn)\n"), - (rethinkdb.ast.RqlQuery.skip, b"sequence.skip(n) -> stream\narray.skip(n) -> array\n\nSkip a number of elements from the head of the sequence.\n\n*Example* Here in conjunction with `order_by` we choose to ignore the most successful heroes.\n\n r.table('marvel').order_by('successMetric').skip(10).run(conn)\n\n"), - (rethinkdb.ast.RqlQuery.slice, b"selection.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> selection\nstream.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> stream\narray.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> array\nbinary.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> binary\n\nReturn the elements of a sequence within the specified range.\n\n`slice` returns the range between `start_index` and `end_index`. If only `start_index` is specified, `slice` returns the range from that index to the end of the sequence. 
Specify `left_bound` or `right_bound` as `open` or `closed` to indicate whether to include that endpoint of the range by default: `closed` returns that endpoint, while `open` does not. By default, `left_bound` is closed and `right_bound` is open, so the range `(10,13)` will return the tenth, eleventh and twelfth elements in the sequence.\n\nIf `end_index` is past the end of the sequence, all elements from `start_index` to the end of the sequence will be returned. If `start_index` is past the end of the sequence or `end_index` is less than `start_index`, a zero-element sequence will be returned (although see below for negative `end_index` values). An error will be raised on a negative `start_index`.\n\nA negative `end_index` is allowed with arrays; in that case, the returned range counts backward from the array's end. That is, the range of `(2,-1)` returns the second element through the next-to-last element of the range. A negative `end_index` is not allowed with a stream. (An `end_index` of −1 *is* allowed with a stream if `right_bound` is closed; this behaves as if no `end_index` was specified.)\n\nIf `slice` is used with a [binary](http://rethinkdb.com/api/python/binary) object, the indexes refer to byte positions within the object. That is, the range `(10,20)` will refer to the 10th byte through the 19th byte.\n\nIf you are only specifying the indexes and not the bounding options, you may use Python's slice operator as a shorthand: `[start_index:end_index]`.\n\n**Example:** Return the fourth, fifth and sixth youngest players. (The youngest player is at index 0, so those are elements 3–5.)\n\n r.table('players').order_by(index='age').slice(3,6).run(conn)\n\nOr, using Python's slice operator:\n\n r.table('players').filter({'class': 'amateur'})[10:20].run(conn)\n\n**Example:** Return all but the top three players who have a red flag.\n\n r.table('players').filter({'flag': 'red'}).order_by(index=r.desc('score')).slice(3).run(conn)\n\n**Example:** Return holders of tickets `X` through `Y`, assuming tickets are numbered sequentially. We want to include ticket `Y`.\n\n r.table('users').order_by(index='ticket').slice(x, y, right_bound='closed').run(conn)\n\n**Example:** Return the elements of an array from the second through two from the end (that is, not including the last two).\n\n r.expr([0,1,2,3,4,5]).slice(2,-2).run(conn)\n\nResult:\n\n [2,3]\n"), - (rethinkdb.ast.RqlQuery.union, b"stream.union(sequence[, sequence, ...]) -> stream\narray.union(sequence[, sequence, ...]) -> array\n\nConcatenate two or more sequences.\n\n*Example* Construct a stream of all heroes.\n\n r.table('marvel').union(r.table('dc')).run(conn)\n\n*Example* Combine four arrays into one.\n\n r.expr([1, 2]).union([3, 4], [5, 6], [7, 8, 9]).run(conn)\n \n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n"), - (rethinkdb.ast.RqlQuery.with_fields, b"sequence.with_fields([selector1, selector2...]) -> stream\narray.with_fields([selector1, selector2...]) -> array\n\nPlucks one or more attributes from a sequence of objects, filtering out any objects in the sequence that do not have the specified fields. 
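For instance, the following two queries should return the same documents (a rough sketch; the `users` table and `posts` field are only illustrative):

    # with_fields keeps only documents that have the requested field and plucks it...
    r.table('users').with_fields('posts').run(conn)
    # ...which behaves like filtering on has_fields and then using pluck.
    r.table('users').has_fields('posts').pluck('posts').run(conn)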
Functionally, this is identical to `has_fields` followed by `pluck` on a sequence.\n\n*Example* Get a list of users and their posts, excluding any users who have not made any posts.\n\nExisting table structure:\n\n [\n { 'id': 1, 'user': 'bob', 'email': 'bob@foo.com', 'posts': [ 1, 4, 5 ] },\n { 'id': 2, 'user': 'george', 'email': 'george@foo.com' },\n { 'id': 3, 'user': 'jane', 'email': 'jane@foo.com', 'posts': [ 2, 3, 6 ] }\n ]\n\nCommand and output:\n\n r.table('users').with_fields('id', 'user', 'posts').run(conn)\n \n [\n { 'id': 1, 'user': 'bob', 'posts': [ 1, 4, 5 ] },\n { 'id': 3, 'user': 'jane', 'posts': [ 2, 3, 6 ] }\n ]\n\n*Example* Use the [nested field syntax](http://rethinkdb.com/docs/nested-fields/) to get a list of users with cell phone numbers in their contacts.\n\n r.table('users').with_fields('id', 'user', {'contact': {'phone': 'work'}}).run(conn)\n"), - (rethinkdb.ast.Table.delete, b'table.delete([durability="hard", return_changes=False])\n -> object\nselection.delete([durability="hard", return_changes=False])\n -> object\nsingleSelection.delete([durability="hard", return_changes=False])\n -> object\n\nDelete one or more documents from a table.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n\nDelete returns an object that contains the following attributes:\n\n- `deleted`: the number of documents that were deleted.\n- `skipped`: the number of documents that were skipped. \nFor example, if you attempt to delete a batch of documents, and another concurrent query\ndeletes some of those documents first, they will be counted as skipped.\n- `errors`: the number of errors encountered while performing the delete.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `inserted`, `replaced`, and `unchanged`: all 0 for a delete operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each object affected by the `delete` operation. 
Each object will have two keys: `{"new_val": None, "old_val": <old value>}`.\n\n*Example* Delete a single document from the table `comments`.\n\n r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete().run(conn)\n\n*Example* Delete all documents from the table `comments`.\n\n r.table("comments").delete().run(conn)\n\n*Example* Delete all comments where the field `id_post` is `3`.\n\n r.table("comments").filter({"id_post": 3}).delete().run(conn)\n\n*Example* Delete a single document from the table `comments` and return its value.\n\n r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete(return_changes=True).run(conn)\n\nThe result will look like:\n\n {\n "deleted": 1,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": None,\n "old_val": {\n "id": "7eab9e63-73f1-4f33-8ce4-95cbea626f59",\n "author": "William",\n "comment": "Great post",\n "id_post": 3\n }\n }\n ],\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\n*Example* Delete all documents from the table `comments` without waiting for the\noperation to be flushed to disk.\n\n r.table("comments").delete(durability="soft").run(conn)\n'), - (rethinkdb.ast.Table.insert, b'table.insert(object | [object1, object2, ...][, durability="hard", return_changes=False, conflict="error"])\n -> object\n\nInsert documents into a table. Accepts a single document or an array of\ndocuments.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In soft durability mode RethinkDB will acknowledge the write immediately after receiving and caching it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `conflict`: Determine handling of inserting documents with the same primary key as existing entries. Possible values are `"error"`, `"replace"` or `"update"`.\n - `"error"`: Do not insert the new document and record the conflict as an error. 
This is the default.\n - `"replace"`: [Replace](http://rethinkdb.com/api/python/replace/) the old document in its entirety with the new one.\n - `"update"`: [Update](http://rethinkdb.com/api/python/update/) fields of the old document with fields from the new one.\n\nInsert returns an object that contains the following attributes:\n\n- `inserted`: the number of documents successfully inserted.\n- `replaced`: the number of documents updated when `conflict` is set to `"replace"` or `"update"`.\n- `unchanged`: the number of documents whose fields are identical to existing documents with the same primary key when `conflict` is set to `"replace"` or `"update"`.\n- `errors`: the number of errors encountered while performing the insert.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `skipped`: 0 for an insert operation.\n- `generated_keys`: a list of generated primary keys for inserted documents whose primary keys were not specified (capped to 100,000).\n- `warnings`: if the field `generated_keys` is truncated, you will get the warning _"Too many generated keys (<X>), array truncated to 100000."_.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `insert` operation. Each object will have two keys: `{"new_val": <new value>, "old_val": None}`.\n\n*Example* Insert a document into the table `posts`.\n\n r.table("posts").insert({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }).run(conn)\n\nThe result will be:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\n*Example* Insert a document without a defined primary key into the table `posts` where the\nprimary key is `id`.\n\n r.table("posts").insert({\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }).run(conn)\n\nRethinkDB will generate a primary key and return it in `generated_keys`.\n\n {\n "deleted": 0,\n "errors": 0,\n "generated_keys": [\n "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n ],\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\nRetrieve the document you just inserted with:\n\n r.table("posts").get("dd782b64-70a7-43e4-b65e-dd14ae61d947").run(conn)\n\nAnd you will get back:\n\n {\n "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n "title": "Lorem ipsum",\n "content": "Dolor sit amet",\n }\n\n*Example* Insert multiple documents into the table `users`.\n\n r.table("users").insert([\n {"id": "william", "email": "william@rethinkdb.com"},\n {"id": "lara", "email": "lara@rethinkdb.com"}\n ]).run(conn)\n\n*Example* Insert a document into the table `users`, replacing the document if the document\nalready exists. 
\n\n r.table("users").insert(\n {"id": "william", "email": "william@rethinkdb.com"},\n conflict="replace"\n ).run(conn)\n\n*Example* Copy the documents from `posts` to `posts_backup`.\n\n r.table("posts_backup").insert( r.table("posts") ).run(conn)\n\n*Example* Get back a copy of the inserted document (with its generated primary key).\n\n r.table("posts").insert(\n {"title": "Lorem ipsum", "content": "Dolor sit amet"},\n return_changes=True\n ).run(conn)\n\nThe result will be\n\n {\n "deleted": 0,\n "errors": 0,\n "generated_keys": [\n "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n ],\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0,\n "changes": [\n {\n "old_val": None,\n "new_val": {\n "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }\n }\n ]\n }\n'), - (rethinkdb.ast.Table.replace, b'table.replace(object | expr[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nselection.replace(object | expr[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nsingleSelection.replace(object | expr[, durability="hard", return_changes=False, non_atomic=False])\n -> object\n\nReplace documents in a table. Accepts a JSON document or a ReQL expression, and replaces\nthe original document with the new one. The new document must have the same primary key\nas the original document.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `non_atomic`: if set to `True`, executes the replacement and distributes the result to replicas in a non-atomic fashion. This flag is required to perform non-deterministic updates, such as those that require reading data from another table.\n\nReplace returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were replaced\n- `unchanged`: the number of documents that would have been modified, except that the\nnew value was the same as the old value\n- `inserted`: the number of new documents added. You can have new documents inserted if\nyou do a point-replace on a key that isn\'t in the table or you do a replace on a\nselection and one of the documents you are replacing has been deleted\n- `deleted`: the number of deleted documents when doing a replace with `None`\n- `errors`: the number of errors encountered while performing the replace.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `skipped`: 0 for a replace operation\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each object affected by the `replace` operation. 
Each object will have two keys: `{"new_val": <new value>, "old_val": <old value>}`.\n\n*Example* Replace the document with the primary key `1`.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "draft"\n }).run(conn)\n\n*Example* Remove the field `status` from all posts.\n\n r.table("posts").replace(lambda post:\n post.without("status")\n ).run(conn)\n\n*Example* Remove all the fields that are not `id`, `title` or `content`.\n\n r.table("posts").replace(lambda post:\n post.pluck("id", "title", "content")\n ).run(conn)\n\n*Example* Replace the document with the primary key `1` using soft durability.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "draft"\n }, durability="soft").run(conn)\n\n*Example* Replace the document with the primary key `1` and return the values of the document before\nand after the replace operation.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "published"\n }, return_changes=True).run(conn)\n\nThe result will have a `changes` field:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": {\n "id":1,\n "title": "Lorem ipsum"\n "content": "Aleas jacta est",\n "status": "published",\n },\n "old_val": {\n "id":1,\n "title": "Lorem ipsum"\n "content": "TODO",\n "status": "draft",\n "author": "William",\n }\n }\n ], \n "replaced": 1,\n "skipped": 0,\n "unchanged": 0\n }\n'), - (rethinkdb.ast.Table.sync, b'table.sync() -> object\n\n`sync` ensures that writes on a given table are written to permanent storage. Queries\nthat specify soft durability (`durability=\'soft\'`) do not give such guarantees, so\n`sync` can be used to ensure the state of these queries. A call to `sync` does not return\nuntil all previous writes to the table are persisted.\n\nIf successful, the operation returns an object: `{"synced": 1}`.\n\n*Example* After having updated multiple heroes with soft durability, we now want to wait\nuntil these changes are persisted.\n\n r.table(\'marvel\').sync().run(conn)\n\n'), - (rethinkdb.ast.Table.update, b'table.update(object | expr[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nselection.update(object | expr[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nsingleSelection.update(object | expr[, durability="hard", return_changes=False, non_atomic=False])\n -> object\n\nUpdate JSON documents in a table. Accepts a JSON document, a ReQL expression, or a combination of the two.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In soft durability mode RethinkDB will acknowledge the write immediately after receiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `non_atomic`: if set to `True`, executes the update and distributes the result to replicas in a non-atomic fashion. 
This flag is required to perform non-deterministic updates, such as those that require reading data from another table.\n\nUpdate returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were updated.\n- `unchanged`: the number of documents that would have been modified except the new value was the same as the old value.\n- `skipped`: the number of documents that were skipped because the document didn\'t exist.\n- `errors`: the number of errors encountered while performing the update.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `inserted`: 0 for an update operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `update` operation. Each object will have two keys: `{"new_val": <new value>, "old_val": <old value>}`.\n\n*Example* Update the status of the post with `id` of `1` to `published`.\n\n r.table("posts").get(1).update({"status": "published"}).run(conn)\n\n*Example* Update the status of all posts to `published`.\n\n r.table("posts").update({"status": "published"}).run(conn)\n\n*Example* Update the status of all the posts written by William.\n\n r.table("posts").filter({"author": "William"}).update({"status": "published"}).run(conn)\n\n*Example* Increment the field `view` with `id` of `1`.\nThis query will throw an error if the field `views` doesn\'t exist.\n\n r.table("posts").get(1).update({\n "views": r.row["views"]+1\n }).run(conn)\n\n*Example* Increment the field `view` of the post with `id` of `1`.\nIf the field `views` does not exist, it will be set to `0`.\n\n r.table("posts").update({\n "views": (r.row["views"]+1).default(0)\n }).run(conn)\n\n*Example* Perform a conditional update. \nIf the post has more than 100 views, set the `type` of a post to `hot`, else set it to `normal`.\n\n r.table("posts").get(1).update(lambda post:\n r.branch(\n post["views"] > 100,\n {"type": "hot"},\n {"type": "normal"}\n )\n ).run(conn)\n\n*Example* Update the field `num_comments` with the result of a sub-query. Because this update is not atomic, you must pass the `non_atomic` flag.\n\n r.table("posts").get(1).update({\n "num_comments": r.table("comments").filter({"id_post": 1}).count()\n }, non_atomic=True).run(conn)\n\nIf you forget to specify the `non_atomic` flag, you will get a `RqlRuntimeError`:\n\nRqlRuntimeError: Could not prove function deterministic. Maybe you want to use the non_atomic flag? \n\n*Example* Update the field `num_comments` with a random value between 0 and 100. 
This update cannot be proven deterministic because of `r.js` (and in fact is not), so you must pass the `non_atomic` flag.\n\n r.table("posts").get(1).update({\n "num_comments": r.js("Math.floor(Math.random()*100)")\n }, non_atomic=True).run(conn)\n\n*Example* Update the status of the post with `id` of `1` using soft durability.\n\n r.table("posts").get(1).update({status: "published"}, durability="soft").run(conn)\n\n*Example* Increment the field `views` and return the values of the document before and after the update operation.\n\n r.table("posts").get(1).update({\n "views": r.row["views"]+1\n }, return_changes=True).run(conn)\n\nThe result will now include a `changes` field:\n\n {\n "deleted": 1,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": {\n "id": 1,\n "author": "Julius_Caesar",\n "title": "Commentarii de Bello Gallico",\n "content": "Aleas jacta est",\n "views": 207\n },\n "old_val": {\n "id": 1,\n "author": "Julius_Caesar",\n "title": "Commentarii de Bello Gallico",\n "content": "Aleas jacta est",\n "views": 206\n }\n }\n ],\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\nThe `update` command supports RethinkDB\'s [nested field][nf] syntax to update subdocuments. Consider a user table with contact information in this format:\n\n[nf]: /docs/nested-fields/python\n\n {\n "id": 10001,\n "name": "Bob Smith",\n "contact": {\n "phone": {\n "work": "408-555-1212",\n "home": "408-555-1213",\n "cell": "408-555-1214"\n },\n "email": {\n "work": "bob@smith.com",\n "home": "bobsmith@example.com",\n "other": "bobbys@moosecall.net"\n },\n "im": {\n "skype": "Bob Smith",\n "aim": "bobmoose",\n "icq": "nobodyremembersicqnumbers"\n }\n },\n "notes": [\n {\n "date": r.time(2014,1,1,\'Z\'),\n "from": "John Doe",\n "subject": "My name is even more boring than Bob\'s"\n },\n {\n "date": r.time(2014,2,2,\'Z\'),\n "from": "Bob Smith Sr",\n "subject": "Happy Second of February"\n }\n ]\n }\n\n*Example* Update Bob Smith\'s cell phone number.\n\n r.table("users").get(10001).update(\n {"contact": {"phone": {"cell": "408-555-4242"}}}\n ).run(conn)\n\n*Example* Add another note to Bob Smith\'s record.\n\n new_note = {\n "date": r.now(),\n "from": "Inigo Montoya",\n "subject": "You killed my father"\n }\n r.table("users").get(10001).update(\n {"notes": r.row["notes"].append(new_note)}\n ).run(conn)\n\n*Example* Send a note to every user with an ICQ number.\n\n icq_note = {\n "date": r.now(),\n "from": "Admin",\n "subject": "Welcome to the future"\n }\n r.table("users").filter(\n r.row.has_fields({"contact": {"im": "icq"}})\n ).update(\n {"notes": r.row["notes"].append(icq_note)}\n ).run(conn)\n\n*Example* Replace all of Bob\'s IM records. 
Normally, `update` will merge nested documents together; to replace the entire `"im"` document, use the [literal][] command.\n\n[literal]: /api/python/literal/\n\n r.table(\'users\').get(10001).update(\n {"contact": {"im": r.literal({"aim": "themoosemeister"})}}\n ).run(conn)\n'), -] - -for function, text in docsSource: - try: - text = str(text.decode('utf-8')) - except UnicodeEncodeError: - pass - if hasattr(function, "__func__"): - function.__func__.__doc__ = text - else: - function.__doc__ = text diff --git a/ext/librethinkdbxx/reql/ql2.proto b/ext/librethinkdbxx/reql/ql2.proto deleted file mode 100644 index e40c5be5..00000000 --- a/ext/librethinkdbxx/reql/ql2.proto +++ /dev/null @@ -1,843 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// THE HIGH-LEVEL VIEW // -//////////////////////////////////////////////////////////////////////////////// - -// Process: When you first open a connection, send the magic number -// for the version of the protobuf you're targeting (in the [Version] -// enum). This should **NOT** be sent as a protobuf; just send the -// little-endian 32-bit integer over the wire raw. This number should -// only be sent once per connection. - -// The magic number shall be followed by an authorization key. The -// first 4 bytes are the length of the key to be sent as a little-endian -// 32-bit integer, followed by the key string. Even if there is no key, -// an empty string should be sent (length 0 and no data). - -// Following the authorization key, the client shall send a magic number -// for the communication protocol they want to use (in the [Protocol] -// enum). This shall be a little-endian 32-bit integer. - -// The server will then respond with a NULL-terminated string response. -// "SUCCESS" indicates that the connection has been accepted. Any other -// response indicates an error, and the response string should describe -// the error. - -// Next, for each query you want to send, construct a [Query] protobuf -// and serialize it to a binary blob. Send the blob's size to the -// server encoded as a little-endian 32-bit integer, followed by the -// blob itself. You will recieve a [Response] protobuf back preceded -// by its own size, once again encoded as a little-endian 32-bit -// integer. You can see an example exchange below in **EXAMPLE**. - -// A query consists of a [Term] to evaluate and a unique-per-connection -// [token]. - -// Tokens are used for two things: -// * Keeping track of which responses correspond to which queries. -// * Batched queries. Some queries return lots of results, so we send back -// batches of <1000, and you need to send a [CONTINUE] query with the same -// token to get more results from the original query. -//////////////////////////////////////////////////////////////////////////////// - -message VersionDummy { // We need to wrap it like this for some - // non-conforming protobuf libraries - // This enum contains the magic numbers for your version. See **THE HIGH-LEVEL - // VIEW** for what to do with it. 
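// As an illustrative (non-normative) sketch of the handshake described in
// **THE HIGH-LEVEL VIEW** above, a minimal Python client targeting V0_4 with
// the JSON wire protocol might do the following; the host and port are
// assumptions, and error handling is omitted:
//
//   import socket, struct
//   s = socket.create_connection(("localhost", 28015))   # assumed server address
//   s.sendall(struct.pack("<L", 0x400c2d20))              # Version.V0_4, raw little-endian
//   auth_key = b""                                        # empty key: length 0, no data
//   s.sendall(struct.pack("<L", len(auth_key)) + auth_key)
//   s.sendall(struct.pack("<L", 0x7e6970c7))              # Protocol.JSON
//   resp = b""
//   while not resp.endswith(b"\x00"):                     # NULL-terminated string response
//       resp += s.recv(64)
//   assert resp == b"SUCCESS\x00"                         # anything else describes an error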
- enum Version { - V0_1 = 0x3f61ba36; - V0_2 = 0x723081e1; // Authorization key during handshake - V0_3 = 0x5f75e83e; // Authorization key and protocol during handshake - V0_4 = 0x400c2d20; // Queries execute in parallel - V1_0 = 0x34c2bdc3; // Users and permissions - } - - // The protocol to use after the handshake, specified in V0_3 - enum Protocol { - PROTOBUF = 0x271ffc41; - JSON = 0x7e6970c7; - } -} - -// You send one of: -// * A [START] query with a [Term] to evaluate and a unique-per-connection token. -// * A [CONTINUE] query with the same token as a [START] query that returned -// [SUCCESS_PARTIAL] in its [Response]. -// * A [STOP] query with the same token as a [START] query that you want to stop. -// * A [NOREPLY_WAIT] query with a unique per-connection token. The server answers -// with a [WAIT_COMPLETE] [Response]. -// * A [SERVER_INFO] query. The server answers with a [SERVER_INFO] [Response]. -message Query { - enum QueryType { - START = 1; // Start a new query. - CONTINUE = 2; // Continue a query that returned [SUCCESS_PARTIAL] - // (see [Response]). - STOP = 3; // Stop a query partway through executing. - NOREPLY_WAIT = 4; // Wait for noreply operations to finish. - SERVER_INFO = 5; // Get server information. - } - optional QueryType type = 1; - // A [Term] is how we represent the operations we want a query to perform. - optional Term query = 2; // only present when [type] = [START] - optional int64 token = 3; - // This flag is ignored on the server. `noreply` should be added - // to `global_optargs` instead (the key "noreply" should map to - // either true or false). - optional bool OBSOLETE_noreply = 4 [default = false]; - - // If this is set to [true], then [Datum] values will sometimes be - // of [DatumType] [R_JSON] (see below). This can provide enormous - // speedups in languages with poor protobuf libraries. - optional bool accepts_r_json = 5 [default = false]; - - message AssocPair { - optional string key = 1; - optional Term val = 2; - } - repeated AssocPair global_optargs = 6; -} - -// A backtrace frame (see `backtrace` in Response below) -message Frame { - enum FrameType { - POS = 1; // Error occurred in a positional argument. - OPT = 2; // Error occurred in an optional argument. - } - optional FrameType type = 1; - optional int64 pos = 2; // The index of the positional argument. - optional string opt = 3; // The name of the optional argument. -} -message Backtrace { - repeated Frame frames = 1; -} - -// You get back a response with the same [token] as your query. -message Response { - enum ResponseType { - // These response types indicate success. - SUCCESS_ATOM = 1; // Query returned a single RQL datatype. - SUCCESS_SEQUENCE = 2; // Query returned a sequence of RQL datatypes. - SUCCESS_PARTIAL = 3; // Query returned a partial sequence of RQL - // datatypes. If you send a [CONTINUE] query with - // the same token as this response, you will get - // more of the sequence. Keep sending [CONTINUE] - // queries until you get back [SUCCESS_SEQUENCE]. - WAIT_COMPLETE = 4; // A [NOREPLY_WAIT] query completed. - SERVER_INFO = 5; // The data for a [SERVER_INFO] request. This is - // the same as `SUCCESS_ATOM` except that there will - // never be profiling data. - - // These response types indicate failure. - CLIENT_ERROR = 16; // Means the client is buggy. An example is if the - // client sends a malformed protobuf, or tries to - // send [CONTINUE] for an unknown token. - COMPILE_ERROR = 17; // Means the query failed during parsing or type - // checking. 
For example, if you pass too many - // arguments to a function. - RUNTIME_ERROR = 18; // Means the query failed at runtime. An example is - // if you add together two values from a table, but - // they turn out at runtime to be booleans rather - // than numbers. - } - optional ResponseType type = 1; - - // If `ResponseType` is `RUNTIME_ERROR`, this may be filled in with more - // information about the error. - enum ErrorType { - INTERNAL = 1000000; - RESOURCE_LIMIT = 2000000; - QUERY_LOGIC = 3000000; - NON_EXISTENCE = 3100000; - OP_FAILED = 4100000; - OP_INDETERMINATE = 4200000; - USER = 5000000; - PERMISSION_ERROR = 6000000; - } - optional ErrorType error_type = 7; - - // ResponseNotes are used to provide information about the query - // response that may be useful for people writing drivers or ORMs. - // Currently all the notes we send indicate that a stream has certain - // special properties. - enum ResponseNote { - // The stream is a changefeed stream (e.g. `r.table('test').changes()`). - SEQUENCE_FEED = 1; - // The stream is a point changefeed stream - // (e.g. `r.table('test').get(0).changes()`). - ATOM_FEED = 2; - // The stream is an order_by_limit changefeed stream - // (e.g. `r.table('test').order_by(index: 'id').limit(5).changes()`). - ORDER_BY_LIMIT_FEED = 3; - // The stream is a union of multiple changefeed types that can't be - // collapsed to a single type - // (e.g. `r.table('test').changes().union(r.table('test').get(0).changes())`). - UNIONED_FEED = 4; - // The stream is a changefeed stream and includes notes on what state - // the changefeed stream is in (e.g. objects of the form `{state: - // 'initializing'}`). - INCLUDES_STATES = 5; - } - repeated ResponseNote notes = 6; - - optional int64 token = 2; // Indicates what [Query] this response corresponds to. - - // [response] contains 1 RQL datum if [type] is [SUCCESS_ATOM] or - // [SERVER_INFO]. [response] contains many RQL data if [type] is - // [SUCCESS_SEQUENCE] or [SUCCESS_PARTIAL]. [response] contains 1 - // error message (of type [R_STR]) in all other cases. - repeated Datum response = 3; - - // If [type] is [CLIENT_ERROR], [TYPE_ERROR], or [RUNTIME_ERROR], then a - // backtrace will be provided. The backtrace says where in the query the - // error occurred. Ideally this information will be presented to the user as - // a pretty-printed version of their query with the erroneous section - // underlined. A backtrace is a series of 0 or more [Frame]s, each of which - // specifies either the index of a positional argument or the name of an - // optional argument. (Those words will make more sense if you look at the - // [Term] message below.) - optional Backtrace backtrace = 4; // Contains n [Frame]s when you get back an error. - - // If the [global_optargs] in the [Query] that this [Response] is a - // response to contains a key "profile" which maps to a static value of - // true then [profile] will contain a [Datum] which provides profiling - // information about the execution of the query. This field should be - // returned to the user along with the result that would normally be - // returned (a datum or a cursor). In official drivers this is accomplished - // by putting them inside of an object with "value" mapping to the return - // value and "profile" mapping to the profile object. - optional Datum profile = 5; -} - -// A [Datum] is a chunk of data that can be serialized to disk or returned to -// the user in a Response. 
Currently we only support JSON types, but we may -// support other types in the future (e.g., a date type or an integer type). -message Datum { - enum DatumType { - R_NULL = 1; - R_BOOL = 2; - R_NUM = 3; // a double - R_STR = 4; - R_ARRAY = 5; - R_OBJECT = 6; - // This [DatumType] will only be used if [accepts_r_json] is - // set to [true] in [Query]. [r_str] will be filled with a - // JSON encoding of the [Datum]. - R_JSON = 7; // uses r_str - } - optional DatumType type = 1; - optional bool r_bool = 2; - optional double r_num = 3; - optional string r_str = 4; - - repeated Datum r_array = 5; - message AssocPair { - optional string key = 1; - optional Datum val = 2; - } - repeated AssocPair r_object = 6; -} - -// A [Term] is either a piece of data (see **Datum** above), or an operator and -// its operands. If you have a [Datum], it's stored in the member [datum]. If -// you have an operator, its positional arguments are stored in [args] and its -// optional arguments are stored in [optargs]. -// -// A note about type signatures: -// We use the following notation to denote types: -// arg1_type, arg2_type, argrest_type... -> result_type -// So, for example, if we have a function `avg` that takes any number of -// arguments and averages them, we might write: -// NUMBER... -> NUMBER -// Or if we had a function that took one number modulo another: -// NUMBER, NUMBER -> NUMBER -// Or a function that takes a table and a primary key of any Datum type, then -// retrieves the entry with that primary key: -// Table, DATUM -> OBJECT -// Some arguments must be provided as literal values (and not the results of sub -// terms). These are marked with a `!`. -// Optional arguments are specified within curly braces as argname `:` value -// type (e.x `{noreply:BOOL}`) -// Many RQL operations are polymorphic. For these, alterantive type signatures -// are separated by `|`. -// -// The RQL type hierarchy is as follows: -// Top -// DATUM -// NULL -// BOOL -// NUMBER -// STRING -// OBJECT -// SingleSelection -// ARRAY -// Sequence -// ARRAY -// Stream -// StreamSelection -// Table -// Database -// Function -// Ordering - used only by ORDER_BY -// Pathspec -- an object, string, or array that specifies a path -// Error -message Term { - enum TermType { - // A RQL datum, stored in `datum` below. - DATUM = 1; - - MAKE_ARRAY = 2; // DATUM... -> ARRAY - // Evaluate the terms in [optargs] and make an object - MAKE_OBJ = 3; // {...} -> OBJECT - - // * Compound types - - // Takes an integer representing a variable and returns the value stored - // in that variable. It's the responsibility of the client to translate - // from their local representation of a variable to a unique _non-negative_ - // integer for that variable. (We do it this way instead of letting - // clients provide variable names as strings to discourage - // variable-capturing client libraries, and because it's more efficient - // on the wire.) - VAR = 10; // !NUMBER -> DATUM - // Takes some javascript code and executes it. - JAVASCRIPT = 11; // STRING {timeout: !NUMBER} -> DATUM | - // STRING {timeout: !NUMBER} -> Function(*) - UUID = 169; // () -> DATUM - - // Takes an HTTP URL and gets it. 
If the get succeeds and - // returns valid JSON, it is converted into a DATUM - HTTP = 153; // STRING {data: OBJECT | STRING, - // timeout: !NUMBER, - // method: STRING, - // params: OBJECT, - // header: OBJECT | ARRAY, - // attempts: NUMBER, - // redirects: NUMBER, - // verify: BOOL, - // page: FUNC | STRING, - // page_limit: NUMBER, - // auth: OBJECT, - // result_format: STRING, - // } -> STRING | STREAM - - // Takes a string and throws an error with that message. - // Inside of a `default` block, you can omit the first - // argument to rethrow whatever error you catch (this is most - // useful as an argument to the `default` filter optarg). - ERROR = 12; // STRING -> Error | -> Error - // Takes nothing and returns a reference to the implicit variable. - IMPLICIT_VAR = 13; // -> DATUM - - // * Data Operators - // Returns a reference to a database. - DB = 14; // STRING -> Database - // Returns a reference to a table. - TABLE = 15; // Database, STRING, {read_mode:STRING, identifier_format:STRING} -> Table - // STRING, {read_mode:STRING, identifier_format:STRING} -> Table - // Gets a single element from a table by its primary or a secondary key. - GET = 16; // Table, STRING -> SingleSelection | Table, NUMBER -> SingleSelection | - // Table, STRING -> NULL | Table, NUMBER -> NULL | - GET_ALL = 78; // Table, DATUM..., {index:!STRING} => ARRAY - - // Simple DATUM Ops - EQ = 17; // DATUM... -> BOOL - NE = 18; // DATUM... -> BOOL - LT = 19; // DATUM... -> BOOL - LE = 20; // DATUM... -> BOOL - GT = 21; // DATUM... -> BOOL - GE = 22; // DATUM... -> BOOL - NOT = 23; // BOOL -> BOOL - // ADD can either add two numbers or concatenate two arrays. - ADD = 24; // NUMBER... -> NUMBER | STRING... -> STRING - SUB = 25; // NUMBER... -> NUMBER - MUL = 26; // NUMBER... -> NUMBER - DIV = 27; // NUMBER... -> NUMBER - MOD = 28; // NUMBER, NUMBER -> NUMBER - - FLOOR = 183; // NUMBER -> NUMBER - CEIL = 184; // NUMBER -> NUMBER - ROUND = 185; // NUMBER -> NUMBER - - // DATUM Array Ops - // Append a single element to the end of an array (like `snoc`). - APPEND = 29; // ARRAY, DATUM -> ARRAY - // Prepend a single element to the end of an array (like `cons`). - PREPEND = 80; // ARRAY, DATUM -> ARRAY - //Remove the elements of one array from another array. - DIFFERENCE = 95; // ARRAY, ARRAY -> ARRAY - - // DATUM Set Ops - // Set ops work on arrays. They don't use actual sets and thus have - // performance characteristics you would expect from arrays rather than - // from sets. All set operations have the post condition that they - // array they return contains no duplicate values. - SET_INSERT = 88; // ARRAY, DATUM -> ARRAY - SET_INTERSECTION = 89; // ARRAY, ARRAY -> ARRAY - SET_UNION = 90; // ARRAY, ARRAY -> ARRAY - SET_DIFFERENCE = 91; // ARRAY, ARRAY -> ARRAY - - SLICE = 30; // Sequence, NUMBER, NUMBER -> Sequence - SKIP = 70; // Sequence, NUMBER -> Sequence - LIMIT = 71; // Sequence, NUMBER -> Sequence - OFFSETS_OF = 87; // Sequence, DATUM -> Sequence | Sequence, Function(1) -> Sequence - CONTAINS = 93; // Sequence, (DATUM | Function(1))... -> BOOL - - // Stream/Object Ops - // Get a particular field from an object, or map that over a - // sequence. - GET_FIELD = 31; // OBJECT, STRING -> DATUM - // | Sequence, STRING -> Sequence - // Return an array containing the keys of the object. - KEYS = 94; // OBJECT -> ARRAY - // Return an array containing the values of the object. - VALUES = 186; // OBJECT -> ARRAY - // Creates an object - OBJECT = 143; // STRING, DATUM, ... 
-> OBJECT - // Check whether an object contains all the specified fields, - // or filters a sequence so that all objects inside of it - // contain all the specified fields. - HAS_FIELDS = 32; // OBJECT, Pathspec... -> BOOL - // x.with_fields(...) <=> x.has_fields(...).pluck(...) - WITH_FIELDS = 96; // Sequence, Pathspec... -> Sequence - // Get a subset of an object by selecting some attributes to preserve, - // or map that over a sequence. (Both pick and pluck, polymorphic.) - PLUCK = 33; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT - // Get a subset of an object by selecting some attributes to discard, or - // map that over a sequence. (Both unpick and without, polymorphic.) - WITHOUT = 34; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT - // Merge objects (right-preferential) - MERGE = 35; // OBJECT... -> OBJECT | Sequence -> Sequence - - // Sequence Ops - // Get all elements of a sequence between two values. - // Half-open by default, but the openness of either side can be - // changed by passing 'closed' or 'open for `right_bound` or - // `left_bound`. - BETWEEN_DEPRECATED = 36; // Deprecated version of between, which allows `null` to specify unboundedness - // With the newer version, clients should use `r.minval` and `r.maxval` for unboundedness - BETWEEN = 182; // StreamSelection, DATUM, DATUM, {index:!STRING, right_bound:STRING, left_bound:STRING} -> StreamSelection - REDUCE = 37; // Sequence, Function(2) -> DATUM - MAP = 38; // Sequence, Function(1) -> Sequence - // The arity of the function should be - // Sequence..., Function(sizeof...(Sequence)) -> Sequence - - FOLD = 187; // Sequence, Datum, Function(2), {Function(3), Function(1) - - // Filter a sequence with either a function or a shortcut - // object (see API docs for details). The body of FILTER is - // wrapped in an implicit `.default(false)`, and you can - // change the default value by specifying the `default` - // optarg. If you make the default `r.error`, all errors - // caught by `default` will be rethrown as if the `default` - // did not exist. - FILTER = 39; // Sequence, Function(1), {default:DATUM} -> Sequence | - // Sequence, OBJECT, {default:DATUM} -> Sequence - // Map a function over a sequence and then concatenate the results together. - CONCAT_MAP = 40; // Sequence, Function(1) -> Sequence - // Order a sequence based on one or more attributes. - ORDER_BY = 41; // Sequence, (!STRING | Ordering)..., {index: (!STRING | Ordering)} -> Sequence - // Get all distinct elements of a sequence (like `uniq`). - DISTINCT = 42; // Sequence -> Sequence - // Count the number of elements in a sequence, or only the elements that match - // a given filter. - COUNT = 43; // Sequence -> NUMBER | Sequence, DATUM -> NUMBER | Sequence, Function(1) -> NUMBER - IS_EMPTY = 86; // Sequence -> BOOL - // Take the union of multiple sequences (preserves duplicate elements! (use distinct)). - UNION = 44; // Sequence... -> Sequence - // Get the Nth element of a sequence. - NTH = 45; // Sequence, NUMBER -> DATUM - // do NTH or GET_FIELD depending on target object - BRACKET = 170; // Sequence | OBJECT, NUMBER | STRING -> DATUM - // OBSOLETE_GROUPED_MAPREDUCE = 46; - // OBSOLETE_GROUPBY = 47; - - INNER_JOIN = 48; // Sequence, Sequence, Function(2) -> Sequence - OUTER_JOIN = 49; // Sequence, Sequence, Function(2) -> Sequence - // An inner-join that does an equality comparison on two attributes. 
- EQ_JOIN = 50; // Sequence, !STRING, Sequence, {index:!STRING} -> Sequence - ZIP = 72; // Sequence -> Sequence - RANGE = 173; // -> Sequence [0, +inf) - // NUMBER -> Sequence [0, a) - // NUMBER, NUMBER -> Sequence [a, b) - - // Array Ops - // Insert an element in to an array at a given index. - INSERT_AT = 82; // ARRAY, NUMBER, DATUM -> ARRAY - // Remove an element at a given index from an array. - DELETE_AT = 83; // ARRAY, NUMBER -> ARRAY | - // ARRAY, NUMBER, NUMBER -> ARRAY - // Change the element at a given index of an array. - CHANGE_AT = 84; // ARRAY, NUMBER, DATUM -> ARRAY - // Splice one array in to another array. - SPLICE_AT = 85; // ARRAY, NUMBER, ARRAY -> ARRAY - - // * Type Ops - // Coerces a datum to a named type (e.g. "bool"). - // If you previously used `stream_to_array`, you should use this instead - // with the type "array". - COERCE_TO = 51; // Top, STRING -> Top - // Returns the named type of a datum (e.g. TYPE_OF(true) = "BOOL") - TYPE_OF = 52; // Top -> STRING - - // * Write Ops (the OBJECTs contain data about number of errors etc.) - // Updates all the rows in a selection. Calls its Function with the row - // to be updated, and then merges the result of that call. - UPDATE = 53; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | - // SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | - // StreamSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | - // SingleSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT - // Deletes all the rows in a selection. - DELETE = 54; // StreamSelection, {durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection -> OBJECT - // Replaces all the rows in a selection. Calls its Function with the row - // to be replaced, and then discards it and stores the result of that - // call. - REPLACE = 55; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT - // Inserts into a table. If `conflict` is replace, overwrites - // entries with the same primary key. If `conflict` is - // update, does an update on the entry. If `conflict` is - // error, or is omitted, conflicts will trigger an error. - INSERT = 56; // Table, OBJECT, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT | Table, Sequence, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT - - // * Administrative OPs - // Creates a database with a particular name. - DB_CREATE = 57; // STRING -> OBJECT - // Drops a database with a particular name. - DB_DROP = 58; // STRING -> OBJECT - // Lists all the databases by name. (Takes no arguments) - DB_LIST = 59; // -> ARRAY - // Creates a table with a particular name in a particular - // database. (You may omit the first argument to use the - // default database.) - TABLE_CREATE = 60; // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT - // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT - // STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT - // STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT - // Drops a table with a particular name from a particular - // database. 
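The write terms a few lines up (INSERT, UPDATE, REPLACE, DELETE) also carry their optional arguments -- conflict, durability, return_changes -- which correspond to the AssocPair optargs field declared near the end of this Term message and, in the JSON form, ride along as a trailing object on the term array. A hedged illustration follows; the document and optarg values are invented, and only the term numbers (56, 15, 14) and the optarg names come from the listing above.

    #include <iostream>
    #include <string>

    int main() {
        // Roughly: insert {"id": 1} into db "test", table "users", conflict: "update"
        std::string insert_term =
            "[56,"                                   // INSERT
            "[[15,[[14,[\"test\"]],\"users\"]],"     //   TABLE(DB("test"), "users")
            "{\"id\":1}],"                           //   the document to insert
            "{\"conflict\":\"update\"}]";            // trailing optargs object
        std::cout << insert_term << "\n";
    }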
(You may omit the first argument to use the - // default database.) - TABLE_DROP = 61; // Database, STRING -> OBJECT - // STRING -> OBJECT - // Lists all the tables in a particular database. (You may - // omit the first argument to use the default database.) - TABLE_LIST = 62; // Database -> ARRAY - // -> ARRAY - // Returns the row in the `rethinkdb.table_config` or `rethinkdb.db_config` table - // that corresponds to the given database or table. - CONFIG = 174; // Database -> SingleSelection - // Table -> SingleSelection - // Returns the row in the `rethinkdb.table_status` table that corresponds to the - // given table. - STATUS = 175; // Table -> SingleSelection - // Called on a table, waits for that table to be ready for read/write operations. - // Called on a database, waits for all of the tables in the database to be ready. - // Returns the corresponding row or rows from the `rethinkdb.table_status` table. - WAIT = 177; // Table -> OBJECT - // Database -> OBJECT - // Generates a new config for the given table, or all tables in the given database - // The `shards` and `replicas` arguments are required. If `emergency_repair` is - // specified, it will enter a completely different mode of repairing a table - // which has lost half or more of its replicas. - RECONFIGURE = 176; // Database|Table, {shards:NUMBER, replicas:NUMBER [, - // dry_run:BOOLEAN] - // } -> OBJECT - // Database|Table, {shards:NUMBER, replicas:OBJECT [, - // primary_replica_tag:STRING, - // nonvoting_replica_tags:ARRAY, - // dry_run:BOOLEAN] - // } -> OBJECT - // Table, {emergency_repair:STRING, dry_run:BOOLEAN} -> OBJECT - // Balances the table's shards but leaves everything else the same. Can also be - // applied to an entire database at once. - REBALANCE = 179; // Table -> OBJECT - // Database -> OBJECT - - // Ensures that previously issued soft-durability writes are complete and - // written to disk. - SYNC = 138; // Table -> OBJECT - - // Set global, database, or table-specific permissions - GRANT = 188; // -> OBJECT - // Database -> OBJECT - // Table -> OBJECT - - // * Secondary indexes OPs - // Creates a new secondary index with a particular name and definition. - INDEX_CREATE = 75; // Table, STRING, Function(1), {multi:BOOL} -> OBJECT - // Drops a secondary index with a particular name from the specified table. - INDEX_DROP = 76; // Table, STRING -> OBJECT - // Lists all secondary indexes on a particular table. - INDEX_LIST = 77; // Table -> ARRAY - // Gets information about whether or not a set of indexes are ready to - // be accessed. Returns a list of objects that look like this: - // {index:STRING, ready:BOOL[, progress:NUMBER]} - INDEX_STATUS = 139; // Table, STRING... -> ARRAY - // Blocks until a set of indexes are ready to be accessed. Returns the - // same values INDEX_STATUS. - INDEX_WAIT = 140; // Table, STRING... -> ARRAY - // Renames the given index to a new name - INDEX_RENAME = 156; // Table, STRING, STRING, {overwrite:BOOL} -> OBJECT - - // * Control Operators - // Calls a function on data - FUNCALL = 64; // Function(*), DATUM... -> DATUM - // Executes its first argument, and returns its second argument if it - // got [true] or its third argument if it got [false] (like an `if` - // statement). - BRANCH = 65; // BOOL, Top, Top -> Top - // Returns true if any of its arguments returns true (short-circuits). - OR = 66; // BOOL... -> BOOL - // Returns true if all of its arguments return true (short-circuits). - AND = 67; // BOOL... 
-> BOOL - // Calls its Function with each entry in the sequence - // and executes the array of terms that Function returns. - FOR_EACH = 68; // Sequence, Function(1) -> OBJECT - -//////////////////////////////////////////////////////////////////////////////// -////////// Special Terms -//////////////////////////////////////////////////////////////////////////////// - - // An anonymous function. Takes an array of numbers representing - // variables (see [VAR] above), and a [Term] to execute with those in - // scope. Returns a function that may be passed an array of arguments, - // then executes the Term with those bound to the variable names. The - // user will never construct this directly. We use it internally for - // things like `map` which take a function. The "arity" of a [Function] is - // the number of arguments it takes. - // For example, here's what `_X_.map{|x| x+2}` turns into: - // Term { - // type = MAP; - // args = [_X_, - // Term { - // type = Function; - // args = [Term { - // type = DATUM; - // datum = Datum { - // type = R_ARRAY; - // r_array = [Datum { type = R_NUM; r_num = 1; }]; - // }; - // }, - // Term { - // type = ADD; - // args = [Term { - // type = VAR; - // args = [Term { - // type = DATUM; - // datum = Datum { type = R_NUM; - // r_num = 1}; - // }]; - // }, - // Term { - // type = DATUM; - // datum = Datum { type = R_NUM; r_num = 2; }; - // }]; - // }]; - // }]; - FUNC = 69; // ARRAY, Top -> ARRAY -> Top - - // Indicates to ORDER_BY that this attribute is to be sorted in ascending order. - ASC = 73; // !STRING -> Ordering - // Indicates to ORDER_BY that this attribute is to be sorted in descending order. - DESC = 74; // !STRING -> Ordering - - // Gets info about anything. INFO is most commonly called on tables. - INFO = 79; // Top -> OBJECT - - // `a.match(b)` returns a match object if the string `a` - // matches the regular expression `b`. - MATCH = 97; // STRING, STRING -> DATUM - - // Change the case of a string. - UPCASE = 141; // STRING -> STRING - DOWNCASE = 142; // STRING -> STRING - - // Select a number of elements from sequence with uniform distribution. - SAMPLE = 81; // Sequence, NUMBER -> Sequence - - // Evaluates its first argument. If that argument returns - // NULL or throws an error related to the absence of an - // expected value (for instance, accessing a non-existent - // field or adding NULL to an integer), DEFAULT will either - // return its second argument or execute it if it's a - // function. If the second argument is a function, it will be - // passed either the text of the error or NULL as its - // argument. - DEFAULT = 92; // Top, Top -> Top - - // Parses its first argument as a json string and returns it as a - // datum. - JSON = 98; // STRING -> DATUM - // Returns the datum as a JSON string. - // N.B.: we would really prefer this be named TO_JSON and that exists as - // an alias in Python and JavaScript drivers; however it conflicts with the - // standard `to_json` method defined by Ruby's standard json library. - TO_JSON_STRING = 172; // DATUM -> STRING - - // Parses its first arguments as an ISO 8601 time and returns it as a - // datum. - ISO8601 = 99; // STRING -> PSEUDOTYPE(TIME) - // Prints a time as an ISO 8601 time. - TO_ISO8601 = 100; // PSEUDOTYPE(TIME) -> STRING - - // Returns a time given seconds since epoch in UTC. - EPOCH_TIME = 101; // NUMBER -> PSEUDOTYPE(TIME) - // Returns seconds since epoch in UTC given a time. 
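Time values are not a native JSON type; they cross the wire wrapped in a tagged object, which is what the PSEUDOTYPE(TIME) notation used here refers to. The sketch below mirrors the field names produced by Datum::to_raw() in datum.cc further down in this diff; the timezone string format (an ISO 8601 UTC offset) is an assumption, and the helper itself is not part of the library.

    #include <cstdio>
    #include <string>

    // Produce the {"$reql_type$":"TIME", ...} wrapping for an epoch timestamp.
    std::string time_pseudotype(double epoch_seconds, const std::string& utc_offset) {
        char buf[128];
        std::snprintf(buf, sizeof buf,
                      "{\"$reql_type$\":\"TIME\",\"epoch_time\":%.6f,\"timezone\":\"%s\"}",
                      epoch_seconds, utc_offset.c_str());
        return buf;
    }

    int main() {
        // e.g. a value a server might produce for EPOCH_TIME(1427412345)
        std::printf("%s\n", time_pseudotype(1427412345.0, "+00:00").c_str());
    }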
- TO_EPOCH_TIME = 102; // PSEUDOTYPE(TIME) -> NUMBER - - // The time the query was received by the server. - NOW = 103; // -> PSEUDOTYPE(TIME) - // Puts a time into an ISO 8601 timezone. - IN_TIMEZONE = 104; // PSEUDOTYPE(TIME), STRING -> PSEUDOTYPE(TIME) - // a.during(b, c) returns whether a is in the range [b, c) - DURING = 105; // PSEUDOTYPE(TIME), PSEUDOTYPE(TIME), PSEUDOTYPE(TIME) -> BOOL - // Retrieves the date portion of a time. - DATE = 106; // PSEUDOTYPE(TIME) -> PSEUDOTYPE(TIME) - // x.time_of_day == x.date - x - TIME_OF_DAY = 126; // PSEUDOTYPE(TIME) -> NUMBER - // Returns the timezone of a time. - TIMEZONE = 127; // PSEUDOTYPE(TIME) -> STRING - - // These access the various components of a time. - YEAR = 128; // PSEUDOTYPE(TIME) -> NUMBER - MONTH = 129; // PSEUDOTYPE(TIME) -> NUMBER - DAY = 130; // PSEUDOTYPE(TIME) -> NUMBER - DAY_OF_WEEK = 131; // PSEUDOTYPE(TIME) -> NUMBER - DAY_OF_YEAR = 132; // PSEUDOTYPE(TIME) -> NUMBER - HOURS = 133; // PSEUDOTYPE(TIME) -> NUMBER - MINUTES = 134; // PSEUDOTYPE(TIME) -> NUMBER - SECONDS = 135; // PSEUDOTYPE(TIME) -> NUMBER - - // Construct a time from a date and optional timezone or a - // date+time and optional timezone. - TIME = 136; // NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | - // NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | - - // Constants for ISO 8601 days of the week. - MONDAY = 107; // -> 1 - TUESDAY = 108; // -> 2 - WEDNESDAY = 109; // -> 3 - THURSDAY = 110; // -> 4 - FRIDAY = 111; // -> 5 - SATURDAY = 112; // -> 6 - SUNDAY = 113; // -> 7 - - // Constants for ISO 8601 months. - JANUARY = 114; // -> 1 - FEBRUARY = 115; // -> 2 - MARCH = 116; // -> 3 - APRIL = 117; // -> 4 - MAY = 118; // -> 5 - JUNE = 119; // -> 6 - JULY = 120; // -> 7 - AUGUST = 121; // -> 8 - SEPTEMBER = 122; // -> 9 - OCTOBER = 123; // -> 10 - NOVEMBER = 124; // -> 11 - DECEMBER = 125; // -> 12 - - // Indicates to MERGE to replace, or remove in case of an empty literal, the - // other object rather than merge it. - LITERAL = 137; // -> Merging - // JSON -> Merging - - // SEQUENCE, STRING -> GROUPED_SEQUENCE | SEQUENCE, FUNCTION -> GROUPED_SEQUENCE - GROUP = 144; - SUM = 145; - AVG = 146; - MIN = 147; - MAX = 148; - - // `str.split()` splits on whitespace - // `str.split(" ")` splits on spaces only - // `str.split(" ", 5)` splits on spaces with at most 5 results - // `str.split(nil, 5)` splits on whitespace with at most 5 results - SPLIT = 149; // STRING -> ARRAY | STRING, STRING -> ARRAY | STRING, STRING, NUMBER -> ARRAY | STRING, NULL, NUMBER -> ARRAY - - UNGROUP = 150; // GROUPED_DATA -> ARRAY - - // Takes a range of numbers and returns a random number within the range - RANDOM = 151; // NUMBER, NUMBER {float:BOOL} -> DATUM - - CHANGES = 152; // TABLE -> STREAM - ARGS = 154; // ARRAY -> SPECIAL (used to splice arguments) - - // BINARY is client-only at the moment, it is not supported on the server - BINARY = 155; // STRING -> PSEUDOTYPE(BINARY) - - GEOJSON = 157; // OBJECT -> PSEUDOTYPE(GEOMETRY) - TO_GEOJSON = 158; // PSEUDOTYPE(GEOMETRY) -> OBJECT - POINT = 159; // NUMBER, NUMBER -> PSEUDOTYPE(GEOMETRY) - LINE = 160; // (ARRAY | PSEUDOTYPE(GEOMETRY))... -> PSEUDOTYPE(GEOMETRY) - POLYGON = 161; // (ARRAY | PSEUDOTYPE(GEOMETRY))... 
-> PSEUDOTYPE(GEOMETRY) - DISTANCE = 162; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) {geo_system:STRING, unit:STRING} -> NUMBER - INTERSECTS = 163; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL - INCLUDES = 164; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL - CIRCLE = 165; // PSEUDOTYPE(GEOMETRY), NUMBER {num_vertices:NUMBER, geo_system:STRING, unit:STRING, fill:BOOL} -> PSEUDOTYPE(GEOMETRY) - GET_INTERSECTING = 166; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING} -> StreamSelection - FILL = 167; // PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY) - GET_NEAREST = 168; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING, max_results:NUM, max_dist:NUM, geo_system:STRING, unit:STRING} -> ARRAY - POLYGON_SUB = 171; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY) - - // Constants for specifying key ranges - MINVAL = 180; - MAXVAL = 181; - } - optional TermType type = 1; - - // This is only used when type is DATUM. - optional Datum datum = 2; - - repeated Term args = 3; // Holds the positional arguments of the query. - message AssocPair { - optional string key = 1; - optional Term val = 2; - } - repeated AssocPair optargs = 4; // Holds the optional arguments of the query. - // (Note that the order of the optional arguments doesn't matter; think of a - // Hash.) -} - -//////////////////////////////////////////////////////////////////////////////// -// EXAMPLE // -//////////////////////////////////////////////////////////////////////////////// -// ```ruby -// r.table('tbl', {:read_mode => 'outdated'}).insert([{:id => 0}, {:id => 1}]) -// ``` -// Would turn into: -// Term { -// type = INSERT; -// args = [Term { -// type = TABLE; -// args = [Term { -// type = DATUM; -// datum = Datum { type = R_STR; r_str = "tbl"; }; -// }]; -// optargs = [["read_mode", -// Term { -// type = DATUM; -// datum = Datum { type = R_STR; r_bool = "outdated"; }; -// }]]; -// }, -// Term { -// type = MAKE_ARRAY; -// args = [Term { -// type = DATUM; -// datum = Datum { type = R_OBJECT; r_object = [["id", 0]]; }; -// }, -// Term { -// type = DATUM; -// datum = Datum { type = R_OBJECT; r_object = [["id", 1]]; }; -// }]; -// }] -// } -// And the server would reply: -// Response { -// type = SUCCESS_ATOM; -// token = 1; -// response = [Datum { type = R_OBJECT; r_object = [["inserted", 2]]; }]; -// } -// Or, if there were an error: -// Response { -// type = RUNTIME_ERROR; -// token = 1; -// response = [Datum { type = R_STR; r_str = "The table `tbl` doesn't exist!"; }]; -// backtrace = [Frame { type = POS; pos = 0; }, Frame { type = POS; pos = 0; }]; -// } diff --git a/ext/librethinkdbxx/src/connection.cc b/ext/librethinkdbxx/src/connection.cc deleted file mode 100644 index 53d106ec..00000000 --- a/ext/librethinkdbxx/src/connection.cc +++ /dev/null @@ -1,434 +0,0 @@ -#include <sys/types.h> -#include <sys/socket.h> -#include <sys/select.h> - -#include <netdb.h> -#include <unistd.h> - -#include <algorithm> -#include <cstring> -#include <cinttypes> -#include <memory> - -#include "connection.h" -#include "connection_p.h" -#include "json_p.h" -#include "exceptions.h" -#include "term.h" -#include "cursor_p.h" - -#include "rapidjson-config.h" -#include "rapidjson/rapidjson.h" -#include "rapidjson/encodedstream.h" -#include "rapidjson/document.h" - -namespace RethinkDB { - -using QueryType = Protocol::Query::QueryType; - -// constants -const int debug_net = 0; -const uint32_t version_magic = - static_cast<uint32_t>(Protocol::VersionDummy::Version::V0_4); -const uint32_t json_magic = - 
static_cast<uint32_t>(Protocol::VersionDummy::Protocol::JSON); - -std::unique_ptr<Connection> connect(std::string host, int port, std::string auth_key) { - struct addrinfo hints; - memset(&hints, 0, sizeof hints); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - - char port_str[16]; - snprintf(port_str, 16, "%d", port); - struct addrinfo *servinfo; - int ret = getaddrinfo(host.c_str(), port_str, &hints, &servinfo); - if (ret) throw Error("getaddrinfo: %s\n", gai_strerror(ret)); - - struct addrinfo *p; - Error error; - int sockfd; - for (p = servinfo; p != NULL; p = p->ai_next) { - sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); - if (sockfd == -1) { - error = Error::from_errno("socket"); - continue; - } - - if (connect(sockfd, p->ai_addr, p->ai_addrlen) == -1) { - ::close(sockfd); - error = Error::from_errno("connect"); - continue; - } - - break; - } - - if (p == NULL) { - throw error; - } - - freeaddrinfo(servinfo); - - std::unique_ptr<ConnectionPrivate> conn_private(new ConnectionPrivate(sockfd)); - WriteLock writer(conn_private.get()); - { - size_t size = auth_key.size(); - char buf[12 + size]; - memcpy(buf, &version_magic, 4); - uint32_t n = size; - memcpy(buf + 4, &n, 4); - memcpy(buf + 8, auth_key.data(), size); - memcpy(buf + 8 + size, &json_magic, 4); - writer.send(buf, sizeof buf); - } - - ReadLock reader(conn_private.get()); - { - const size_t max_response_length = 1024; - char buf[max_response_length + 1]; - size_t len = reader.recv_cstring(buf, max_response_length); - if (len == max_response_length || strcmp(buf, "SUCCESS")) { - buf[len] = 0; - ::close(sockfd); - throw Error("Server rejected connection with message: %s", buf); - } - } - - return std::unique_ptr<Connection>(new Connection(conn_private.release())); -} - -Connection::Connection(ConnectionPrivate *dd) : d(dd) { } -Connection::~Connection() { - // close(); - if (d->guarded_sockfd >= 0) - ::close(d->guarded_sockfd); -} - -size_t ReadLock::recv_some(char* buf, size_t size, double wait) { - if (wait != FOREVER) { - while (true) { - fd_set readfds; - struct timeval tv; - - FD_ZERO(&readfds); - FD_SET(conn->guarded_sockfd, &readfds); - - tv.tv_sec = (int)wait; - tv.tv_usec = (int)((wait - (int)wait) / MICROSECOND); - int rv = select(conn->guarded_sockfd + 1, &readfds, NULL, NULL, &tv); - if (rv == -1) { - throw Error::from_errno("select"); - } else if (rv == 0) { - throw TimeoutException(); - } - - if (FD_ISSET(conn->guarded_sockfd, &readfds)) { - break; - } - } - } - - ssize_t numbytes = ::recv(conn->guarded_sockfd, buf, size, 0); - if (numbytes <= 0) throw Error::from_errno("recv"); - if (debug_net > 1) { - fprintf(stderr, "<< %s\n", write_datum(std::string(buf, numbytes)).c_str()); - } - - return numbytes; -} - -void ReadLock::recv(char* buf, size_t size, double wait) { - while (size) { - size_t numbytes = recv_some(buf, size, wait); - - buf += numbytes; - size -= numbytes; - } -} - -size_t ReadLock::recv_cstring(char* buf, size_t max_size){ - size_t size = 0; - for (; size < max_size; size++) { - recv(buf, 1, FOREVER); - if (*buf == 0) { - break; - } - buf++; - } - return size; -} - -void WriteLock::send(const char* buf, size_t size) { - while (size) { - ssize_t numbytes = ::write(conn->guarded_sockfd, buf, size); - if (numbytes == -1) throw Error::from_errno("write"); - if (debug_net > 1) { - fprintf(stderr, ">> %s\n", write_datum(std::string(buf, numbytes)).c_str()); - } - - buf += numbytes; - size -= numbytes; - } -} - -void WriteLock::send(const std::string data) { - 
send(data.data(), data.size()); -} - -std::string ReadLock::recv(size_t size) { - char buf[size]; - recv(buf, size, FOREVER); - return buf; -} - -void Connection::close() { - CacheLock guard(d.get()); - for (auto& it : d->guarded_cache) { - stop_query(it.first); - } - - int ret = ::close(d->guarded_sockfd); - if (ret == -1) { - throw Error::from_errno("close"); - } - d->guarded_sockfd = -1; -} - -Response ConnectionPrivate::wait_for_response(uint64_t token_want, double wait) { - CacheLock guard(this); - ConnectionPrivate::TokenCache& cache = guarded_cache[token_want]; - - while (true) { - if (!cache.responses.empty()) { - Response response(std::move(cache.responses.front())); - cache.responses.pop(); - if (cache.closed && cache.responses.empty()) { - guarded_cache.erase(token_want); - } - - return response; - } - - if (cache.closed) { - throw Error("Trying to read from a closed token"); - } - - if (guarded_loop_active) { - cache.cond.wait(guard.inner_lock); - } else { - break; - } - } - - ReadLock reader(this); - return reader.read_loop(token_want, std::move(guard), wait); -} - -Response ReadLock::read_loop(uint64_t token_want, CacheLock&& guard, double wait) { - if (!guard.inner_lock) { - guard.lock(); - } - if (conn->guarded_loop_active) { - throw Error("Cannot run more than one read loop on the same connection"); - } - conn->guarded_loop_active = true; - guard.unlock(); - - try { - while (true) { - char buf[12]; - bzero(buf, sizeof(buf)); - recv(buf, 12, wait); - uint64_t token_got; - memcpy(&token_got, buf, 8); - uint32_t length; - memcpy(&length, buf + 8, 4); - - std::unique_ptr<char[]> bufmem(new char[length + 1]); - char *buffer = bufmem.get(); - bzero(buffer, length + 1); - recv(buffer, length, wait); - buffer[length] = '\0'; - - rapidjson::Document json; - json.ParseInsitu(buffer); - if (json.HasParseError()) { - fprintf(stderr, "json parse error, code: %d, position: %d\n", - (int)json.GetParseError(), (int)json.GetErrorOffset()); - } else if (json.IsNull()) { - fprintf(stderr, "null value, read: %s\n", buffer); - } - - Datum datum = read_datum(json); - if (debug_net > 0) { - fprintf(stderr, "[%" PRIu64 "] << %s\n", token_got, write_datum(datum).c_str()); - } - - Response response(std::move(datum)); - - if (token_got == token_want) { - guard.lock(); - if (response.type != Protocol::Response::ResponseType::SUCCESS_PARTIAL) { - auto it = conn->guarded_cache.find(token_got); - if (it != conn->guarded_cache.end()) { - it->second.closed = true; - it->second.cond.notify_all(); - } - conn->guarded_cache.erase(it); - } - conn->guarded_loop_active = false; - for (auto& it : conn->guarded_cache) { - it.second.cond.notify_all(); - } - return response; - } else { - guard.lock(); - auto it = conn->guarded_cache.find(token_got); - if (it == conn->guarded_cache.end()) { - // drop the response - } else if (!it->second.closed) { - it->second.responses.emplace(std::move(response)); - if (response.type != Protocol::Response::ResponseType::SUCCESS_PARTIAL) { - it->second.closed = true; - } - } - it->second.cond.notify_all(); - guard.unlock(); - } - } - } catch (const TimeoutException &e) { - if (!guard.inner_lock){ - guard.lock(); - } - conn->guarded_loop_active = false; - throw e; - } -} - -void ConnectionPrivate::run_query(Query query, bool no_reply) { - WriteLock writer(this); - writer.send(query.serialize()); -} - -Cursor Connection::start_query(Term *term, OptArgs&& opts) { - bool no_reply = false; - auto it = opts.find("noreply"); - if (it != opts.end()) { - no_reply = 
*(it->second.datum.get_boolean()); - } - - uint64_t token = d->new_token(); - { - CacheLock guard(d.get()); - d->guarded_cache[token]; - } - - d->run_query(Query{QueryType::START, token, term->datum, std::move(opts)}); - if (no_reply) { - return Cursor(new CursorPrivate(token, this, Nil())); - } - - Cursor cursor(new CursorPrivate(token, this)); - Response response = d->wait_for_response(token, FOREVER); - cursor.d->add_response(std::move(response)); - return cursor; -} - -void Connection::stop_query(uint64_t token) { - const auto& it = d->guarded_cache.find(token); - if (it != d->guarded_cache.end() && !it->second.closed) { - d->run_query(Query{QueryType::STOP, token}, true); - } -} - -void Connection::continue_query(uint64_t token) { - d->run_query(Query{QueryType::CONTINUE, token}, true); -} - -Error Response::as_error() { - std::string repr; - if (result.size() == 1) { - std::string* string = result[0].get_string(); - if (string) { - repr = *string; - } else { - repr = write_datum(result[0]); - } - } else { - repr = write_datum(Datum(result)); - } - std::string err; - using RT = Protocol::Response::ResponseType; - using ET = Protocol::Response::ErrorType; - switch (type) { - case RT::SUCCESS_SEQUENCE: err = "unexpected response: SUCCESS_SEQUENCE"; break; - case RT::SUCCESS_PARTIAL: err = "unexpected response: SUCCESS_PARTIAL"; break; - case RT::SUCCESS_ATOM: err = "unexpected response: SUCCESS_ATOM"; break; - case RT::WAIT_COMPLETE: err = "unexpected response: WAIT_COMPLETE"; break; - case RT::SERVER_INFO: err = "unexpected response: SERVER_INFO"; break; - case RT::CLIENT_ERROR: err = "ReqlDriverError"; break; - case RT::COMPILE_ERROR: err = "ReqlCompileError"; break; - case RT::RUNTIME_ERROR: - switch (error_type) { - case ET::INTERNAL: err = "ReqlInternalError"; break; - case ET::RESOURCE_LIMIT: err = "ReqlResourceLimitError"; break; - case ET::QUERY_LOGIC: err = "ReqlQueryLogicError"; break; - case ET::NON_EXISTENCE: err = "ReqlNonExistenceError"; break; - case ET::OP_FAILED: err = "ReqlOpFailedError"; break; - case ET::OP_INDETERMINATE: err = "ReqlOpIndeterminateError"; break; - case ET::USER: err = "ReqlUserError"; break; - case ET::PERMISSION_ERROR: err = "ReqlPermissionError"; break; - default: err = "ReqlRuntimeError"; break; - } - } - throw Error("%s: %s", err.c_str(), repr.c_str()); -} - -Protocol::Response::ResponseType response_type(double t) { - int n = static_cast<int>(t); - using RT = Protocol::Response::ResponseType; - switch (n) { - case static_cast<int>(RT::SUCCESS_ATOM): - return RT::SUCCESS_ATOM; - case static_cast<int>(RT::SUCCESS_SEQUENCE): - return RT::SUCCESS_SEQUENCE; - case static_cast<int>(RT::SUCCESS_PARTIAL): - return RT::SUCCESS_PARTIAL; - case static_cast<int>(RT::WAIT_COMPLETE): - return RT::WAIT_COMPLETE; - case static_cast<int>(RT::CLIENT_ERROR): - return RT::CLIENT_ERROR; - case static_cast<int>(RT::COMPILE_ERROR): - return RT::COMPILE_ERROR; - case static_cast<int>(RT::RUNTIME_ERROR): - return RT::RUNTIME_ERROR; - default: - throw Error("Unknown response type"); - } -} - -Protocol::Response::ErrorType runtime_error_type(double t) { - int n = static_cast<int>(t); - using ET = Protocol::Response::ErrorType; - switch (n) { - case static_cast<int>(ET::INTERNAL): - return ET::INTERNAL; - case static_cast<int>(ET::RESOURCE_LIMIT): - return ET::RESOURCE_LIMIT; - case static_cast<int>(ET::QUERY_LOGIC): - return ET::QUERY_LOGIC; - case static_cast<int>(ET::NON_EXISTENCE): - return ET::NON_EXISTENCE; - case static_cast<int>(ET::OP_FAILED): - return 
ET::OP_FAILED; - case static_cast<int>(ET::OP_INDETERMINATE): - return ET::OP_INDETERMINATE; - case static_cast<int>(ET::USER): - return ET::USER; - default: - throw Error("Unknown error type"); - } -} - -} diff --git a/ext/librethinkdbxx/src/connection.h b/ext/librethinkdbxx/src/connection.h deleted file mode 100644 index d3882857..00000000 --- a/ext/librethinkdbxx/src/connection.h +++ /dev/null @@ -1,59 +0,0 @@ -#pragma once - -#include <string> -#include <queue> -#include <mutex> -#include <memory> -#include <condition_variable> - -#include "protocol_defs.h" -#include "datum.h" -#include "error.h" - -#define FOREVER (-1) -#define SECOND 1 -#define MICROSECOND 0.000001 - -namespace RethinkDB { - -class Term; -using OptArgs = std::map<std::string, Term>; - -// A connection to a RethinkDB server -// It contains: -// * A socket -// * Read and write locks -// * A cache of responses that have not been read by the corresponding Cursor -class ConnectionPrivate; -class Connection { -public: - Connection() = delete; - Connection(const Connection&) noexcept = delete; - Connection(Connection&&) noexcept = delete; - Connection& operator=(Connection&&) noexcept = delete; - Connection& operator=(const Connection&) noexcept = delete; - ~Connection(); - - void close(); - -private: - explicit Connection(ConnectionPrivate *dd); - std::unique_ptr<ConnectionPrivate> d; - - Cursor start_query(Term *term, OptArgs&& args); - void stop_query(uint64_t); - void continue_query(uint64_t); - - friend class Cursor; - friend class CursorPrivate; - friend class Token; - friend class Term; - friend std::unique_ptr<Connection> - connect(std::string host, int port, std::string auth_key); - -}; - -// $doc(connect) -std::unique_ptr<Connection> connect(std::string host = "localhost", int port = 28015, std::string auth_key = ""); - -} diff --git a/ext/librethinkdbxx/src/connection_p.h b/ext/librethinkdbxx/src/connection_p.h deleted file mode 100644 index d8a95e3c..00000000 --- a/ext/librethinkdbxx/src/connection_p.h +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef CONNECTION_P_H -#define CONNECTION_P_H - -#include <inttypes.h> - -#include "connection.h" -#include "term.h" -#include "json_p.h" - -namespace RethinkDB { - -extern const int debug_net; - -struct Query { - Protocol::Query::QueryType type; - uint64_t token; - Datum term; - OptArgs optArgs; - - std::string serialize() { - Array query_arr{static_cast<double>(type)}; - if (term.is_valid()) query_arr.emplace_back(term); - if (!optArgs.empty()) - query_arr.emplace_back(Term(std::move(optArgs)).datum); - - std::string query_str = write_datum(query_arr); - if (debug_net > 0) { - fprintf(stderr, "[%" PRIu64 "] >> %s\n", token, query_str.c_str()); - } - - char header[12]; - memcpy(header, &token, 8); - uint32_t size = query_str.size(); - memcpy(header + 8, &size, 4); - query_str.insert(0, header, 12); - return query_str; - } -}; - -// Used internally to convert a raw response type into an enum -Protocol::Response::ResponseType response_type(double t); -Protocol::Response::ErrorType runtime_error_type(double t); - -// Contains a response from the server. Use the Cursor class to interact with these responses -class Response { -public: - Response() = delete; - explicit Response(Datum&& datum) : - type(response_type(std::move(datum).extract_field("t").extract_number())), - error_type(datum.get_field("e") ? 
- runtime_error_type(std::move(datum).extract_field("e").extract_number()) : - Protocol::Response::ErrorType(0)), - result(std::move(datum).extract_field("r").extract_array()) { } - Error as_error(); - Protocol::Response::ResponseType type; - Protocol::Response::ErrorType error_type; - Array result; -}; - -class Token; -class ConnectionPrivate { -public: - ConnectionPrivate(int sockfd) - : guarded_next_token(1), guarded_sockfd(sockfd), guarded_loop_active(false) - { } - - void run_query(Query query, bool no_reply = false); - - Response wait_for_response(uint64_t, double); - uint64_t new_token() { - return guarded_next_token++; - } - - std::mutex read_lock; - std::mutex write_lock; - std::mutex cache_lock; - - struct TokenCache { - bool closed = false; - std::condition_variable cond; - std::queue<Response> responses; - }; - - std::map<uint64_t, TokenCache> guarded_cache; - uint64_t guarded_next_token; - int guarded_sockfd; - bool guarded_loop_active; -}; - -class CacheLock { -public: - CacheLock(ConnectionPrivate* conn) : inner_lock(conn->cache_lock) { } - - void lock() { - inner_lock.lock(); - } - - void unlock() { - inner_lock.unlock(); - } - - std::unique_lock<std::mutex> inner_lock; -}; - -class ReadLock { -public: - ReadLock(ConnectionPrivate* conn_) : lock(conn_->read_lock), conn(conn_) { } - - size_t recv_some(char*, size_t, double wait); - void recv(char*, size_t, double wait); - std::string recv(size_t); - size_t recv_cstring(char*, size_t); - - Response read_loop(uint64_t, CacheLock&&, double); - - std::lock_guard<std::mutex> lock; - ConnectionPrivate* conn; -}; - -class WriteLock { -public: - WriteLock(ConnectionPrivate* conn_) : lock(conn_->write_lock), conn(conn_) { } - - void send(const char*, size_t); - void send(std::string); - - std::lock_guard<std::mutex> lock; - ConnectionPrivate* conn; -}; - -} // namespace RethinkDB - -#endif // CONNECTION_P_H diff --git a/ext/librethinkdbxx/src/cursor.cc b/ext/librethinkdbxx/src/cursor.cc deleted file mode 100644 index df0621eb..00000000 --- a/ext/librethinkdbxx/src/cursor.cc +++ /dev/null @@ -1,223 +0,0 @@ -#include "cursor.h" -#include "cursor_p.h" -#include "exceptions.h" - -namespace RethinkDB { - -// for type completion, in order to forward declare with unique_ptr -Cursor::Cursor(Cursor&&) = default; -Cursor& Cursor::operator=(Cursor&&) = default; - -CursorPrivate::CursorPrivate(uint64_t token_, Connection *conn_) - : single(false), no_more(false), index(0), - token(token_), conn(conn_) -{ } - -CursorPrivate::CursorPrivate(uint64_t token_, Connection *conn_, Datum&& datum) - : single(true), no_more(true), index(0), buffer(Array{std::move(datum)}), - token(token_), conn(conn_) -{ } - -Cursor::Cursor(CursorPrivate *dd) : d(dd) {} - -Cursor::~Cursor() { - try { - if (d && d->conn) { - close(); - } - } catch ( ... 
) {} -} - -Datum& Cursor::next(double wait) const { - if (!has_next(wait)) { - throw Error("next: No more data"); - } - - return d->buffer[d->index++]; -} - -Datum& Cursor::peek(double wait) const { - if (!has_next(wait)) { - throw Error("next: No more data"); - } - - return d->buffer[d->index]; -} - -void Cursor::each(std::function<void(Datum&&)> f, double wait) const { - while (has_next(wait)) { - f(std::move(d->buffer[d->index++])); - } -} - -void CursorPrivate::convert_single() const { - if (index != 0) { - throw Error("Cursor: already consumed"); - } - - if (buffer.size() != 1) { - throw Error("Cursor: invalid response from server"); - } - - if (!buffer[0].is_array()) { - throw Error("Cursor: not an array"); - } - - buffer.swap(buffer[0].extract_array()); - single = false; -} - -void CursorPrivate::clear_and_read_all() const { - if (single) { - convert_single(); - } - if (index != 0) { - buffer.erase(buffer.begin(), buffer.begin() + index); - index = 0; - } - while (!no_more) { - add_response(conn->d->wait_for_response(token, FOREVER)); - } -} - -Array&& Cursor::to_array() && { - d->clear_and_read_all(); - return std::move(d->buffer); -} - -Array Cursor::to_array() const & { - d->clear_and_read_all(); - return d->buffer; -} - -Datum Cursor::to_datum() const & { - if (d->single) { - if (d->index != 0) { - throw Error("to_datum: already consumed"); - } - return d->buffer[0]; - } - - d->clear_and_read_all(); - return d->buffer; -} - -Datum Cursor::to_datum() && { - Datum ret((Nil())); - if (d->single) { - if (d->index != 0) { - throw Error("to_datum: already consumed"); - } - ret = std::move(d->buffer[0]); - } else { - d->clear_and_read_all(); - ret = std::move(d->buffer); - } - - return ret; -} - -void Cursor::close() const { - d->conn->stop_query(d->token); - d->no_more = true; -} - -bool Cursor::has_next(double wait) const { - if (d->single) { - d->convert_single(); - } - - while (true) { - if (d->index >= d->buffer.size()) { - if (d->no_more) { - return false; - } - d->add_response(d->conn->d->wait_for_response(d->token, wait)); - } else { - return true; - } - } -} - -bool Cursor::is_single() const { - return d->single; -} - -void CursorPrivate::add_results(Array&& results) const { - if (index >= buffer.size()) { - buffer = std::move(results); - index = 0; - } else { - for (auto& it : results) { - buffer.emplace_back(std::move(it)); - } - } -} - -void CursorPrivate::add_response(Response&& response) const { - using RT = Protocol::Response::ResponseType; - switch (response.type) { - case RT::SUCCESS_SEQUENCE: - add_results(std::move(response.result)); - no_more = true; - break; - case RT::SUCCESS_PARTIAL: - conn->continue_query(token); - add_results(std::move(response.result)); - break; - case RT::SUCCESS_ATOM: - add_results(std::move(response.result)); - single = true; - no_more = true; - break; - case RT::SERVER_INFO: - add_results(std::move(response.result)); - single = true; - no_more = true; - break; - case RT::WAIT_COMPLETE: - case RT::CLIENT_ERROR: - case RT::COMPILE_ERROR: - case RT::RUNTIME_ERROR: - no_more = true; - throw response.as_error(); - } -} - -Cursor::iterator Cursor::begin() { - return iterator(this); -} - -Cursor::iterator Cursor::end() { - return iterator(nullptr); -} - -Cursor::iterator::iterator(Cursor* cursor_) : cursor(cursor_) {} - -Cursor::iterator& Cursor::iterator::operator++ () { - if (cursor == nullptr) { - throw Error("incrementing an exhausted Cursor iterator"); - } - - cursor->next(); - return *this; -} - -Datum& Cursor::iterator::operator* () { - if 
(cursor == nullptr) { - throw Error("reading from empty Cursor iterator"); - } - - return cursor->peek(); -} - -bool Cursor::iterator::operator!= (const Cursor::iterator& other) const { - if (cursor == other.cursor) { - return false; - } - - return !((cursor == nullptr && !other.cursor->has_next()) || - (other.cursor == nullptr && !cursor->has_next())); -} - -} diff --git a/ext/librethinkdbxx/src/cursor.h b/ext/librethinkdbxx/src/cursor.h deleted file mode 100644 index 60ae1817..00000000 --- a/ext/librethinkdbxx/src/cursor.h +++ /dev/null @@ -1,76 +0,0 @@ -#pragma once - -#include "connection.h" - -namespace RethinkDB { - -// The response from the server, as returned by run. -// The response is either a single datum or a stream: -// * If it is a stream, the cursor represents each element of the stream. -// - Batches are fetched from the server as needed. -// * If it is a single datum, is_single() returns true. -// - If it is an array, the cursor represents each element of that array -// - Otherwise, to_datum() returns the datum and iteration throws an exception. -// The cursor can only be iterated over once, it discards data that has already been read. -class CursorPrivate; -class Cursor { -public: - Cursor() = delete; - ~Cursor(); - - Cursor(Cursor&&); // movable - Cursor& operator=(Cursor&&); - Cursor(const Cursor&) = delete; // not copyable - Cursor& operator=(const Cursor&) = delete; - - // Returned by begin() and end() - class iterator { - public: - iterator(Cursor*); - iterator& operator++ (); - Datum& operator* (); - bool operator!= (const iterator&) const; - - private: - Cursor *cursor; - }; - - // Consume the next element - Datum& next(double wait = FOREVER) const; - - // Peek at the next element - Datum& peek(double wait = FOREVER) const; - - // Call f on every element of the Cursor - void each(std::function<void(Datum&&)> f, double wait = FOREVER) const; - - // Consume and return all elements - Array&& to_array() &&; - - // If is_single(), returns the single datum. Otherwise returns to_array(). - Datum to_datum() &&; - Datum to_datum() const &; - - // Efficiently consume and return all elements - Array to_array() const &; - - // Close the cursor - void close() const; - - // Returns false if there are no more elements - bool has_next(double wait = FOREVER) const; - - // Returns false if the cursor is a stream - bool is_single() const; - - iterator begin(); - iterator end(); - -private: - explicit Cursor(CursorPrivate *dd); - std::unique_ptr<CursorPrivate> d; - - friend class Connection; -}; - -} diff --git a/ext/librethinkdbxx/src/cursor_p.h b/ext/librethinkdbxx/src/cursor_p.h deleted file mode 100644 index ce584cd7..00000000 --- a/ext/librethinkdbxx/src/cursor_p.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef CURSOR_P_H -#define CURSOR_P_H - -#include "connection_p.h" - -namespace RethinkDB { - -class CursorPrivate { -public: - CursorPrivate(uint64_t token, Connection *conn); - CursorPrivate(uint64_t token, Connection *conn, Datum&&); - - void add_response(Response&&) const; - void add_results(Array&&) const; - void clear_and_read_all() const; - void convert_single() const; - - mutable bool single = false; - mutable bool no_more = false; - mutable size_t index = 0; - mutable Array buffer; - - uint64_t token; - Connection *conn; -}; - -} // namespace RethinkDB - -#endif // CURSOR_P_H
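The Cursor declared above hides the SUCCESS_PARTIAL batching implemented in cursor.cc: has_next() transparently asks the connection to continue the query and waits for the next batch, so callers can treat a stream and a single datum almost uniformly. A minimal consumption sketch follows; how the Cursor is obtained is outside this excerpt (Connection::start_query is private and is driven by the Term front end elsewhere in the library), so only the draining side is shown.

    #include <cstdio>
    #include "cursor.h"

    // Drain a cursor: single results are taken in one shot, streams are iterated.
    void drain(RethinkDB::Cursor&& cursor) {
        if (cursor.is_single()) {
            RethinkDB::Datum d = std::move(cursor).to_datum();
            std::printf("%s\n", d.as_json().c_str());
            return;
        }
        while (cursor.has_next()) {   // fetches further batches from the server on demand
            std::printf("%s\n", cursor.next().as_json().c_str());
        }
    }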
\ No newline at end of file diff --git a/ext/librethinkdbxx/src/datum.cc b/ext/librethinkdbxx/src/datum.cc deleted file mode 100644 index e4dbc8dc..00000000 --- a/ext/librethinkdbxx/src/datum.cc +++ /dev/null @@ -1,449 +0,0 @@ -#include <float.h> -#include <cmath> - -#include "datum.h" -#include "json_p.h" -#include "utils.h" -#include "cursor.h" - -#include "rapidjson-config.h" -#include "rapidjson/prettywriter.h" -#include "rapidjson/stringbuffer.h" - -namespace RethinkDB { - -using TT = Protocol::Term::TermType; - -bool Datum::is_nil() const { - return type == Type::NIL; -} - -bool Datum::is_boolean() const { - return type == Type::BOOLEAN; -} - -bool Datum::is_number() const { - return type == Type::NUMBER; -} - -bool Datum::is_string() const { - return type == Type::STRING; -} - -bool Datum::is_object() const { - return type == Type::OBJECT; -} - -bool Datum::is_array() const { - return type == Type::ARRAY; -} - -bool Datum::is_binary() const { - return type == Type::BINARY; -} - -bool Datum::is_time() const { - return type == Type::TIME; -} - -bool* Datum::get_boolean() { - if (type == Type::BOOLEAN) { - return &value.boolean; - } else { - return NULL; - } -} - -const bool* Datum::get_boolean() const { - if (type == Type::BOOLEAN) { - return &value.boolean; - } else { - return NULL; - } -} - -double* Datum::get_number() { - if (type == Type::NUMBER) { - return &value.number; - } else { - return NULL; - } -} - -const double* Datum::get_number() const { - if (type == Type::NUMBER) { - return &value.number; - } else { - return NULL; - } -} - -std::string* Datum::get_string() { - if (type == Type::STRING) { - return &value.string; - } else { - return NULL; - } -} - -const std::string* Datum::get_string() const { - if (type == Type::STRING) { - return &value.string; - } else { - return NULL; - } -} - -Datum* Datum::get_field(std::string key) { - if (type != Type::OBJECT) { - return NULL; - } - auto it = value.object.find(key); - if (it == value.object.end()) { - return NULL; - } - return &it->second; -} - -const Datum* Datum::get_field(std::string key) const { - if (type != Type::OBJECT) { - return NULL; - } - auto it = value.object.find(key); - if (it == value.object.end()) { - return NULL; - } - return &it->second; -} - -Datum* Datum::get_nth(size_t i) { - if (type != Type::ARRAY) { - return NULL; - } - if (i >= value.array.size()) { - return NULL; - } - return &value.array[i]; -} - -const Datum* Datum::get_nth(size_t i) const { - if (type != Type::ARRAY) { - return NULL; - } - if (i >= value.array.size()) { - return NULL; - } - return &value.array[i]; -} - -Object* Datum::get_object() { - if (type == Type::OBJECT) { - return &value.object; - } else { - return NULL; - } -} - -const Object* Datum::get_object() const { - if (type == Type::OBJECT) { - return &value.object; - } else { - return NULL; - } -} - -Array* Datum::get_array() { - if (type == Type::ARRAY) { - return &value.array; - } else { - return NULL; - } -} - -const Array* Datum::get_array() const { - if (type == Type::ARRAY) { - return &value.array; - } else { - return NULL; - } -} - -Binary* Datum::get_binary() { - if (type == Type::BINARY) { - return &value.binary; - } else { - return NULL; - } -} - -const Binary* Datum::get_binary() const { - if (type == Type::BINARY) { - return &value.binary; - } else { - return NULL; - } -} - -Time* Datum::get_time() { - if (type == Type::TIME) { - return &value.time; - } else { - return NULL; - } -} - -const Time* Datum::get_time() const { - if (type == Type::TIME) { - return 
&value.time; - } else { - return NULL; - } -} - -bool& Datum::extract_boolean() { - if (type != Type::BOOLEAN) { - throw Error("extract_bool: Not a boolean"); - } - return value.boolean; -} - -double& Datum::extract_number() { - if (type != Type::NUMBER) { - throw Error("extract_number: Not a number: %s", write_datum(*this).c_str()); - } - return value.number; -} - -std::string& Datum::extract_string() { - if (type != Type::STRING) { - throw Error("extract_string: Not a string"); - } - return value.string; -} - -Object& Datum::extract_object() { - if (type != Type::OBJECT) { - throw Error("extract_object: Not an object"); - } - return value.object; -} - -Datum& Datum::extract_field(std::string key) { - if (type != Type::OBJECT) { - throw Error("extract_field: Not an object"); - } - auto it = value.object.find(key); - if (it == value.object.end()) { - throw Error("extract_field: No such key in object"); - } - return it->second; -} - -Datum& Datum::extract_nth(size_t i) { - if (type != Type::ARRAY) { - throw Error("extract_nth: Not an array"); - } - if (i >= value.array.size()) { - throw Error("extract_nth: index too large"); - } - return value.array[i]; -} - -Array& Datum::extract_array() { - if (type != Type::ARRAY) { - throw Error("get_array: Not an array"); - } - return value.array; -} - -Binary& Datum::extract_binary() { - if (type != Type::BINARY) { - throw Error("get_binary: Not a binary"); - } - return value.binary; -} - -Time& Datum::extract_time() { - if (type != Type::TIME) { - throw Error("get_time: Not a time"); - } - return value.time; -} - -int Datum::compare(const Datum& other) const { -#define COMPARE(a, b) do { \ - if (a < b) { return -1; } \ - if (a > b) { return 1; } } while(0) -#define COMPARE_OTHER(x) COMPARE(x, other.x) - - COMPARE_OTHER(type); - int c; - switch (type) { - case Type::NIL: case Type::INVALID: break; - case Type::BOOLEAN: COMPARE_OTHER(value.boolean); break; - case Type::NUMBER: COMPARE_OTHER(value.number); break; - case Type::STRING: - c = value.string.compare(other.value.string); - COMPARE(c, 0); - break; - case Type::BINARY: - c = value.binary.data.compare(other.value.binary.data); - COMPARE(c, 0); - break; - case Type::TIME: - COMPARE(value.time.epoch_time, other.value.time.epoch_time); - COMPARE(value.time.utc_offset, other.value.time.utc_offset); - break; - case Type::ARRAY: - COMPARE_OTHER(value.array.size()); - for (size_t i = 0; i < value.array.size(); i++) { - c = value.array[i].compare(other.value.array[i]); - COMPARE(c, 0); - } - break; - case Type::OBJECT: - COMPARE_OTHER(value.object.size()); - for (Object::const_iterator l = value.object.begin(), - r = other.value.object.begin(); - l != value.object.end(); - ++l, ++r) { - COMPARE(l->first, r->first); - c = l->second.compare(r->second); - COMPARE(c, 0); - } - break; - default: - throw Error("cannot compare invalid datum"); - } - return 0; -#undef COMPARE_OTHER -#undef COMPARE -} - -bool Datum::operator== (const Datum& other) const { - return compare(other) == 0; -} - -Datum Datum::from_raw() const { - do { - const Datum* type_field = get_field("$reql_type$"); - if (!type_field) break; - const std::string* type = type_field->get_string(); - if (!type) break;; - if (!strcmp(type->c_str(), "BINARY")) { - const Datum* data_field = get_field("data"); - if (!data_field) break; - const std::string* encoded_data = data_field->get_string(); - if (!encoded_data) break; - Binary binary(""); - if (base64_decode(*encoded_data, binary.data)) { - return binary; - } - } else if (!strcmp(type->c_str(), 
"TIME")) { - const Datum* epoch_field = get_field("epoch_time"); - if (!epoch_field) break; - const Datum* tz_field = get_field("timezone"); - if (!tz_field) break; - const double* epoch_time = epoch_field->get_number(); - if (!epoch_time) break; - const std::string* tz = tz_field->get_string(); - if (!tz) break; - double offset; - if (!Time::parse_utc_offset(*tz, &offset)) break; - return Time(*epoch_time, offset); - } - } while (0); - return *this; -} - -Datum Datum::to_raw() const { - if (type == Type::BINARY) { - return Object{ - {"$reql_type$", "BINARY"}, - {"data", base64_encode(value.binary.data)}}; - } else if (type == Type::TIME) { - return Object{ - {"$reql_type$", "TIME"}, - {"epoch_time", value.time.epoch_time}, - {"timezone", Time::utc_offset_string(value.time.utc_offset)}}; - } - return *this; -} - -Datum::Datum(Cursor&& cursor) : Datum(cursor.to_datum()) { } -Datum::Datum(const Cursor& cursor) : Datum(cursor.to_datum()) { } - -static const double max_dbl_int = 0x1LL << DBL_MANT_DIG; -static const double min_dbl_int = max_dbl_int * -1; -bool number_as_integer(double d, int64_t *i_out) { - static_assert(DBL_MANT_DIG == 53, "Doubles are wrong size."); - - if (min_dbl_int <= d && d <= max_dbl_int) { - int64_t i = d; - if (static_cast<double>(i) == d) { - *i_out = i; - return true; - } - } - return false; -} - -template void Datum::write_json( - rapidjson::Writer<rapidjson::StringBuffer> *writer) const; -template void Datum::write_json( - rapidjson::PrettyWriter<rapidjson::StringBuffer> *writer) const; - -template <class json_writer_t> -void Datum::write_json(json_writer_t *writer) const { - switch (type) { - case Type::NIL: writer->Null(); break; - case Type::BOOLEAN: writer->Bool(value.boolean); break; - case Type::NUMBER: { - const double d = value.number; - // Always print -0.0 as a double since integers cannot represent -0. - // Otherwise check if the number is an integer and print it as such. - int64_t i; - if (!(d == 0.0 && std::signbit(d)) && number_as_integer(d, &i)) { - writer->Int64(i); - } else { - writer->Double(d); - } - } break; - case Type::STRING: writer->String(value.string.data(), value.string.size()); break; - case Type::ARRAY: { - writer->StartArray(); - for (auto it : value.array) { - it.write_json(writer); - } - writer->EndArray(); - } break; - case Type::OBJECT: { - writer->StartObject(); - for (auto it : value.object) { - writer->Key(it.first.data(), it.first.size()); - it.second.write_json(writer); - } - writer->EndObject(); - } break; - - case Type::BINARY: - case Type::TIME: - to_raw().write_json(writer); - break; - default: - throw Error("cannot write invalid datum"); - } -} - -std::string Datum::as_json() const { - rapidjson::StringBuffer buffer; - rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); - write_json(&writer); - return std::string(buffer.GetString(), buffer.GetSize()); -} - -Datum Datum::from_json(const std::string& json) { - return read_datum(json); -} - -} // namespace RethinkDB diff --git a/ext/librethinkdbxx/src/datum.h b/ext/librethinkdbxx/src/datum.h deleted file mode 100644 index 051e2ca2..00000000 --- a/ext/librethinkdbxx/src/datum.h +++ /dev/null @@ -1,287 +0,0 @@ -#pragma once - -#include <string> -#include <vector> -#include <map> -#include <functional> - -#include "protocol_defs.h" -#include "error.h" -#include "types.h" - -namespace RethinkDB { - -class Cursor; - -// The type of data stored in a RethinkDB database. 
-// The following JSON types are represented in a Datum as -// * null -> Nil -// * boolean -> bool -// * number -> double -// * unicode strings -> std::string -// * array -> Array (aka std::vector<Datum> -// * object -> Object (aka std::map<std::string, Datum>> -// Datums can also contain one of the following extra types -// * binary strings -> Binary -// * timestamps -> Time -// * points. lines and polygons -> not implemented -class Datum { -public: - Datum() : type(Type::INVALID), value() {} - Datum(Nil) : type(Type::NIL), value() { } - Datum(bool boolean_) : type(Type::BOOLEAN), value(boolean_) { } - Datum(double number_) : type(Type::NUMBER), value(number_) { } - Datum(const std::string& string_) : type(Type::STRING), value(string_) { } - Datum(std::string&& string_) : type(Type::STRING), value(std::move(string_)) { } - Datum(const Array& array_) : type(Type::ARRAY), value(array_) { } - Datum(Array&& array_) : type(Type::ARRAY), value(std::move(array_)) { } - Datum(const Binary& binary) : type(Type::BINARY), value(binary) { } - Datum(Binary&& binary) : type(Type::BINARY), value(std::move(binary)) { } - Datum(const Time time) : type(Type::TIME), value(time) { } - Datum(const Object& object_) : type(Type::OBJECT), value(object_) { } - Datum(Object&& object_) : type(Type::OBJECT), value(std::move(object_)) { } - Datum(const Datum& other) : type(other.type), value(other.type, other.value) { } - Datum(Datum&& other) : type(other.type), value(other.type, std::move(other.value)) { } - - Datum& operator=(const Datum& other) { - value.destroy(type); - type = other.type; - value.set(type, other.value); - return *this; - } - - Datum& operator=(Datum&& other) { - value.destroy(type); - type = other.type; - value.set(type, std::move(other.value)); - return *this; - } - - Datum(unsigned short number_) : Datum(static_cast<double>(number_)) { } - Datum(signed short number_) : Datum(static_cast<double>(number_)) { } - Datum(unsigned int number_) : Datum(static_cast<double>(number_)) { } - Datum(signed int number_) : Datum(static_cast<double>(number_)) { } - Datum(unsigned long number_) : Datum(static_cast<double>(number_)) { } - Datum(signed long number_) : Datum(static_cast<double>(number_)) { } - Datum(unsigned long long number_) : Datum(static_cast<double>(number_)) { } - Datum(signed long long number_) : Datum(static_cast<double>(number_)) { } - - Datum(Protocol::Term::TermType type) : Datum(static_cast<double>(type)) { } - Datum(const char* string) : Datum(static_cast<std::string>(string)) { } - - // Cursors are implicitly converted into datums - Datum(Cursor&&); - Datum(const Cursor&); - - template <class T> - Datum(const std::map<std::string, T>& map) : type(Type::OBJECT), value(Object()) { - for (const auto& it : map) { - value.object.emplace(it.left, Datum(it.right)); - } - } - - template <class T> - Datum(std::map<std::string, T>&& map) : type(Type::OBJECT), value(Object()) { - for (auto& it : map) { - value.object.emplace(it.first, Datum(std::move(it.second))); - } - } - - template <class T> - Datum(const std::vector<T>& vec) : type(Type::ARRAY), value(Array()) { - for (const auto& it : vec) { - value.array.emplace_back(it); - } - } - - template <class T> - Datum(std::vector<T>&& vec) : type(Type::ARRAY), value(Array()) { - for (auto& it : vec) { - value.array.emplace_back(std::move(it)); - } - } - - ~Datum() { - value.destroy(type); - } - - // Apply a visitor - template <class R, class F, class ...A> - R apply(F f, A&& ...args) const & { - switch (type) { - case Type::NIL: return f(Nil(), 
std::forward<A>(args)...); break; - case Type::BOOLEAN: return f(value.boolean, std::forward<A>(args)...); break; - case Type::NUMBER: return f(value.number, std::forward<A>(args)...); break; - case Type::STRING: return f(value.string, std::forward<A>(args)...); break; - case Type::OBJECT: return f(value.object, std::forward<A>(args)...); break; - case Type::ARRAY: return f(value.array, std::forward<A>(args)...); break; - case Type::BINARY: return f(value.binary, std::forward<A>(args)...); break; - case Type::TIME: return f(value.time, std::forward<A>(args)...); break; - default: - throw Error("internal error: no such datum type %d", static_cast<int>(type)); - } - } - - template <class R, class F, class ...A> - R apply(F f, A&& ...args) && { - switch (type) { - case Type::NIL: return f(Nil(), std::forward<A>(args)...); break; - case Type::BOOLEAN: return f(std::move(value.boolean), std::forward<A>(args)...); break; - case Type::NUMBER: return f(std::move(value.number), std::forward<A>(args)...); break; - case Type::STRING: return f(std::move(value.string), std::forward<A>(args)...); break; - case Type::OBJECT: return f(std::move(value.object), std::forward<A>(args)...); break; - case Type::ARRAY: return f(std::move(value.array), std::forward<A>(args)...); break; - case Type::BINARY: return f(std::move(value.binary), std::forward<A>(args)...); break; - case Type::TIME: return f(std::move(value.time), std::forward<A>(args)...); break; - default: - throw Error("internal error: no such datum type %d", static_cast<int>(type)); - } - } - - bool is_nil() const; - bool is_boolean() const; - bool is_number() const; - bool is_string() const; - bool is_object() const; - bool is_array() const; - bool is_binary() const; - bool is_time() const; - - // get_* returns nullptr if the datum has a different type - - bool* get_boolean(); - const bool* get_boolean() const; - double* get_number(); - const double* get_number() const; - std::string* get_string(); - const std::string* get_string() const; - Object* get_object(); - const Object* get_object() const; - Datum* get_field(std::string); - const Datum* get_field(std::string) const; - Array* get_array(); - const Array* get_array() const; - Datum* get_nth(size_t); - const Datum* get_nth(size_t) const; - Binary* get_binary(); - const Binary* get_binary() const; - Time* get_time(); - const Time* get_time() const; - - // extract_* throws an exception if the types don't match - - bool& extract_boolean(); - double& extract_number(); - std::string& extract_string(); - Object& extract_object(); - Datum& extract_field(std::string); - Array& extract_array(); - Datum& extract_nth(size_t); - Binary& extract_binary(); - Time& extract_time(); - - // negative, zero or positive if this datum is smaller, identical or larger than the other one, respectively - // This is meant to match the results of RethinkDB's comparison operators - int compare(const Datum&) const; - - // Deep equality - bool operator== (const Datum&) const; - - // Recusively replace non-JSON types into objects that represent them - Datum to_raw() const; - - // Recursively replace objects with a $reql_type$ field into the datum they represent - Datum from_raw() const; - - template <class json_writer_t> void write_json(json_writer_t *writer) const; - - std::string as_json() const; - static Datum from_json(const std::string&); - - bool is_valid() const { return type != Type::INVALID; } - -private: - enum class Type { - INVALID, // default constructed - ARRAY, BOOLEAN, NIL, NUMBER, OBJECT, BINARY, STRING, TIME 
- // POINT, LINE, POLYGON - }; - Type type; - - union datum_value { - bool boolean; - double number; - std::string string; - Object object; - Array array; - Binary binary; - Time time; - - datum_value() { } - datum_value(bool boolean_) : boolean(boolean_) { } - datum_value(double number_) : number(number_) { } - datum_value(const std::string& string_) : string(string_) { } - datum_value(std::string&& string_) : string(std::move(string_)) { } - datum_value(const Object& object_) : object(object_) { } - datum_value(Object&& object_) : object(std::move(object_)) { } - datum_value(const Array& array_) : array(array_) { } - datum_value(Array&& array_) : array(std::move(array_)) { } - datum_value(const Binary& binary_) : binary(binary_) { } - datum_value(Binary&& binary_) : binary(std::move(binary_)) { } - datum_value(Time time) : time(std::move(time)) { } - - datum_value(Type type, const datum_value& other){ - set(type, other); - } - - datum_value(Type type, datum_value&& other){ - set(type, std::move(other)); - } - - void set(Type type, datum_value&& other) { - switch(type){ - case Type::NIL: case Type::INVALID: break; - case Type::BOOLEAN: new (this) bool(other.boolean); break; - case Type::NUMBER: new (this) double(other.number); break; - case Type::STRING: new (this) std::string(std::move(other.string)); break; - case Type::OBJECT: new (this) Object(std::move(other.object)); break; - case Type::ARRAY: new (this) Array(std::move(other.array)); break; - case Type::BINARY: new (this) Binary(std::move(other.binary)); break; - case Type::TIME: new (this) Time(std::move(other.time)); break; - } - } - - void set(Type type, const datum_value& other) { - switch(type){ - case Type::NIL: case Type::INVALID: break; - case Type::BOOLEAN: new (this) bool(other.boolean); break; - case Type::NUMBER: new (this) double(other.number); break; - case Type::STRING: new (this) std::string(other.string); break; - case Type::OBJECT: new (this) Object(other.object); break; - case Type::ARRAY: new (this) Array(other.array); break; - case Type::BINARY: new (this) Binary(other.binary); break; - case Type::TIME: new (this) Time(other.time); break; - } - } - - void destroy(Type type) { - switch(type){ - case Type::INVALID: break; - case Type::NIL: break; - case Type::BOOLEAN: break; - case Type::NUMBER: break; - case Type::STRING: { typedef std::string str; string.~str(); } break; - case Type::OBJECT: object.~Object(); break; - case Type::ARRAY: array.~Array(); break; - case Type::BINARY: binary.~Binary(); break; - case Type::TIME: time.~Time(); break; - } - } - - ~datum_value() { } - }; - - datum_value value; -}; - -} diff --git a/ext/librethinkdbxx/src/error.h b/ext/librethinkdbxx/src/error.h deleted file mode 100644 index ab75e248..00000000 --- a/ext/librethinkdbxx/src/error.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include <cstdarg> -#include <cstring> -#include <string> -#include <cerrno> - -namespace RethinkDB { - -// All errors thrown by the server have this type -struct Error { - template <class ...T> - explicit Error(const char* format_, T... A) { - format(format_, A...); - } - - Error() = default; - Error(Error&&) = default; - Error(const Error&) = default; - - Error& operator= (Error&& other) { - message = std::move(other.message); - return *this; - } - - static Error from_errno(const char* str){ - return Error("%s: %s", str, strerror(errno)); - } - - // The error message - std::string message; - -private: - const size_t max_message_size = 2048; - - void format(const char* format_, ...) 
{ - va_list args; - va_start(args, format_); - char message_[max_message_size]; - vsnprintf(message_, max_message_size, format_, args); - va_end(args); - message = message_; - } -}; - -} diff --git a/ext/librethinkdbxx/src/exceptions.h b/ext/librethinkdbxx/src/exceptions.h deleted file mode 100644 index 08c0b0a0..00000000 --- a/ext/librethinkdbxx/src/exceptions.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef EXCEPTIONS_H -#define EXCEPTIONS_H - -namespace RethinkDB { - -class TimeoutException : public std::exception { -public: - const char *what() const throw () { return "operation timed out"; } -}; - -} - -#endif // EXCEPTIONS_H diff --git a/ext/librethinkdbxx/src/json.cc b/ext/librethinkdbxx/src/json.cc deleted file mode 100644 index c908eefb..00000000 --- a/ext/librethinkdbxx/src/json.cc +++ /dev/null @@ -1,62 +0,0 @@ -#include "json_p.h" -#include "error.h" -#include "utils.h" - -#include "rapidjson-config.h" -#include "rapidjson/document.h" -#include "rapidjson/stringbuffer.h" -#include "rapidjson/writer.h" -#include "rapidjson/prettywriter.h" - -namespace RethinkDB { - -Datum read_datum(const std::string& json) { - rapidjson::Document document; - document.Parse(json); - return read_datum(document); -} - -Datum read_datum(const rapidjson::Value &json) { - switch(json.GetType()) { - case rapidjson::kNullType: return Nil(); - case rapidjson::kFalseType: return false; - case rapidjson::kTrueType: return true; - case rapidjson::kNumberType: return json.GetDouble(); - case rapidjson::kStringType: - return std::string(json.GetString(), json.GetStringLength()); - - case rapidjson::kObjectType: { - Object result; - for (rapidjson::Value::ConstMemberIterator it = json.MemberBegin(); - it != json.MemberEnd(); ++it) { - result.insert(std::make_pair(std::string(it->name.GetString(), - it->name.GetStringLength()), - read_datum(it->value))); - } - - if (result.count("$reql_type$")) - return Datum(std::move(result)).from_raw(); - return std::move(result); - } break; - case rapidjson::kArrayType: { - Array result; - result.reserve(json.Size()); - for (rapidjson::Value::ConstValueIterator it = json.Begin(); - it != json.End(); ++it) { - result.push_back(read_datum(*it)); - } - return std::move(result); - } break; - default: - throw Error("invalid rapidjson value"); - } -} - -std::string write_datum(const Datum& datum) { - rapidjson::StringBuffer buffer; - rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); - datum.write_json(&writer); - return std::string(buffer.GetString(), buffer.GetSize()); -} - -} diff --git a/ext/librethinkdbxx/src/json_p.h b/ext/librethinkdbxx/src/json_p.h deleted file mode 100644 index ebf537a9..00000000 --- a/ext/librethinkdbxx/src/json_p.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include "datum.h" - -namespace rapidjson { - class CrtAllocator; - template<typename> struct UTF8; - template <typename, typename> class GenericValue; - template <typename> class MemoryPoolAllocator; - typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value; -} - -namespace RethinkDB { - -Datum read_datum(const std::string&); -Datum read_datum(const rapidjson::Value &json); -std::string write_datum(const Datum&); - -} diff --git a/ext/librethinkdbxx/src/rapidjson-config.h b/ext/librethinkdbxx/src/rapidjson-config.h deleted file mode 100644 index 320c4048..00000000 --- a/ext/librethinkdbxx/src/rapidjson-config.h +++ /dev/null @@ -1,8 +0,0 @@ -#pragma once - -#define RAPIDJSON_HAS_STDSTRING 1 -#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 -#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 
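// A minimal round-trip sketch for the internal JSON bridge declared in json_p.h
// above (a private header, so this is illustrative only): read_datum() parses JSON
// text into a Datum, resolving $reql_type$ pseudo-types via from_raw(), and
// write_datum() serializes a Datum back to a JSON string with the rapidjson writer.
#include <iostream>
#include "json_p.h"

void json_round_trip() {
    RethinkDB::Datum d = RethinkDB::read_datum("{\"answer\": 42}");
    std::cout << RethinkDB::write_datum(d) << "\n";   // e.g. {"answer":42.0}
}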
-#define RAPIDJSON_HAS_CXX11_TYPETRAITS 1 -#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 -#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseFullPrecisionFlag diff --git a/ext/librethinkdbxx/src/rapidjson/allocators.h b/ext/librethinkdbxx/src/rapidjson/allocators.h deleted file mode 100644 index c7059697..00000000 --- a/ext/librethinkdbxx/src/rapidjson/allocators.h +++ /dev/null @@ -1,263 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_ALLOCATORS_H_ -#define RAPIDJSON_ALLOCATORS_H_ - -#include "rapidjson.h" - -RAPIDJSON_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// Allocator - -/*! \class rapidjson::Allocator - \brief Concept for allocating, resizing and freeing memory block. - - Note that Malloc() and Realloc() are non-static but Free() is static. - - So if an allocator need to support Free(), it needs to put its pointer in - the header of memory block. - -\code -concept Allocator { - static const bool kNeedFree; //!< Whether this allocator needs to call Free(). - - // Allocate a memory block. - // \param size of the memory block in bytes. - // \returns pointer to the memory block. - void* Malloc(size_t size); - - // Resize a memory block. - // \param originalPtr The pointer to current memory block. Null pointer is permitted. - // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.) - // \param newSize the new size in bytes. - void* Realloc(void* originalPtr, size_t originalSize, size_t newSize); - - // Free a memory block. - // \param pointer to the memory block. Null pointer is permitted. - static void Free(void *ptr); -}; -\endcode -*/ - -/////////////////////////////////////////////////////////////////////////////// -// CrtAllocator - -//! C-runtime library allocator. -/*! This class is just wrapper for standard C library memory routines. - \note implements Allocator concept -*/ -class CrtAllocator { -public: - static const bool kNeedFree = true; - void* Malloc(size_t size) { - if (size) // behavior of malloc(0) is implementation defined. - return std::malloc(size); - else - return NULL; // standardize to returning NULL. - } - void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { - (void)originalSize; - if (newSize == 0) { - std::free(originalPtr); - return NULL; - } - return std::realloc(originalPtr, newSize); - } - static void Free(void *ptr) { std::free(ptr); } -}; - -/////////////////////////////////////////////////////////////////////////////// -// MemoryPoolAllocator - -//! Default memory allocator used by the parser and DOM. -/*! This allocator allocate memory blocks from pre-allocated memory chunks. - - It does not free memory blocks. And Realloc() only allocate new memory. 
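// A short sketch (assuming the conventional rapidjson include path) contrasting the
// two allocators described here: CrtAllocator simply forwards to malloc/realloc/free,
// while MemoryPoolAllocator carves blocks out of pre-allocated chunks and never frees
// them individually (its Free() is a documented no-op and kNeedFree is false).
#include <cstring>
#include "rapidjson/allocators.h"

void allocator_demo() {
    rapidjson::CrtAllocator crt;
    void* p = crt.Malloc(64);
    p = crt.Realloc(p, 64, 128);
    rapidjson::CrtAllocator::Free(p);

    char backing[4096];                                  // optional user-supplied first chunk
    rapidjson::MemoryPoolAllocator<> pool(backing, sizeof(backing));
    void* q = pool.Malloc(256);                          // served from 'backing'
    std::memcpy(q, "hello", 6);
    rapidjson::MemoryPoolAllocator<>::Free(q);           // no-op by design
}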
- - The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default. - - User may also supply a buffer as the first chunk. - - If the user-buffer is full then additional chunks are allocated by BaseAllocator. - - The user-buffer is not deallocated by this allocator. - - \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator. - \note implements Allocator concept -*/ -template <typename BaseAllocator = CrtAllocator> -class MemoryPoolAllocator { -public: - static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator) - - //! Constructor with chunkSize. - /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. - \param baseAllocator The allocator for allocating memory chunks. - */ - MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : - chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0) - { - } - - //! Constructor with user-supplied buffer. - /*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size. - - The user buffer will not be deallocated when this allocator is destructed. - - \param buffer User supplied buffer. - \param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader). - \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. - \param baseAllocator The allocator for allocating memory chunks. - */ - MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : - chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0) - { - RAPIDJSON_ASSERT(buffer != 0); - RAPIDJSON_ASSERT(size > sizeof(ChunkHeader)); - chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer); - chunkHead_->capacity = size - sizeof(ChunkHeader); - chunkHead_->size = 0; - chunkHead_->next = 0; - } - - //! Destructor. - /*! This deallocates all memory chunks, excluding the user-supplied buffer. - */ - ~MemoryPoolAllocator() { - Clear(); - RAPIDJSON_DELETE(ownBaseAllocator_); - } - - //! Deallocates all memory chunks, excluding the user-supplied buffer. - void Clear() { - while (chunkHead_ && chunkHead_ != userBuffer_) { - ChunkHeader* next = chunkHead_->next; - baseAllocator_->Free(chunkHead_); - chunkHead_ = next; - } - if (chunkHead_ && chunkHead_ == userBuffer_) - chunkHead_->size = 0; // Clear user buffer - } - - //! Computes the total capacity of allocated memory chunks. - /*! \return total capacity in bytes. - */ - size_t Capacity() const { - size_t capacity = 0; - for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) - capacity += c->capacity; - return capacity; - } - - //! Computes the memory blocks allocated. - /*! \return total used bytes. - */ - size_t Size() const { - size_t size = 0; - for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) - size += c->size; - return size; - } - - //! Allocates a memory block. (concept Allocator) - void* Malloc(size_t size) { - if (!size) - return NULL; - - size = RAPIDJSON_ALIGN(size); - if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity) - AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size); - - void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size; - chunkHead_->size += size; - return buffer; - } - - //! 
Resizes a memory block (concept Allocator) - void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { - if (originalPtr == 0) - return Malloc(newSize); - - if (newSize == 0) - return NULL; - - originalSize = RAPIDJSON_ALIGN(originalSize); - newSize = RAPIDJSON_ALIGN(newSize); - - // Do not shrink if new size is smaller than original - if (originalSize >= newSize) - return originalPtr; - - // Simply expand it if it is the last allocation and there is sufficient space - if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) { - size_t increment = static_cast<size_t>(newSize - originalSize); - if (chunkHead_->size + increment <= chunkHead_->capacity) { - chunkHead_->size += increment; - return originalPtr; - } - } - - // Realloc process: allocate and copy memory, do not free original buffer. - void* newBuffer = Malloc(newSize); - RAPIDJSON_ASSERT(newBuffer != 0); // Do not handle out-of-memory explicitly. - if (originalSize) - std::memcpy(newBuffer, originalPtr, originalSize); - return newBuffer; - } - - //! Frees a memory block (concept Allocator) - static void Free(void *ptr) { (void)ptr; } // Do nothing - -private: - //! Copy constructor is not permitted. - MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */; - //! Copy assignment operator is not permitted. - MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */; - - //! Creates a new chunk. - /*! \param capacity Capacity of the chunk in bytes. - */ - void AddChunk(size_t capacity) { - if (!baseAllocator_) - ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator()); - ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity)); - chunk->capacity = capacity; - chunk->size = 0; - chunk->next = chunkHead_; - chunkHead_ = chunk; - } - - static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity. - - //! Chunk header for perpending to each chunk. - /*! Chunks are stored as a singly linked list. - */ - struct ChunkHeader { - size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself). - size_t size; //!< Current size of allocated memory in bytes. - ChunkHeader *next; //!< Next chunk in the linked list. - }; - - ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation. - size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated. - void *userBuffer_; //!< User supplied buffer. - BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks. - BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object. -}; - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/document.h b/ext/librethinkdbxx/src/rapidjson/document.h deleted file mode 100644 index f7f846f2..00000000 --- a/ext/librethinkdbxx/src/rapidjson/document.h +++ /dev/null @@ -1,2575 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_DOCUMENT_H_ -#define RAPIDJSON_DOCUMENT_H_ - -/*! \file document.h */ - -#include "reader.h" -#include "internal/meta.h" -#include "internal/strfunc.h" -#include "memorystream.h" -#include "encodedstream.h" -#include <new> // placement new -#include <limits> - -RAPIDJSON_DIAG_PUSH -#ifdef _MSC_VER -RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant -RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_OFF(padded) -RAPIDJSON_DIAG_OFF(switch-enum) -RAPIDJSON_DIAG_OFF(c++98-compat) -#endif - -#ifdef __GNUC__ -RAPIDJSON_DIAG_OFF(effc++) -#if __GNUC__ >= 6 -RAPIDJSON_DIAG_OFF(terminate) // ignore throwing RAPIDJSON_ASSERT in RAPIDJSON_NOEXCEPT functions -#endif -#endif // __GNUC__ - -#ifndef RAPIDJSON_NOMEMBERITERATORCLASS -#include <iterator> // std::iterator, std::random_access_iterator_tag -#endif - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS -#include <utility> // std::move -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -// Forward declaration. -template <typename Encoding, typename Allocator> -class GenericValue; - -template <typename Encoding, typename Allocator, typename StackAllocator> -class GenericDocument; - -//! Name-value pair in a JSON object value. -/*! - This class was internal to GenericValue. It used to be a inner struct. - But a compiler (IBM XL C/C++ for AIX) have reported to have problem with that so it moved as a namespace scope struct. - https://code.google.com/p/rapidjson/issues/detail?id=64 -*/ -template <typename Encoding, typename Allocator> -struct GenericMember { - GenericValue<Encoding, Allocator> name; //!< name of member (must be a string) - GenericValue<Encoding, Allocator> value; //!< value of member. -}; - -/////////////////////////////////////////////////////////////////////////////// -// GenericMemberIterator - -#ifndef RAPIDJSON_NOMEMBERITERATORCLASS - -//! (Constant) member iterator for a JSON object value -/*! - \tparam Const Is this a constant iterator? - \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document) - \tparam Allocator Allocator type for allocating memory of object, array and string. - - This class implements a Random Access Iterator for GenericMember elements - of a GenericValue, see ISO/IEC 14882:2003(E) C++ standard, 24.1 [lib.iterator.requirements]. - - \note This iterator implementation is mainly intended to avoid implicit - conversions from iterator values to \c NULL, - e.g. from GenericValue::FindMember. - - \note Define \c RAPIDJSON_NOMEMBERITERATORCLASS to fall back to a - pointer-based implementation, if your platform doesn't provide - the C++ <iterator> header. 
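// A sketch of iterating an object's members with the member iterator described
// above (error handling kept minimal; the JSON input is whatever the caller
// supplies). Each element is a GenericMember, i.e. a name/value pair of GenericValues.
#include <iostream>
#include "rapidjson/document.h"

void list_members(const char* json) {
    rapidjson::Document doc;
    doc.Parse(json);
    if (doc.HasParseError() || !doc.IsObject()) return;
    for (rapidjson::Value::ConstMemberIterator it = doc.MemberBegin();
         it != doc.MemberEnd(); ++it) {
        std::cout << it->name.GetString() << " : type " << it->value.GetType() << "\n";
    }
}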
- - \see GenericMember, GenericValue::MemberIterator, GenericValue::ConstMemberIterator - */ -template <bool Const, typename Encoding, typename Allocator> -class GenericMemberIterator - : public std::iterator<std::random_access_iterator_tag - , typename internal::MaybeAddConst<Const,GenericMember<Encoding,Allocator> >::Type> { - - friend class GenericValue<Encoding,Allocator>; - template <bool, typename, typename> friend class GenericMemberIterator; - - typedef GenericMember<Encoding,Allocator> PlainType; - typedef typename internal::MaybeAddConst<Const,PlainType>::Type ValueType; - typedef std::iterator<std::random_access_iterator_tag,ValueType> BaseType; - -public: - //! Iterator type itself - typedef GenericMemberIterator Iterator; - //! Constant iterator type - typedef GenericMemberIterator<true,Encoding,Allocator> ConstIterator; - //! Non-constant iterator type - typedef GenericMemberIterator<false,Encoding,Allocator> NonConstIterator; - - //! Pointer to (const) GenericMember - typedef typename BaseType::pointer Pointer; - //! Reference to (const) GenericMember - typedef typename BaseType::reference Reference; - //! Signed integer type (e.g. \c ptrdiff_t) - typedef typename BaseType::difference_type DifferenceType; - - //! Default constructor (singular value) - /*! Creates an iterator pointing to no element. - \note All operations, except for comparisons, are undefined on such values. - */ - GenericMemberIterator() : ptr_() {} - - //! Iterator conversions to more const - /*! - \param it (Non-const) iterator to copy from - - Allows the creation of an iterator from another GenericMemberIterator - that is "less const". Especially, creating a non-constant iterator - from a constant iterator are disabled: - \li const -> non-const (not ok) - \li const -> const (ok) - \li non-const -> const (ok) - \li non-const -> non-const (ok) - - \note If the \c Const template parameter is already \c false, this - constructor effectively defines a regular copy-constructor. - Otherwise, the copy constructor is implicitly defined. - */ - GenericMemberIterator(const NonConstIterator & it) : ptr_(it.ptr_) {} - Iterator& operator=(const NonConstIterator & it) { ptr_ = it.ptr_; return *this; } - - //! @name stepping - //@{ - Iterator& operator++(){ ++ptr_; return *this; } - Iterator& operator--(){ --ptr_; return *this; } - Iterator operator++(int){ Iterator old(*this); ++ptr_; return old; } - Iterator operator--(int){ Iterator old(*this); --ptr_; return old; } - //@} - - //! @name increment/decrement - //@{ - Iterator operator+(DifferenceType n) const { return Iterator(ptr_+n); } - Iterator operator-(DifferenceType n) const { return Iterator(ptr_-n); } - - Iterator& operator+=(DifferenceType n) { ptr_+=n; return *this; } - Iterator& operator-=(DifferenceType n) { ptr_-=n; return *this; } - //@} - - //! @name relations - //@{ - bool operator==(ConstIterator that) const { return ptr_ == that.ptr_; } - bool operator!=(ConstIterator that) const { return ptr_ != that.ptr_; } - bool operator<=(ConstIterator that) const { return ptr_ <= that.ptr_; } - bool operator>=(ConstIterator that) const { return ptr_ >= that.ptr_; } - bool operator< (ConstIterator that) const { return ptr_ < that.ptr_; } - bool operator> (ConstIterator that) const { return ptr_ > that.ptr_; } - //@} - - //! @name dereference - //@{ - Reference operator*() const { return *ptr_; } - Pointer operator->() const { return ptr_; } - Reference operator[](DifferenceType n) const { return ptr_[n]; } - //@} - - //! 
Distance - DifferenceType operator-(ConstIterator that) const { return ptr_-that.ptr_; } - -private: - //! Internal constructor from plain pointer - explicit GenericMemberIterator(Pointer p) : ptr_(p) {} - - Pointer ptr_; //!< raw pointer -}; - -#else // RAPIDJSON_NOMEMBERITERATORCLASS - -// class-based member iterator implementation disabled, use plain pointers - -template <bool Const, typename Encoding, typename Allocator> -struct GenericMemberIterator; - -//! non-const GenericMemberIterator -template <typename Encoding, typename Allocator> -struct GenericMemberIterator<false,Encoding,Allocator> { - //! use plain pointer as iterator type - typedef GenericMember<Encoding,Allocator>* Iterator; -}; -//! const GenericMemberIterator -template <typename Encoding, typename Allocator> -struct GenericMemberIterator<true,Encoding,Allocator> { - //! use plain const pointer as iterator type - typedef const GenericMember<Encoding,Allocator>* Iterator; -}; - -#endif // RAPIDJSON_NOMEMBERITERATORCLASS - -/////////////////////////////////////////////////////////////////////////////// -// GenericStringRef - -//! Reference to a constant string (not taking a copy) -/*! - \tparam CharType character type of the string - - This helper class is used to automatically infer constant string - references for string literals, especially from \c const \b (!) - character arrays. - - The main use is for creating JSON string values without copying the - source string via an \ref Allocator. This requires that the referenced - string pointers have a sufficient lifetime, which exceeds the lifetime - of the associated GenericValue. - - \b Example - \code - Value v("foo"); // ok, no need to copy & calculate length - const char foo[] = "foo"; - v.SetString(foo); // ok - - const char* bar = foo; - // Value x(bar); // not ok, can't rely on bar's lifetime - Value x(StringRef(bar)); // lifetime explicitly guaranteed by user - Value y(StringRef(bar, 3)); // ok, explicitly pass length - \endcode - - \see StringRef, GenericValue::SetString -*/ -template<typename CharType> -struct GenericStringRef { - typedef CharType Ch; //!< character type of the string - - //! Create string reference from \c const character array -#ifndef __clang__ // -Wdocumentation - /*! - This constructor implicitly creates a constant string reference from - a \c const character array. It has better performance than - \ref StringRef(const CharType*) by inferring the string \ref length - from the array length, and also supports strings containing null - characters. - - \tparam N length of the string, automatically inferred - - \param str Constant character array, lifetime assumed to be longer - than the use of the string in e.g. a GenericValue - - \post \ref s == str - - \note Constant complexity. - \note There is a hidden, private overload to disallow references to - non-const character arrays to be created via this constructor. - By this, e.g. function-scope arrays used to be filled via - \c snprintf are excluded from consideration. - In such cases, the referenced string should be \b copied to the - GenericValue instead. - */ -#endif - template<SizeType N> - GenericStringRef(const CharType (&str)[N]) RAPIDJSON_NOEXCEPT - : s(str), length(N-1) {} - - //! Explicitly create string reference from \c const character pointer -#ifndef __clang__ // -Wdocumentation - /*! - This constructor can be used to \b explicitly create a reference to - a constant string pointer. 
- - \see StringRef(const CharType*) - - \param str Constant character pointer, lifetime assumed to be longer - than the use of the string in e.g. a GenericValue - - \post \ref s == str - - \note There is a hidden, private overload to disallow references to - non-const character arrays to be created via this constructor. - By this, e.g. function-scope arrays used to be filled via - \c snprintf are excluded from consideration. - In such cases, the referenced string should be \b copied to the - GenericValue instead. - */ -#endif - explicit GenericStringRef(const CharType* str) - : s(str), length(internal::StrLen(str)){ RAPIDJSON_ASSERT(s != 0); } - - //! Create constant string reference from pointer and length -#ifndef __clang__ // -Wdocumentation - /*! \param str constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue - \param len length of the string, excluding the trailing NULL terminator - - \post \ref s == str && \ref length == len - \note Constant complexity. - */ -#endif - GenericStringRef(const CharType* str, SizeType len) - : s(str), length(len) { RAPIDJSON_ASSERT(s != 0); } - - GenericStringRef(const GenericStringRef& rhs) : s(rhs.s), length(rhs.length) {} - - GenericStringRef& operator=(const GenericStringRef& rhs) { s = rhs.s; length = rhs.length; } - - //! implicit conversion to plain CharType pointer - operator const Ch *() const { return s; } - - const Ch* const s; //!< plain CharType pointer - const SizeType length; //!< length of the string (excluding the trailing NULL terminator) - -private: - //! Disallow construction from non-const array - template<SizeType N> - GenericStringRef(CharType (&str)[N]) /* = delete */; -}; - -//! Mark a character pointer as constant string -/*! Mark a plain character pointer as a "string literal". This function - can be used to avoid copying a character string to be referenced as a - value in a JSON GenericValue object, if the string's lifetime is known - to be valid long enough. - \tparam CharType Character type of the string - \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue - \return GenericStringRef string reference object - \relatesalso GenericStringRef - - \see GenericValue::GenericValue(StringRefType), GenericValue::operator=(StringRefType), GenericValue::SetString(StringRefType), GenericValue::PushBack(StringRefType, Allocator&), GenericValue::AddMember -*/ -template<typename CharType> -inline GenericStringRef<CharType> StringRef(const CharType* str) { - return GenericStringRef<CharType>(str, internal::StrLen(str)); -} - -//! Mark a character pointer as constant string -/*! Mark a plain character pointer as a "string literal". This function - can be used to avoid copying a character string to be referenced as a - value in a JSON GenericValue object, if the string's lifetime is known - to be valid long enough. - - This version has better performance with supplied length, and also - supports string containing null characters. - - \tparam CharType character type of the string - \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue - \param length The length of source string. - \return GenericStringRef string reference object - \relatesalso GenericStringRef -*/ -template<typename CharType> -inline GenericStringRef<CharType> StringRef(const CharType* str, size_t length) { - return GenericStringRef<CharType>(str, SizeType(length)); -} - -#if RAPIDJSON_HAS_STDSTRING -//! 
Mark a string object as constant string -/*! Mark a string object (e.g. \c std::string) as a "string literal". - This function can be used to avoid copying a string to be referenced as a - value in a JSON GenericValue object, if the string's lifetime is known - to be valid long enough. - - \tparam CharType character type of the string - \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue - \return GenericStringRef string reference object - \relatesalso GenericStringRef - \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. -*/ -template<typename CharType> -inline GenericStringRef<CharType> StringRef(const std::basic_string<CharType>& str) { - return GenericStringRef<CharType>(str.data(), SizeType(str.size())); -} -#endif - -/////////////////////////////////////////////////////////////////////////////// -// GenericValue type traits -namespace internal { - -template <typename T, typename Encoding = void, typename Allocator = void> -struct IsGenericValueImpl : FalseType {}; - -// select candidates according to nested encoding and allocator types -template <typename T> struct IsGenericValueImpl<T, typename Void<typename T::EncodingType>::Type, typename Void<typename T::AllocatorType>::Type> - : IsBaseOf<GenericValue<typename T::EncodingType, typename T::AllocatorType>, T>::Type {}; - -// helper to match arbitrary GenericValue instantiations, including derived classes -template <typename T> struct IsGenericValue : IsGenericValueImpl<T>::Type {}; - -} // namespace internal - -/////////////////////////////////////////////////////////////////////////////// -// TypeHelper - -namespace internal { - -template <typename ValueType, typename T> -struct TypeHelper {}; - -template<typename ValueType> -struct TypeHelper<ValueType, bool> { - static bool Is(const ValueType& v) { return v.IsBool(); } - static bool Get(const ValueType& v) { return v.GetBool(); } - static ValueType& Set(ValueType& v, bool data) { return v.SetBool(data); } - static ValueType& Set(ValueType& v, bool data, typename ValueType::AllocatorType&) { return v.SetBool(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, int> { - static bool Is(const ValueType& v) { return v.IsInt(); } - static int Get(const ValueType& v) { return v.GetInt(); } - static ValueType& Set(ValueType& v, int data) { return v.SetInt(data); } - static ValueType& Set(ValueType& v, int data, typename ValueType::AllocatorType&) { return v.SetInt(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, unsigned> { - static bool Is(const ValueType& v) { return v.IsUint(); } - static unsigned Get(const ValueType& v) { return v.GetUint(); } - static ValueType& Set(ValueType& v, unsigned data) { return v.SetUint(data); } - static ValueType& Set(ValueType& v, unsigned data, typename ValueType::AllocatorType&) { return v.SetUint(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, int64_t> { - static bool Is(const ValueType& v) { return v.IsInt64(); } - static int64_t Get(const ValueType& v) { return v.GetInt64(); } - static ValueType& Set(ValueType& v, int64_t data) { return v.SetInt64(data); } - static ValueType& Set(ValueType& v, int64_t data, typename ValueType::AllocatorType&) { return v.SetInt64(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, uint64_t> { - static bool Is(const ValueType& v) { return v.IsUint64(); } - static uint64_t Get(const ValueType& v) { return v.GetUint64(); } - static 
ValueType& Set(ValueType& v, uint64_t data) { return v.SetUint64(data); } - static ValueType& Set(ValueType& v, uint64_t data, typename ValueType::AllocatorType&) { return v.SetUint64(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, double> { - static bool Is(const ValueType& v) { return v.IsDouble(); } - static double Get(const ValueType& v) { return v.GetDouble(); } - static ValueType& Set(ValueType& v, double data) { return v.SetDouble(data); } - static ValueType& Set(ValueType& v, double data, typename ValueType::AllocatorType&) { return v.SetDouble(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, float> { - static bool Is(const ValueType& v) { return v.IsFloat(); } - static float Get(const ValueType& v) { return v.GetFloat(); } - static ValueType& Set(ValueType& v, float data) { return v.SetFloat(data); } - static ValueType& Set(ValueType& v, float data, typename ValueType::AllocatorType&) { return v.SetFloat(data); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, const typename ValueType::Ch*> { - typedef const typename ValueType::Ch* StringType; - static bool Is(const ValueType& v) { return v.IsString(); } - static StringType Get(const ValueType& v) { return v.GetString(); } - static ValueType& Set(ValueType& v, const StringType data) { return v.SetString(typename ValueType::StringRefType(data)); } - static ValueType& Set(ValueType& v, const StringType data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } -}; - -#if RAPIDJSON_HAS_STDSTRING -template<typename ValueType> -struct TypeHelper<ValueType, std::basic_string<typename ValueType::Ch> > { - typedef std::basic_string<typename ValueType::Ch> StringType; - static bool Is(const ValueType& v) { return v.IsString(); } - static StringType Get(const ValueType& v) { return v.GetString(); } - static ValueType& Set(ValueType& v, const StringType& data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } -}; -#endif - -template<typename ValueType> -struct TypeHelper<ValueType, typename ValueType::Array> { - typedef typename ValueType::Array ArrayType; - static bool Is(const ValueType& v) { return v.IsArray(); } - static ArrayType Get(ValueType& v) { return v.GetArray(); } - static ValueType& Set(ValueType& v, ArrayType data) { return v = data; } - static ValueType& Set(ValueType& v, ArrayType data, typename ValueType::AllocatorType&) { return v = data; } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, typename ValueType::ConstArray> { - typedef typename ValueType::ConstArray ArrayType; - static bool Is(const ValueType& v) { return v.IsArray(); } - static ArrayType Get(const ValueType& v) { return v.GetArray(); } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, typename ValueType::Object> { - typedef typename ValueType::Object ObjectType; - static bool Is(const ValueType& v) { return v.IsObject(); } - static ObjectType Get(ValueType& v) { return v.GetObject(); } - static ValueType& Set(ValueType& v, ObjectType data) { return v = data; } - static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { v = data; } -}; - -template<typename ValueType> -struct TypeHelper<ValueType, typename ValueType::ConstObject> { - typedef typename ValueType::ConstObject ObjectType; - static bool Is(const ValueType& v) { return v.IsObject(); } - static ObjectType Get(const ValueType& v) { return v.GetObject(); } -}; - -} // namespace internal - -// Forward declarations -template <bool, 
typename> class GenericArray; -template <bool, typename> class GenericObject; - -/////////////////////////////////////////////////////////////////////////////// -// GenericValue - -//! Represents a JSON value. Use Value for UTF8 encoding and default allocator. -/*! - A JSON value can be one of 7 types. This class is a variant type supporting - these types. - - Use the Value if UTF8 and default allocator - - \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document) - \tparam Allocator Allocator type for allocating memory of object, array and string. -*/ -template <typename Encoding, typename Allocator = MemoryPoolAllocator<> > -class GenericValue { -public: - //! Name-value pair in an object. - typedef GenericMember<Encoding, Allocator> Member; - typedef Encoding EncodingType; //!< Encoding type from template parameter. - typedef Allocator AllocatorType; //!< Allocator type from template parameter. - typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. - typedef GenericStringRef<Ch> StringRefType; //!< Reference to a constant string - typedef typename GenericMemberIterator<false,Encoding,Allocator>::Iterator MemberIterator; //!< Member iterator for iterating in object. - typedef typename GenericMemberIterator<true,Encoding,Allocator>::Iterator ConstMemberIterator; //!< Constant member iterator for iterating in object. - typedef GenericValue* ValueIterator; //!< Value iterator for iterating in array. - typedef const GenericValue* ConstValueIterator; //!< Constant value iterator for iterating in array. - typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of itself. - typedef GenericArray<false, ValueType> Array; - typedef GenericArray<true, ValueType> ConstArray; - typedef GenericObject<false, ValueType> Object; - typedef GenericObject<true, ValueType> ConstObject; - - //!@name Constructors and destructor. - //@{ - - //! Default constructor creates a null value. - GenericValue() RAPIDJSON_NOEXCEPT : data_() { data_.f.flags = kNullFlag; } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - //! Move constructor in C++11 - GenericValue(GenericValue&& rhs) RAPIDJSON_NOEXCEPT : data_(rhs.data_) { - rhs.data_.f.flags = kNullFlag; // give up contents - } -#endif - -private: - //! Copy constructor is not permitted. - GenericValue(const GenericValue& rhs); - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - //! Moving from a GenericDocument is not permitted. - template <typename StackAllocator> - GenericValue(GenericDocument<Encoding,Allocator,StackAllocator>&& rhs); - - //! Move assignment from a GenericDocument is not permitted. - template <typename StackAllocator> - GenericValue& operator=(GenericDocument<Encoding,Allocator,StackAllocator>&& rhs); -#endif - -public: - - //! Constructor with JSON value type. - /*! This creates a Value of specified type with default content. - \param type Type of the value. - \note Default content for number is zero. - */ - explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() { - static const uint16_t defaultFlags[7] = { - kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag, - kNumberAnyFlag - }; - RAPIDJSON_ASSERT(type <= kNumberType); - data_.f.flags = defaultFlags[type]; - - // Use ShortString to store empty string. - if (type == kStringType) - data_.ss.SetLength(0); - } - - //! Explicit copy constructor (with allocator) - /*! 
Creates a copy of a Value by using the given Allocator - \tparam SourceAllocator allocator of \c rhs - \param rhs Value to copy from (read-only) - \param allocator Allocator for allocating copied elements and buffers. Commonly use GenericDocument::GetAllocator(). - \see CopyFrom() - */ - template< typename SourceAllocator > - GenericValue(const GenericValue<Encoding, SourceAllocator>& rhs, Allocator & allocator); - - //! Constructor for boolean value. - /*! \param b Boolean value - \note This constructor is limited to \em real boolean values and rejects - implicitly converted types like arbitrary pointers. Use an explicit cast - to \c bool, if you want to construct a boolean JSON value in such cases. - */ -#ifndef RAPIDJSON_DOXYGEN_RUNNING // hide SFINAE from Doxygen - template <typename T> - explicit GenericValue(T b, RAPIDJSON_ENABLEIF((internal::IsSame<bool, T>))) RAPIDJSON_NOEXCEPT // See #472 -#else - explicit GenericValue(bool b) RAPIDJSON_NOEXCEPT -#endif - : data_() { - // safe-guard against failing SFINAE - RAPIDJSON_STATIC_ASSERT((internal::IsSame<bool,T>::Value)); - data_.f.flags = b ? kTrueFlag : kFalseFlag; - } - - //! Constructor for int value. - explicit GenericValue(int i) RAPIDJSON_NOEXCEPT : data_() { - data_.n.i64 = i; - data_.f.flags = (i >= 0) ? (kNumberIntFlag | kUintFlag | kUint64Flag) : kNumberIntFlag; - } - - //! Constructor for unsigned value. - explicit GenericValue(unsigned u) RAPIDJSON_NOEXCEPT : data_() { - data_.n.u64 = u; - data_.f.flags = (u & 0x80000000) ? kNumberUintFlag : (kNumberUintFlag | kIntFlag | kInt64Flag); - } - - //! Constructor for int64_t value. - explicit GenericValue(int64_t i64) RAPIDJSON_NOEXCEPT : data_() { - data_.n.i64 = i64; - data_.f.flags = kNumberInt64Flag; - if (i64 >= 0) { - data_.f.flags |= kNumberUint64Flag; - if (!(static_cast<uint64_t>(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) - data_.f.flags |= kUintFlag; - if (!(static_cast<uint64_t>(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) - data_.f.flags |= kIntFlag; - } - else if (i64 >= static_cast<int64_t>(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) - data_.f.flags |= kIntFlag; - } - - //! Constructor for uint64_t value. - explicit GenericValue(uint64_t u64) RAPIDJSON_NOEXCEPT : data_() { - data_.n.u64 = u64; - data_.f.flags = kNumberUint64Flag; - if (!(u64 & RAPIDJSON_UINT64_C2(0x80000000, 0x00000000))) - data_.f.flags |= kInt64Flag; - if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) - data_.f.flags |= kUintFlag; - if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) - data_.f.flags |= kIntFlag; - } - - //! Constructor for double value. - explicit GenericValue(double d) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = d; data_.f.flags = kNumberDoubleFlag; } - - //! Constructor for constant string (i.e. do not make a copy of string) - GenericValue(const Ch* s, SizeType length) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(StringRef(s, length)); } - - //! Constructor for constant string (i.e. do not make a copy of string) - explicit GenericValue(StringRefType s) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(s); } - - //! Constructor for copy-string (i.e. do make a copy of string) - GenericValue(const Ch* s, SizeType length, Allocator& allocator) : data_() { SetStringRaw(StringRef(s, length), allocator); } - - //! Constructor for copy-string (i.e. do make a copy of string) - GenericValue(const Ch*s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } - -#if RAPIDJSON_HAS_STDSTRING - //! 
Constructor for copy-string from a string object (i.e. do make a copy of string) - /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. - */ - GenericValue(const std::basic_string<Ch>& s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } -#endif - - //! Constructor for Array. - /*! - \param a An array obtained by \c GetArray(). - \note \c Array is always pass-by-value. - \note the source array is moved into this value and the sourec array becomes empty. - */ - GenericValue(Array a) RAPIDJSON_NOEXCEPT : data_(a.value_.data_) { - a.value_.data_ = Data(); - a.value_.data_.f.flags = kArrayFlag; - } - - //! Constructor for Object. - /*! - \param o An object obtained by \c GetObject(). - \note \c Object is always pass-by-value. - \note the source object is moved into this value and the sourec object becomes empty. - */ - GenericValue(Object o) RAPIDJSON_NOEXCEPT : data_(o.value_.data_) { - o.value_.data_ = Data(); - o.value_.data_.f.flags = kObjectFlag; - } - - //! Destructor. - /*! Need to destruct elements of array, members of object, or copy-string. - */ - ~GenericValue() { - if (Allocator::kNeedFree) { // Shortcut by Allocator's trait - switch(data_.f.flags) { - case kArrayFlag: - { - GenericValue* e = GetElementsPointer(); - for (GenericValue* v = e; v != e + data_.a.size; ++v) - v->~GenericValue(); - Allocator::Free(e); - } - break; - - case kObjectFlag: - for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) - m->~Member(); - Allocator::Free(GetMembersPointer()); - break; - - case kCopyStringFlag: - Allocator::Free(const_cast<Ch*>(GetStringPointer())); - break; - - default: - break; // Do nothing for other types. - } - } - } - - //@} - - //!@name Assignment operators - //@{ - - //! Assignment with move semantics. - /*! \param rhs Source of the assignment. It will become a null value after assignment. - */ - GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT { - RAPIDJSON_ASSERT(this != &rhs); - this->~GenericValue(); - RawAssign(rhs); - return *this; - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - //! Move assignment in C++11 - GenericValue& operator=(GenericValue&& rhs) RAPIDJSON_NOEXCEPT { - return *this = rhs.Move(); - } -#endif - - //! Assignment of constant string reference (no copy) - /*! \param str Constant string reference to be assigned - \note This overload is needed to avoid clashes with the generic primitive type assignment overload below. - \see GenericStringRef, operator=(T) - */ - GenericValue& operator=(StringRefType str) RAPIDJSON_NOEXCEPT { - GenericValue s(str); - return *this = s; - } - - //! Assignment with primitive types. - /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t - \param value The value to be assigned. - - \note The source type \c T explicitly disallows all pointer types, - especially (\c const) \ref Ch*. This helps avoiding implicitly - referencing character strings with insufficient lifetime, use - \ref SetString(const Ch*, Allocator&) (for copying) or - \ref StringRef() (to explicitly mark the pointer as constant) instead. - All other pointer types would implicitly convert to \c bool, - use \ref SetBool() instead. - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::IsPointer<T>), (GenericValue&)) - operator=(T value) { - GenericValue v(value); - return *this = v; - } - - //! Deep-copy assignment from Value - /*! 
Assigns a \b copy of the Value to the current Value object - \tparam SourceAllocator Allocator type of \c rhs - \param rhs Value to copy from (read-only) - \param allocator Allocator to use for copying - */ - template <typename SourceAllocator> - GenericValue& CopyFrom(const GenericValue<Encoding, SourceAllocator>& rhs, Allocator& allocator) { - RAPIDJSON_ASSERT(static_cast<void*>(this) != static_cast<void const*>(&rhs)); - this->~GenericValue(); - new (this) GenericValue(rhs, allocator); - return *this; - } - - //! Exchange the contents of this value with those of other. - /*! - \param other Another value. - \note Constant complexity. - */ - GenericValue& Swap(GenericValue& other) RAPIDJSON_NOEXCEPT { - GenericValue temp; - temp.RawAssign(*this); - RawAssign(other); - other.RawAssign(temp); - return *this; - } - - //! free-standing swap function helper - /*! - Helper function to enable support for common swap implementation pattern based on \c std::swap: - \code - void swap(MyClass& a, MyClass& b) { - using std::swap; - swap(a.value, b.value); - // ... - } - \endcode - \see Swap() - */ - friend inline void swap(GenericValue& a, GenericValue& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } - - //! Prepare Value for move semantics - /*! \return *this */ - GenericValue& Move() RAPIDJSON_NOEXCEPT { return *this; } - //@} - - //!@name Equal-to and not-equal-to operators - //@{ - //! Equal-to operator - /*! - \note If an object contains duplicated named member, comparing equality with any object is always \c false. - \note Linear time complexity (number of all values in the subtree and total lengths of all strings). - */ - template <typename SourceAllocator> - bool operator==(const GenericValue<Encoding, SourceAllocator>& rhs) const { - typedef GenericValue<Encoding, SourceAllocator> RhsType; - if (GetType() != rhs.GetType()) - return false; - - switch (GetType()) { - case kObjectType: // Warning: O(n^2) inner-loop - if (data_.o.size != rhs.data_.o.size) - return false; - for (ConstMemberIterator lhsMemberItr = MemberBegin(); lhsMemberItr != MemberEnd(); ++lhsMemberItr) { - typename RhsType::ConstMemberIterator rhsMemberItr = rhs.FindMember(lhsMemberItr->name); - if (rhsMemberItr == rhs.MemberEnd() || lhsMemberItr->value != rhsMemberItr->value) - return false; - } - return true; - - case kArrayType: - if (data_.a.size != rhs.data_.a.size) - return false; - for (SizeType i = 0; i < data_.a.size; i++) - if ((*this)[i] != rhs[i]) - return false; - return true; - - case kStringType: - return StringEqual(rhs); - - case kNumberType: - if (IsDouble() || rhs.IsDouble()) { - double a = GetDouble(); // May convert from integer to double. - double b = rhs.GetDouble(); // Ditto - return a >= b && a <= b; // Prevent -Wfloat-equal - } - else - return data_.n.u64 == rhs.data_.n.u64; - - default: - return true; - } - } - - //! Equal-to operator with const C-string pointer - bool operator==(const Ch* rhs) const { return *this == GenericValue(StringRef(rhs)); } - -#if RAPIDJSON_HAS_STDSTRING - //! Equal-to operator with string object - /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. - */ - bool operator==(const std::basic_string<Ch>& rhs) const { return *this == GenericValue(StringRef(rhs)); } -#endif - - //! Equal-to operator with primitive types - /*! 
\tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c true, \c false - */ - template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>,internal::IsGenericValue<T> >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); } - - //! Not-equal-to operator - /*! \return !(*this == rhs) - */ - template <typename SourceAllocator> - bool operator!=(const GenericValue<Encoding, SourceAllocator>& rhs) const { return !(*this == rhs); } - - //! Not-equal-to operator with const C-string pointer - bool operator!=(const Ch* rhs) const { return !(*this == rhs); } - - //! Not-equal-to operator with arbitrary types - /*! \return !(*this == rhs) - */ - template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator!=(const T& rhs) const { return !(*this == rhs); } - - //! Equal-to operator with arbitrary types (symmetric version) - /*! \return (rhs == lhs) - */ - template <typename T> friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator==(const T& lhs, const GenericValue& rhs) { return rhs == lhs; } - - //! Not-Equal-to operator with arbitrary types (symmetric version) - /*! \return !(rhs == lhs) - */ - template <typename T> friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator!=(const T& lhs, const GenericValue& rhs) { return !(rhs == lhs); } - //@} - - //!@name Type - //@{ - - Type GetType() const { return static_cast<Type>(data_.f.flags & kTypeMask); } - bool IsNull() const { return data_.f.flags == kNullFlag; } - bool IsFalse() const { return data_.f.flags == kFalseFlag; } - bool IsTrue() const { return data_.f.flags == kTrueFlag; } - bool IsBool() const { return (data_.f.flags & kBoolFlag) != 0; } - bool IsObject() const { return data_.f.flags == kObjectFlag; } - bool IsArray() const { return data_.f.flags == kArrayFlag; } - bool IsNumber() const { return (data_.f.flags & kNumberFlag) != 0; } - bool IsInt() const { return (data_.f.flags & kIntFlag) != 0; } - bool IsUint() const { return (data_.f.flags & kUintFlag) != 0; } - bool IsInt64() const { return (data_.f.flags & kInt64Flag) != 0; } - bool IsUint64() const { return (data_.f.flags & kUint64Flag) != 0; } - bool IsDouble() const { return (data_.f.flags & kDoubleFlag) != 0; } - bool IsString() const { return (data_.f.flags & kStringFlag) != 0; } - - // Checks whether a number can be losslessly converted to a double. - bool IsLosslessDouble() const { - if (!IsNumber()) return false; - if (IsUint64()) { - uint64_t u = GetUint64(); - volatile double d = static_cast<double>(u); - return (d >= 0.0) - && (d < static_cast<double>(std::numeric_limits<uint64_t>::max())) - && (u == static_cast<uint64_t>(d)); - } - if (IsInt64()) { - int64_t i = GetInt64(); - volatile double d = static_cast<double>(i); - return (d >= static_cast<double>(std::numeric_limits<int64_t>::min())) - && (d < static_cast<double>(std::numeric_limits<int64_t>::max())) - && (i == static_cast<int64_t>(d)); - } - return true; // double, int, uint are always lossless - } - - // Checks whether a number is a float (possible lossy). - bool IsFloat() const { - if ((data_.f.flags & kDoubleFlag) == 0) - return false; - double d = GetDouble(); - return d >= -3.4028234e38 && d <= 3.4028234e38; - } - // Checks whether a number can be losslessly converted to a float. 
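// A small sketch of the numeric predicates above: one parsed number can satisfy
// several Is*() checks at once, and the IsLossless*() helpers guard narrowing.
#include <cassert>
#include "rapidjson/document.h"

void number_checks() {
    rapidjson::Value small(42);                            // int payload
    assert(small.IsInt() && small.IsUint() && small.IsInt64() && small.IsUint64());
    assert(small.IsLosslessDouble() && small.IsLosslessFloat());

    rapidjson::Value big(static_cast<uint64_t>(1) << 63);  // 2^63
    assert(big.IsUint64() && !big.IsInt64());              // too large for int64_t
    assert(big.IsLosslessDouble());                        // a power of two is an exact double

    rapidjson::Value frac(0.1);
    assert(frac.IsDouble() && !frac.IsInt());
    assert(frac.IsFloat() && !frac.IsLosslessFloat());     // in float range, but the value changes
}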
- bool IsLosslessFloat() const { - if (!IsNumber()) return false; - double a = GetDouble(); - if (a < static_cast<double>(-std::numeric_limits<float>::max()) - || a > static_cast<double>(std::numeric_limits<float>::max())) - return false; - double b = static_cast<double>(static_cast<float>(a)); - return a >= b && a <= b; // Prevent -Wfloat-equal - } - - //@} - - //!@name Null - //@{ - - GenericValue& SetNull() { this->~GenericValue(); new (this) GenericValue(); return *this; } - - //@} - - //!@name Bool - //@{ - - bool GetBool() const { RAPIDJSON_ASSERT(IsBool()); return data_.f.flags == kTrueFlag; } - //!< Set boolean value - /*! \post IsBool() == true */ - GenericValue& SetBool(bool b) { this->~GenericValue(); new (this) GenericValue(b); return *this; } - - //@} - - //!@name Object - //@{ - - //! Set this value as an empty object. - /*! \post IsObject() == true */ - GenericValue& SetObject() { this->~GenericValue(); new (this) GenericValue(kObjectType); return *this; } - - //! Get the number of members in the object. - SizeType MemberCount() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size; } - - //! Check whether the object is empty. - bool ObjectEmpty() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size == 0; } - - //! Get a value from an object associated with the name. - /*! \pre IsObject() == true - \tparam T Either \c Ch or \c const \c Ch (template used for disambiguation with \ref operator[](SizeType)) - \note In version 0.1x, if the member is not found, this function returns a null value. This makes issue 7. - Since 0.2, if the name is not correct, it will assert. - If user is unsure whether a member exists, user should use HasMember() first. - A better approach is to use FindMember(). - \note Linear time complexity. - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >),(GenericValue&)) operator[](T* name) { - GenericValue n(StringRef(name)); - return (*this)[n]; - } - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >),(const GenericValue&)) operator[](T* name) const { return const_cast<GenericValue&>(*this)[name]; } - - //! Get a value from an object associated with the name. - /*! \pre IsObject() == true - \tparam SourceAllocator Allocator of the \c name value - - \note Compared to \ref operator[](T*), this version is faster because it does not need a StrLen(). - And it can also handle strings with embedded null characters. - - \note Linear time complexity. - */ - template <typename SourceAllocator> - GenericValue& operator[](const GenericValue<Encoding, SourceAllocator>& name) { - MemberIterator member = FindMember(name); - if (member != MemberEnd()) - return member->value; - else { - RAPIDJSON_ASSERT(false); // see above note - - // This will generate -Wexit-time-destructors in clang - // static GenericValue NullValue; - // return NullValue; - - // Use static buffer and placement-new to prevent destruction - static char buffer[sizeof(GenericValue)]; - return *new (buffer) GenericValue(); - } - } - template <typename SourceAllocator> - const GenericValue& operator[](const GenericValue<Encoding, SourceAllocator>& name) const { return const_cast<GenericValue&>(*this)[name]; } - -#if RAPIDJSON_HAS_STDSTRING - //! Get a value from an object associated with name (string object). 
- GenericValue& operator[](const std::basic_string<Ch>& name) { return (*this)[GenericValue(StringRef(name))]; } - const GenericValue& operator[](const std::basic_string<Ch>& name) const { return (*this)[GenericValue(StringRef(name))]; } -#endif - - //! Const member iterator - /*! \pre IsObject() == true */ - ConstMemberIterator MemberBegin() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer()); } - //! Const \em past-the-end member iterator - /*! \pre IsObject() == true */ - ConstMemberIterator MemberEnd() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer() + data_.o.size); } - //! Member iterator - /*! \pre IsObject() == true */ - MemberIterator MemberBegin() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer()); } - //! \em Past-the-end member iterator - /*! \pre IsObject() == true */ - MemberIterator MemberEnd() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer() + data_.o.size); } - - //! Check whether a member exists in the object. - /*! - \param name Member name to be searched. - \pre IsObject() == true - \return Whether a member with that name exists. - \note It is better to use FindMember() directly if you need the obtain the value as well. - \note Linear time complexity. - */ - bool HasMember(const Ch* name) const { return FindMember(name) != MemberEnd(); } - -#if RAPIDJSON_HAS_STDSTRING - //! Check whether a member exists in the object with string object. - /*! - \param name Member name to be searched. - \pre IsObject() == true - \return Whether a member with that name exists. - \note It is better to use FindMember() directly if you need the obtain the value as well. - \note Linear time complexity. - */ - bool HasMember(const std::basic_string<Ch>& name) const { return FindMember(name) != MemberEnd(); } -#endif - - //! Check whether a member exists in the object with GenericValue name. - /*! - This version is faster because it does not need a StrLen(). It can also handle string with null character. - \param name Member name to be searched. - \pre IsObject() == true - \return Whether a member with that name exists. - \note It is better to use FindMember() directly if you need the obtain the value as well. - \note Linear time complexity. - */ - template <typename SourceAllocator> - bool HasMember(const GenericValue<Encoding, SourceAllocator>& name) const { return FindMember(name) != MemberEnd(); } - - //! Find member by name. - /*! - \param name Member name to be searched. - \pre IsObject() == true - \return Iterator to member, if it exists. - Otherwise returns \ref MemberEnd(). - - \note Earlier versions of Rapidjson returned a \c NULL pointer, in case - the requested member doesn't exist. For consistency with e.g. - \c std::map, this has been changed to MemberEnd() now. - \note Linear time complexity. - */ - MemberIterator FindMember(const Ch* name) { - GenericValue n(StringRef(name)); - return FindMember(n); - } - - ConstMemberIterator FindMember(const Ch* name) const { return const_cast<GenericValue&>(*this).FindMember(name); } - - //! Find member by name. - /*! - This version is faster because it does not need a StrLen(). It can also handle string with null character. - \param name Member name to be searched. - \pre IsObject() == true - \return Iterator to member, if it exists. - Otherwise returns \ref MemberEnd(). - - \note Earlier versions of Rapidjson returned a \c NULL pointer, in case - the requested member doesn't exist. For consistency with e.g. 
- \c std::map, this has been changed to MemberEnd() now. - \note Linear time complexity. - */ - template <typename SourceAllocator> - MemberIterator FindMember(const GenericValue<Encoding, SourceAllocator>& name) { - RAPIDJSON_ASSERT(IsObject()); - RAPIDJSON_ASSERT(name.IsString()); - MemberIterator member = MemberBegin(); - for ( ; member != MemberEnd(); ++member) - if (name.StringEqual(member->name)) - break; - return member; - } - template <typename SourceAllocator> ConstMemberIterator FindMember(const GenericValue<Encoding, SourceAllocator>& name) const { return const_cast<GenericValue&>(*this).FindMember(name); } - -#if RAPIDJSON_HAS_STDSTRING - //! Find member by string object name. - /*! - \param name Member name to be searched. - \pre IsObject() == true - \return Iterator to member, if it exists. - Otherwise returns \ref MemberEnd(). - */ - MemberIterator FindMember(const std::basic_string<Ch>& name) { return FindMember(StringRef(name)); } - ConstMemberIterator FindMember(const std::basic_string<Ch>& name) const { return FindMember(StringRef(name)); } -#endif - - //! Add a member (name-value pair) to the object. - /*! \param name A string value as name of member. - \param value Value of any type. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \note The ownership of \c name and \c value will be transferred to this object on success. - \pre IsObject() && name.IsString() - \post name.IsNull() && value.IsNull() - \note Amortized Constant time complexity. - */ - GenericValue& AddMember(GenericValue& name, GenericValue& value, Allocator& allocator) { - RAPIDJSON_ASSERT(IsObject()); - RAPIDJSON_ASSERT(name.IsString()); - - ObjectData& o = data_.o; - if (o.size >= o.capacity) { - if (o.capacity == 0) { - o.capacity = kDefaultObjectCapacity; - SetMembersPointer(reinterpret_cast<Member*>(allocator.Malloc(o.capacity * sizeof(Member)))); - } - else { - SizeType oldCapacity = o.capacity; - o.capacity += (oldCapacity + 1) / 2; // grow by factor 1.5 - SetMembersPointer(reinterpret_cast<Member*>(allocator.Realloc(GetMembersPointer(), oldCapacity * sizeof(Member), o.capacity * sizeof(Member)))); - } - } - Member* members = GetMembersPointer(); - members[o.size].name.RawAssign(name); - members[o.size].value.RawAssign(value); - o.size++; - return *this; - } - - //! Add a constant string value as member (name-value pair) to the object. - /*! \param name A string value as name of member. - \param value constant string reference as value of member. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \pre IsObject() - \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. - \note Amortized Constant time complexity. - */ - GenericValue& AddMember(GenericValue& name, StringRefType value, Allocator& allocator) { - GenericValue v(value); - return AddMember(name, v, allocator); - } - -#if RAPIDJSON_HAS_STDSTRING - //! Add a string object as member (name-value pair) to the object. - /*! \param name A string value as name of member. - \param value constant string reference as value of member. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). 
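
For reference, the object-lookup API in the hunk above (operator[], HasMember(), FindMember()) composes as in the following sketch against the public DOM interface; the document contents, member names, and the LookupExample wrapper are illustrative assumptions only, not part of the removed header.

\code
#include <cstdio>
#include "rapidjson/document.h"
using namespace rapidjson;

void LookupExample() {
    Document doc;
    doc.Parse("{\"name\":\"infinitytier\",\"stars\":42}");

    // operator[] asserts if the member is missing (see the note above),
    // so check with HasMember() first or use FindMember() for a non-asserting lookup.
    if (doc.HasMember("name") && doc["name"].IsString())
        std::printf("name = %s\n", doc["name"].GetString());

    Value::ConstMemberIterator it = doc.FindMember("stars");
    if (it != doc.MemberEnd() && it->value.IsInt())
        std::printf("stars = %d\n", it->value.GetInt());
}
\endcode
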
- \return The value itself for fluent API. - \pre IsObject() - \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. - \note Amortized Constant time complexity. - */ - GenericValue& AddMember(GenericValue& name, std::basic_string<Ch>& value, Allocator& allocator) { - GenericValue v(value, allocator); - return AddMember(name, v, allocator); - } -#endif - - //! Add any primitive value as member (name-value pair) to the object. - /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t - \param name A string value as name of member. - \param value Value of primitive type \c T as value of member - \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \pre IsObject() - - \note The source type \c T explicitly disallows all pointer types, - especially (\c const) \ref Ch*. This helps avoiding implicitly - referencing character strings with insufficient lifetime, use - \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref - AddMember(StringRefType, StringRefType, Allocator&). - All other pointer types would implicitly convert to \c bool, - use an explicit cast instead, if needed. - \note Amortized Constant time complexity. - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericValue&)) - AddMember(GenericValue& name, T value, Allocator& allocator) { - GenericValue v(value); - return AddMember(name, v, allocator); - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericValue& AddMember(GenericValue&& name, GenericValue&& value, Allocator& allocator) { - return AddMember(name, value, allocator); - } - GenericValue& AddMember(GenericValue&& name, GenericValue& value, Allocator& allocator) { - return AddMember(name, value, allocator); - } - GenericValue& AddMember(GenericValue& name, GenericValue&& value, Allocator& allocator) { - return AddMember(name, value, allocator); - } - GenericValue& AddMember(StringRefType name, GenericValue&& value, Allocator& allocator) { - GenericValue n(name); - return AddMember(n, value, allocator); - } -#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS - - - //! Add a member (name-value pair) to the object. - /*! \param name A constant string reference as name of member. - \param value Value of any type. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \note The ownership of \c value will be transferred to this object on success. - \pre IsObject() - \post value.IsNull() - \note Amortized Constant time complexity. - */ - GenericValue& AddMember(StringRefType name, GenericValue& value, Allocator& allocator) { - GenericValue n(name); - return AddMember(n, value, allocator); - } - - //! Add a constant string value as member (name-value pair) to the object. - /*! \param name A constant string reference as name of member. - \param value constant string reference as value of member. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \pre IsObject() - \note This overload is needed to avoid clashes with the generic primitive type AddMember(StringRefType,T,Allocator&) overload below. - \note Amortized Constant time complexity. 
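
The AddMember() overloads above all transfer ownership of the name and value into the object and require the document's allocator; a minimal sketch of the common call patterns, with member names and values invented for illustration:

\code
#include "rapidjson/document.h"
using namespace rapidjson;

void AddMemberExample() {
    Document doc;
    doc.SetObject();
    Document::AllocatorType& a = doc.GetAllocator();

    Value name("version", a);        // copied string used as the member name
    doc.AddMember(name, 2, a);       // name is moved into the object and becomes Null afterwards

    doc.AddMember("stable", true, a);             // string literal -> StringRefType name, primitive value
    doc.AddMember("codename", "tap-cleanup", a);  // literal name and literal (constant string) value
}
\endcode
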
- */ - GenericValue& AddMember(StringRefType name, StringRefType value, Allocator& allocator) { - GenericValue v(value); - return AddMember(name, v, allocator); - } - - //! Add any primitive value as member (name-value pair) to the object. - /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t - \param name A constant string reference as name of member. - \param value Value of primitive type \c T as value of member - \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \pre IsObject() - - \note The source type \c T explicitly disallows all pointer types, - especially (\c const) \ref Ch*. This helps avoiding implicitly - referencing character strings with insufficient lifetime, use - \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref - AddMember(StringRefType, StringRefType, Allocator&). - All other pointer types would implicitly convert to \c bool, - use an explicit cast instead, if needed. - \note Amortized Constant time complexity. - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericValue&)) - AddMember(StringRefType name, T value, Allocator& allocator) { - GenericValue n(name); - return AddMember(n, value, allocator); - } - - //! Remove all members in the object. - /*! This function do not deallocate memory in the object, i.e. the capacity is unchanged. - \note Linear time complexity. - */ - void RemoveAllMembers() { - RAPIDJSON_ASSERT(IsObject()); - for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) - m->~Member(); - data_.o.size = 0; - } - - //! Remove a member in object by its name. - /*! \param name Name of member to be removed. - \return Whether the member existed. - \note This function may reorder the object members. Use \ref - EraseMember(ConstMemberIterator) if you need to preserve the - relative order of the remaining members. - \note Linear time complexity. - */ - bool RemoveMember(const Ch* name) { - GenericValue n(StringRef(name)); - return RemoveMember(n); - } - -#if RAPIDJSON_HAS_STDSTRING - bool RemoveMember(const std::basic_string<Ch>& name) { return RemoveMember(GenericValue(StringRef(name))); } -#endif - - template <typename SourceAllocator> - bool RemoveMember(const GenericValue<Encoding, SourceAllocator>& name) { - MemberIterator m = FindMember(name); - if (m != MemberEnd()) { - RemoveMember(m); - return true; - } - else - return false; - } - - //! Remove a member in object by iterator. - /*! \param m member iterator (obtained by FindMember() or MemberBegin()). - \return the new iterator after removal. - \note This function may reorder the object members. Use \ref - EraseMember(ConstMemberIterator) if you need to preserve the - relative order of the remaining members. - \note Constant time complexity. - */ - MemberIterator RemoveMember(MemberIterator m) { - RAPIDJSON_ASSERT(IsObject()); - RAPIDJSON_ASSERT(data_.o.size > 0); - RAPIDJSON_ASSERT(GetMembersPointer() != 0); - RAPIDJSON_ASSERT(m >= MemberBegin() && m < MemberEnd()); - - MemberIterator last(GetMembersPointer() + (data_.o.size - 1)); - if (data_.o.size > 1 && m != last) - *m = *last; // Move the last one to this place - else - m->~Member(); // Only one left, just destroy - --data_.o.size; - return m; - } - - //! Remove a member from an object by iterator. - /*! 
\param pos iterator to the member to remove - \pre IsObject() == true && \ref MemberBegin() <= \c pos < \ref MemberEnd() - \return Iterator following the removed element. - If the iterator \c pos refers to the last element, the \ref MemberEnd() iterator is returned. - \note This function preserves the relative order of the remaining object - members. If you do not need this, use the more efficient \ref RemoveMember(MemberIterator). - \note Linear time complexity. - */ - MemberIterator EraseMember(ConstMemberIterator pos) { - return EraseMember(pos, pos +1); - } - - //! Remove members in the range [first, last) from an object. - /*! \param first iterator to the first member to remove - \param last iterator following the last member to remove - \pre IsObject() == true && \ref MemberBegin() <= \c first <= \c last <= \ref MemberEnd() - \return Iterator following the last removed element. - \note This function preserves the relative order of the remaining object - members. - \note Linear time complexity. - */ - MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) { - RAPIDJSON_ASSERT(IsObject()); - RAPIDJSON_ASSERT(data_.o.size > 0); - RAPIDJSON_ASSERT(GetMembersPointer() != 0); - RAPIDJSON_ASSERT(first >= MemberBegin()); - RAPIDJSON_ASSERT(first <= last); - RAPIDJSON_ASSERT(last <= MemberEnd()); - - MemberIterator pos = MemberBegin() + (first - MemberBegin()); - for (MemberIterator itr = pos; itr != last; ++itr) - itr->~Member(); - std::memmove(&*pos, &*last, static_cast<size_t>(MemberEnd() - last) * sizeof(Member)); - data_.o.size -= static_cast<SizeType>(last - first); - return pos; - } - - //! Erase a member in object by its name. - /*! \param name Name of member to be removed. - \return Whether the member existed. - \note Linear time complexity. - */ - bool EraseMember(const Ch* name) { - GenericValue n(StringRef(name)); - return EraseMember(n); - } - -#if RAPIDJSON_HAS_STDSTRING - bool EraseMember(const std::basic_string<Ch>& name) { return EraseMember(GenericValue(StringRef(name))); } -#endif - - template <typename SourceAllocator> - bool EraseMember(const GenericValue<Encoding, SourceAllocator>& name) { - MemberIterator m = FindMember(name); - if (m != MemberEnd()) { - EraseMember(m); - return true; - } - else - return false; - } - - Object GetObject() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); } - ConstObject GetObject() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); } - - //@} - - //!@name Array - //@{ - - //! Set this value as an empty array. - /*! \post IsArray == true */ - GenericValue& SetArray() { this->~GenericValue(); new (this) GenericValue(kArrayType); return *this; } - - //! Get the number of elements in array. - SizeType Size() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size; } - - //! Get the capacity of array. - SizeType Capacity() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.capacity; } - - //! Check whether the array is empty. - bool Empty() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size == 0; } - - //! Remove all elements in the array. - /*! This function do not deallocate memory in the array, i.e. the capacity is unchanged. - \note Linear time complexity. - */ - void Clear() { - RAPIDJSON_ASSERT(IsArray()); - GenericValue* e = GetElementsPointer(); - for (GenericValue* v = e; v != e + data_.a.size; ++v) - v->~GenericValue(); - data_.a.size = 0; - } - - //! Get an element from array by index. - /*! \pre IsArray() == true - \param index Zero-based index of element. 
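
The trade-off spelled out above, RemoveMember() being constant-time but free to reorder members versus EraseMember() being linear but order-preserving, looks like this in practice; the member names and the assumption that doc already holds an object are illustrative:

\code
#include "rapidjson/document.h"
using namespace rapidjson;

void RemoveExample(Document& doc) {
    // Fast: moves the last member into the hole, so relative order may change.
    doc.RemoveMember("obsolete");

    // Order-preserving: shifts the remaining members down (linear time).
    Value::MemberIterator it = doc.FindMember("deprecated");
    if (it != doc.MemberEnd())
        doc.EraseMember(it);
}
\endcode
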
- \see operator[](T*) - */ - GenericValue& operator[](SizeType index) { - RAPIDJSON_ASSERT(IsArray()); - RAPIDJSON_ASSERT(index < data_.a.size); - return GetElementsPointer()[index]; - } - const GenericValue& operator[](SizeType index) const { return const_cast<GenericValue&>(*this)[index]; } - - //! Element iterator - /*! \pre IsArray() == true */ - ValueIterator Begin() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer(); } - //! \em Past-the-end element iterator - /*! \pre IsArray() == true */ - ValueIterator End() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer() + data_.a.size; } - //! Constant element iterator - /*! \pre IsArray() == true */ - ConstValueIterator Begin() const { return const_cast<GenericValue&>(*this).Begin(); } - //! Constant \em past-the-end element iterator - /*! \pre IsArray() == true */ - ConstValueIterator End() const { return const_cast<GenericValue&>(*this).End(); } - - //! Request the array to have enough capacity to store elements. - /*! \param newCapacity The capacity that the array at least need to have. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \note Linear time complexity. - */ - GenericValue& Reserve(SizeType newCapacity, Allocator &allocator) { - RAPIDJSON_ASSERT(IsArray()); - if (newCapacity > data_.a.capacity) { - SetElementsPointer(reinterpret_cast<GenericValue*>(allocator.Realloc(GetElementsPointer(), data_.a.capacity * sizeof(GenericValue), newCapacity * sizeof(GenericValue)))); - data_.a.capacity = newCapacity; - } - return *this; - } - - //! Append a GenericValue at the end of the array. - /*! \param value Value to be appended. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \pre IsArray() == true - \post value.IsNull() == true - \return The value itself for fluent API. - \note The ownership of \c value will be transferred to this array on success. - \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. - \note Amortized constant time complexity. - */ - GenericValue& PushBack(GenericValue& value, Allocator& allocator) { - RAPIDJSON_ASSERT(IsArray()); - if (data_.a.size >= data_.a.capacity) - Reserve(data_.a.capacity == 0 ? kDefaultArrayCapacity : (data_.a.capacity + (data_.a.capacity + 1) / 2), allocator); - GetElementsPointer()[data_.a.size++].RawAssign(value); - return *this; - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericValue& PushBack(GenericValue&& value, Allocator& allocator) { - return PushBack(value, allocator); - } -#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS - - //! Append a constant string reference at the end of the array. - /*! \param value Constant string reference to be appended. - \param allocator Allocator for reallocating memory. It must be the same one used previously. Commonly use GenericDocument::GetAllocator(). - \pre IsArray() == true - \return The value itself for fluent API. - \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. - \note Amortized constant time complexity. - \see GenericStringRef - */ - GenericValue& PushBack(StringRefType value, Allocator& allocator) { - return (*this).template PushBack<StringRefType>(value, allocator); - } - - //! Append a primitive value at the end of the array. - /*! 
\tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t - \param value Value of primitive type T to be appended. - \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). - \pre IsArray() == true - \return The value itself for fluent API. - \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. - - \note The source type \c T explicitly disallows all pointer types, - especially (\c const) \ref Ch*. This helps avoiding implicitly - referencing character strings with insufficient lifetime, use - \ref PushBack(GenericValue&, Allocator&) or \ref - PushBack(StringRefType, Allocator&). - All other pointer types would implicitly convert to \c bool, - use an explicit cast instead, if needed. - \note Amortized constant time complexity. - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericValue&)) - PushBack(T value, Allocator& allocator) { - GenericValue v(value); - return PushBack(v, allocator); - } - - //! Remove the last element in the array. - /*! - \note Constant time complexity. - */ - GenericValue& PopBack() { - RAPIDJSON_ASSERT(IsArray()); - RAPIDJSON_ASSERT(!Empty()); - GetElementsPointer()[--data_.a.size].~GenericValue(); - return *this; - } - - //! Remove an element of array by iterator. - /*! - \param pos iterator to the element to remove - \pre IsArray() == true && \ref Begin() <= \c pos < \ref End() - \return Iterator following the removed element. If the iterator pos refers to the last element, the End() iterator is returned. - \note Linear time complexity. - */ - ValueIterator Erase(ConstValueIterator pos) { - return Erase(pos, pos + 1); - } - - //! Remove elements in the range [first, last) of the array. - /*! - \param first iterator to the first element to remove - \param last iterator following the last element to remove - \pre IsArray() == true && \ref Begin() <= \c first <= \c last <= \ref End() - \return Iterator following the last removed element. - \note Linear time complexity. - */ - ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) { - RAPIDJSON_ASSERT(IsArray()); - RAPIDJSON_ASSERT(data_.a.size > 0); - RAPIDJSON_ASSERT(GetElementsPointer() != 0); - RAPIDJSON_ASSERT(first >= Begin()); - RAPIDJSON_ASSERT(first <= last); - RAPIDJSON_ASSERT(last <= End()); - ValueIterator pos = Begin() + (first - Begin()); - for (ValueIterator itr = pos; itr != last; ++itr) - itr->~GenericValue(); - std::memmove(pos, last, static_cast<size_t>(End() - last) * sizeof(GenericValue)); - data_.a.size -= static_cast<SizeType>(last - first); - return pos; - } - - Array GetArray() { RAPIDJSON_ASSERT(IsArray()); return Array(*this); } - ConstArray GetArray() const { RAPIDJSON_ASSERT(IsArray()); return ConstArray(*this); } - - //@} - - //!@name Number - //@{ - - int GetInt() const { RAPIDJSON_ASSERT(data_.f.flags & kIntFlag); return data_.n.i.i; } - unsigned GetUint() const { RAPIDJSON_ASSERT(data_.f.flags & kUintFlag); return data_.n.u.u; } - int64_t GetInt64() const { RAPIDJSON_ASSERT(data_.f.flags & kInt64Flag); return data_.n.i64; } - uint64_t GetUint64() const { RAPIDJSON_ASSERT(data_.f.flags & kUint64Flag); return data_.n.u64; } - - //! Get the value as double type. - /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessDouble() to check whether the converison is lossless. 
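
The array interface above (Reserve(), PushBack(), Erase()) is typically chained through the fluent return value; a sketch, assuming doc already holds an object and using invented element values:

\code
#include <cstdio>
#include "rapidjson/document.h"
using namespace rapidjson;

void ArrayExample(Document& doc) {   // doc is assumed to already be an object
    Document::AllocatorType& a = doc.GetAllocator();

    Value arr(kArrayType);
    arr.Reserve(3, a);                                         // avoid repeated reallocation
    arr.PushBack(1, a).PushBack(2.5, a).PushBack("three", a);  // fluent API; the literal becomes a constant StringRef

    arr.Erase(arr.Begin());                                    // order-preserving removal of the first element

    for (Value::ConstValueIterator v = arr.Begin(); v != arr.End(); ++v)
        std::printf("element type = %d\n", static_cast<int>(v->GetType()));

    doc.AddMember("values", arr, a);                           // arr is moved in and becomes Null
}
\endcode
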
- */ - double GetDouble() const { - RAPIDJSON_ASSERT(IsNumber()); - if ((data_.f.flags & kDoubleFlag) != 0) return data_.n.d; // exact type, no conversion. - if ((data_.f.flags & kIntFlag) != 0) return data_.n.i.i; // int -> double - if ((data_.f.flags & kUintFlag) != 0) return data_.n.u.u; // unsigned -> double - if ((data_.f.flags & kInt64Flag) != 0) return static_cast<double>(data_.n.i64); // int64_t -> double (may lose precision) - RAPIDJSON_ASSERT((data_.f.flags & kUint64Flag) != 0); return static_cast<double>(data_.n.u64); // uint64_t -> double (may lose precision) - } - - //! Get the value as float type. - /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessFloat() to check whether the converison is lossless. - */ - float GetFloat() const { - return static_cast<float>(GetDouble()); - } - - GenericValue& SetInt(int i) { this->~GenericValue(); new (this) GenericValue(i); return *this; } - GenericValue& SetUint(unsigned u) { this->~GenericValue(); new (this) GenericValue(u); return *this; } - GenericValue& SetInt64(int64_t i64) { this->~GenericValue(); new (this) GenericValue(i64); return *this; } - GenericValue& SetUint64(uint64_t u64) { this->~GenericValue(); new (this) GenericValue(u64); return *this; } - GenericValue& SetDouble(double d) { this->~GenericValue(); new (this) GenericValue(d); return *this; } - GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(f); return *this; } - - //@} - - //!@name String - //@{ - - const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return (data_.f.flags & kInlineStrFlag) ? data_.ss.str : GetStringPointer(); } - - //! Get the length of string. - /*! Since rapidjson permits "\\u0000" in the json string, strlen(v.GetString()) may not equal to v.GetStringLength(). - */ - SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return ((data_.f.flags & kInlineStrFlag) ? (data_.ss.GetLength()) : data_.s.length); } - - //! Set this value as a string without copying source string. - /*! This version has better performance with supplied length, and also support string containing null character. - \param s source string pointer. - \param length The length of source string, excluding the trailing null terminator. - \return The value itself for fluent API. - \post IsString() == true && GetString() == s && GetStringLength() == length - \see SetString(StringRefType) - */ - GenericValue& SetString(const Ch* s, SizeType length) { return SetString(StringRef(s, length)); } - - //! Set this value as a string without copying source string. - /*! \param s source string reference - \return The value itself for fluent API. - \post IsString() == true && GetString() == s && GetStringLength() == s.length - */ - GenericValue& SetString(StringRefType s) { this->~GenericValue(); SetStringRaw(s); return *this; } - - //! Set this value as a string by copying from source string. - /*! This version has better performance with supplied length, and also support string containing null character. - \param s source string. - \param length The length of source string, excluding the trailing null terminator. - \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. 
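
The two SetString() families documented here differ in ownership: the StringRef form stores only a pointer that the caller must keep alive, while the allocator form copies into the document's memory. A small sketch with an invented buffer:

\code
#include <cstring>
#include "rapidjson/document.h"
using namespace rapidjson;

void StringExample(Document& doc) {
    char transient[32];
    std::strcpy(transient, "short-lived buffer");

    Value copied;
    copied.SetString(transient, doc.GetAllocator());     // copies via the document's allocator

    Value referenced;
    referenced.SetString(StringRef("a string literal")); // no copy; the literal outlives the value

    std::memset(transient, 0, sizeof(transient));        // copied.GetString() is unaffected by this
}
\endcode
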
- \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length - */ - GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { this->~GenericValue(); SetStringRaw(StringRef(s, length), allocator); return *this; } - - //! Set this value as a string by copying from source string. - /*! \param s source string. - \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length - */ - GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(s, internal::StrLen(s), allocator); } - -#if RAPIDJSON_HAS_STDSTRING - //! Set this value as a string by copying from source string. - /*! \param s source string. - \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). - \return The value itself for fluent API. - \post IsString() == true && GetString() != s.data() && strcmp(GetString(),s.data() == 0 && GetStringLength() == s.size() - \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. - */ - GenericValue& SetString(const std::basic_string<Ch>& s, Allocator& allocator) { return SetString(s.data(), SizeType(s.size()), allocator); } -#endif - - //@} - - //!@name Array - //@{ - - //! Templated version for checking whether this value is type T. - /*! - \tparam T Either \c bool, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c float, \c const \c char*, \c std::basic_string<Ch> - */ - template <typename T> - bool Is() const { return internal::TypeHelper<ValueType, T>::Is(*this); } - - template <typename T> - T Get() const { return internal::TypeHelper<ValueType, T>::Get(*this); } - - template <typename T> - T Get() { return internal::TypeHelper<ValueType, T>::Get(*this); } - - template<typename T> - ValueType& Set(const T& data) { return internal::TypeHelper<ValueType, T>::Set(*this, data); } - - template<typename T> - ValueType& Set(const T& data, AllocatorType& allocator) { return internal::TypeHelper<ValueType, T>::Set(*this, data, allocator); } - - //@} - - //! Generate events of this value to a Handler. - /*! This function adopts the GoF visitor pattern. - Typical usage is to output this JSON value as JSON text via Writer, which is a Handler. - It can also be used to deep clone this value via GenericDocument, which is also a Handler. - \tparam Handler type of handler. - \param handler An object implementing concept Handler. - */ - template <typename Handler> - bool Accept(Handler& handler) const { - switch(GetType()) { - case kNullType: return handler.Null(); - case kFalseType: return handler.Bool(false); - case kTrueType: return handler.Bool(true); - - case kObjectType: - if (RAPIDJSON_UNLIKELY(!handler.StartObject())) - return false; - for (ConstMemberIterator m = MemberBegin(); m != MemberEnd(); ++m) { - RAPIDJSON_ASSERT(m->name.IsString()); // User may change the type of name by MemberIterator. 
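
Accept() is the bridge from this DOM back to SAX events; pairing it with the library's Writer and StringBuffer (declared in writer.h and stringbuffer.h of the same bundled copy) is the usual way to serialise, as in this sketch:

\code
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
using namespace rapidjson;

void SerializeExample(const Document& doc) {
    StringBuffer buffer;
    Writer<StringBuffer> writer(buffer);   // Writer implements the Handler concept
    doc.Accept(writer);                    // visit the DOM and emit JSON text
    std::puts(buffer.GetString());
}
\endcode
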
- if (RAPIDJSON_UNLIKELY(!handler.Key(m->name.GetString(), m->name.GetStringLength(), (m->name.data_.f.flags & kCopyFlag) != 0))) - return false; - if (RAPIDJSON_UNLIKELY(!m->value.Accept(handler))) - return false; - } - return handler.EndObject(data_.o.size); - - case kArrayType: - if (RAPIDJSON_UNLIKELY(!handler.StartArray())) - return false; - for (const GenericValue* v = Begin(); v != End(); ++v) - if (RAPIDJSON_UNLIKELY(!v->Accept(handler))) - return false; - return handler.EndArray(data_.a.size); - - case kStringType: - return handler.String(GetString(), GetStringLength(), (data_.f.flags & kCopyFlag) != 0); - - default: - RAPIDJSON_ASSERT(GetType() == kNumberType); - if (IsDouble()) return handler.Double(data_.n.d); - else if (IsInt()) return handler.Int(data_.n.i.i); - else if (IsUint()) return handler.Uint(data_.n.u.u); - else if (IsInt64()) return handler.Int64(data_.n.i64); - else return handler.Uint64(data_.n.u64); - } - } - -private: - template <typename, typename> friend class GenericValue; - template <typename, typename, typename> friend class GenericDocument; - - enum { - kBoolFlag = 0x0008, - kNumberFlag = 0x0010, - kIntFlag = 0x0020, - kUintFlag = 0x0040, - kInt64Flag = 0x0080, - kUint64Flag = 0x0100, - kDoubleFlag = 0x0200, - kStringFlag = 0x0400, - kCopyFlag = 0x0800, - kInlineStrFlag = 0x1000, - - // Initial flags of different types. - kNullFlag = kNullType, - kTrueFlag = kTrueType | kBoolFlag, - kFalseFlag = kFalseType | kBoolFlag, - kNumberIntFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag, - kNumberUintFlag = kNumberType | kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag, - kNumberInt64Flag = kNumberType | kNumberFlag | kInt64Flag, - kNumberUint64Flag = kNumberType | kNumberFlag | kUint64Flag, - kNumberDoubleFlag = kNumberType | kNumberFlag | kDoubleFlag, - kNumberAnyFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag, - kConstStringFlag = kStringType | kStringFlag, - kCopyStringFlag = kStringType | kStringFlag | kCopyFlag, - kShortStringFlag = kStringType | kStringFlag | kCopyFlag | kInlineStrFlag, - kObjectFlag = kObjectType, - kArrayFlag = kArrayType, - - kTypeMask = 0x07 - }; - - static const SizeType kDefaultArrayCapacity = 16; - static const SizeType kDefaultObjectCapacity = 16; - - struct Flag { -#if RAPIDJSON_48BITPOINTER_OPTIMIZATION - char payload[sizeof(SizeType) * 2 + 6]; // 2 x SizeType + lower 48-bit pointer -#elif RAPIDJSON_64BIT - char payload[sizeof(SizeType) * 2 + sizeof(void*) + 6]; // 6 padding bytes -#else - char payload[sizeof(SizeType) * 2 + sizeof(void*) + 2]; // 2 padding bytes -#endif - uint16_t flags; - }; - - struct String { - SizeType length; - SizeType hashcode; //!< reserved - const Ch* str; - }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode - - // implementation detail: ShortString can represent zero-terminated strings up to MaxSize chars - // (excluding the terminating zero) and store a value to determine the length of the contained - // string in the last character str[LenPos] by storing "MaxSize - length" there. If the string - // to store has the maximal length of MaxSize then str[LenPos] will be 0 and therefore act as - // the string terminator as well. For getting the string length back from that value just use - // "MaxSize - str[LenPos]". - // This allows to store 13-chars strings in 32-bit mode, 21-chars strings in 64-bit mode, - // 13-chars strings for RAPIDJSON_48BITPOINTER_OPTIMIZATION=1 inline (for `UTF8`-encoded strings). 
- struct ShortString { - enum { MaxChars = sizeof(static_cast<Flag*>(0)->payload) / sizeof(Ch), MaxSize = MaxChars - 1, LenPos = MaxSize }; - Ch str[MaxChars]; - - inline static bool Usable(SizeType len) { return (MaxSize >= len); } - inline void SetLength(SizeType len) { str[LenPos] = static_cast<Ch>(MaxSize - len); } - inline SizeType GetLength() const { return static_cast<SizeType>(MaxSize - str[LenPos]); } - }; // at most as many bytes as "String" above => 12 bytes in 32-bit mode, 16 bytes in 64-bit mode - - // By using proper binary layout, retrieval of different integer types do not need conversions. - union Number { -#if RAPIDJSON_ENDIAN == RAPIDJSON_LITTLEENDIAN - struct I { - int i; - char padding[4]; - }i; - struct U { - unsigned u; - char padding2[4]; - }u; -#else - struct I { - char padding[4]; - int i; - }i; - struct U { - char padding2[4]; - unsigned u; - }u; -#endif - int64_t i64; - uint64_t u64; - double d; - }; // 8 bytes - - struct ObjectData { - SizeType size; - SizeType capacity; - Member* members; - }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode - - struct ArrayData { - SizeType size; - SizeType capacity; - GenericValue* elements; - }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode - - union Data { - String s; - ShortString ss; - Number n; - ObjectData o; - ArrayData a; - Flag f; - }; // 16 bytes in 32-bit mode, 24 bytes in 64-bit mode, 16 bytes in 64-bit with RAPIDJSON_48BITPOINTER_OPTIMIZATION - - RAPIDJSON_FORCEINLINE const Ch* GetStringPointer() const { return RAPIDJSON_GETPOINTER(Ch, data_.s.str); } - RAPIDJSON_FORCEINLINE const Ch* SetStringPointer(const Ch* str) { return RAPIDJSON_SETPOINTER(Ch, data_.s.str, str); } - RAPIDJSON_FORCEINLINE GenericValue* GetElementsPointer() const { return RAPIDJSON_GETPOINTER(GenericValue, data_.a.elements); } - RAPIDJSON_FORCEINLINE GenericValue* SetElementsPointer(GenericValue* elements) { return RAPIDJSON_SETPOINTER(GenericValue, data_.a.elements, elements); } - RAPIDJSON_FORCEINLINE Member* GetMembersPointer() const { return RAPIDJSON_GETPOINTER(Member, data_.o.members); } - RAPIDJSON_FORCEINLINE Member* SetMembersPointer(Member* members) { return RAPIDJSON_SETPOINTER(Member, data_.o.members, members); } - - // Initialize this value as array with initial data, without calling destructor. - void SetArrayRaw(GenericValue* values, SizeType count, Allocator& allocator) { - data_.f.flags = kArrayFlag; - if (count) { - GenericValue* e = static_cast<GenericValue*>(allocator.Malloc(count * sizeof(GenericValue))); - SetElementsPointer(e); - std::memcpy(e, values, count * sizeof(GenericValue)); - } - else - SetElementsPointer(0); - data_.a.size = data_.a.capacity = count; - } - - //! Initialize this value as object with initial data, without calling destructor. - void SetObjectRaw(Member* members, SizeType count, Allocator& allocator) { - data_.f.flags = kObjectFlag; - if (count) { - Member* m = static_cast<Member*>(allocator.Malloc(count * sizeof(Member))); - SetMembersPointer(m); - std::memcpy(m, members, count * sizeof(Member)); - } - else - SetMembersPointer(0); - data_.o.size = data_.o.capacity = count; - } - - //! Initialize this value as constant string, without calling destructor. - void SetStringRaw(StringRefType s) RAPIDJSON_NOEXCEPT { - data_.f.flags = kConstStringFlag; - SetStringPointer(s); - data_.s.length = s.length; - } - - //! Initialize this value as copy string with initial data, without calling destructor. 
- void SetStringRaw(StringRefType s, Allocator& allocator) { - Ch* str = 0; - if (ShortString::Usable(s.length)) { - data_.f.flags = kShortStringFlag; - data_.ss.SetLength(s.length); - str = data_.ss.str; - } else { - data_.f.flags = kCopyStringFlag; - data_.s.length = s.length; - str = static_cast<Ch *>(allocator.Malloc((s.length + 1) * sizeof(Ch))); - SetStringPointer(str); - } - std::memcpy(str, s, s.length * sizeof(Ch)); - str[s.length] = '\0'; - } - - //! Assignment without calling destructor - void RawAssign(GenericValue& rhs) RAPIDJSON_NOEXCEPT { - data_ = rhs.data_; - // data_.f.flags = rhs.data_.f.flags; - rhs.data_.f.flags = kNullFlag; - } - - template <typename SourceAllocator> - bool StringEqual(const GenericValue<Encoding, SourceAllocator>& rhs) const { - RAPIDJSON_ASSERT(IsString()); - RAPIDJSON_ASSERT(rhs.IsString()); - - const SizeType len1 = GetStringLength(); - const SizeType len2 = rhs.GetStringLength(); - if(len1 != len2) { return false; } - - const Ch* const str1 = GetString(); - const Ch* const str2 = rhs.GetString(); - if(str1 == str2) { return true; } // fast path for constant string - - return (std::memcmp(str1, str2, sizeof(Ch) * len1) == 0); - } - - Data data_; -}; - -//! GenericValue with UTF8 encoding -typedef GenericValue<UTF8<> > Value; - -/////////////////////////////////////////////////////////////////////////////// -// GenericDocument - -//! A document for parsing JSON text as DOM. -/*! - \note implements Handler concept - \tparam Encoding Encoding for both parsing and string storage. - \tparam Allocator Allocator for allocating memory for the DOM - \tparam StackAllocator Allocator for allocating memory for stack during parsing. - \warning Although GenericDocument inherits from GenericValue, the API does \b not provide any virtual functions, especially no virtual destructor. To avoid memory leaks, do not \c delete a GenericDocument object via a pointer to a GenericValue. -*/ -template <typename Encoding, typename Allocator = MemoryPoolAllocator<>, typename StackAllocator = CrtAllocator> -class GenericDocument : public GenericValue<Encoding, Allocator> { -public: - typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. - typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of the document. - typedef Allocator AllocatorType; //!< Allocator type from template parameter. - - //! Constructor - /*! Creates an empty document of specified type. - \param type Mandatory type of object to create. - \param allocator Optional allocator for allocating memory. - \param stackCapacity Optional initial capacity of stack in bytes. - \param stackAllocator Optional allocator for allocating memory for stack. - */ - explicit GenericDocument(Type type, Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) : - GenericValue<Encoding, Allocator>(type), allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() - { - if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); - } - - //! Constructor - /*! Creates an empty document which type is Null. - \param allocator Optional allocator for allocating memory. - \param stackCapacity Optional initial capacity of stack in bytes. - \param stackAllocator Optional allocator for allocating memory for stack. 
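
GenericDocument, declared above, is a GenericValue plus an allocator and a parse stack, and its template parameters can be substituted; a hedged sketch that swaps in CrtAllocator (the type used above as the default StackAllocator) for the value allocator:

\code
#include "rapidjson/document.h"
using namespace rapidjson;

// Document is GenericDocument<UTF8<> >; other encodings and allocators are chosen the same way.
typedef GenericDocument<UTF8<>, CrtAllocator> CrtDocument;

void AllocatorExample() {
    CrtDocument doc(kObjectType);                    // the explicit constructor above sets the root type
    doc.AddMember("ok", true, doc.GetAllocator());   // GetAllocator() now hands back a CrtAllocator
}
\endcode
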
- */ - GenericDocument(Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) : - allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() - { - if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - //! Move constructor in C++11 - GenericDocument(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT - : ValueType(std::forward<ValueType>(rhs)), // explicit cast to avoid prohibited move from Document - allocator_(rhs.allocator_), - ownAllocator_(rhs.ownAllocator_), - stack_(std::move(rhs.stack_)), - parseResult_(rhs.parseResult_) - { - rhs.allocator_ = 0; - rhs.ownAllocator_ = 0; - rhs.parseResult_ = ParseResult(); - } -#endif - - ~GenericDocument() { - Destroy(); - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - //! Move assignment in C++11 - GenericDocument& operator=(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT - { - // The cast to ValueType is necessary here, because otherwise it would - // attempt to call GenericValue's templated assignment operator. - ValueType::operator=(std::forward<ValueType>(rhs)); - - // Calling the destructor here would prematurely call stack_'s destructor - Destroy(); - - allocator_ = rhs.allocator_; - ownAllocator_ = rhs.ownAllocator_; - stack_ = std::move(rhs.stack_); - parseResult_ = rhs.parseResult_; - - rhs.allocator_ = 0; - rhs.ownAllocator_ = 0; - rhs.parseResult_ = ParseResult(); - - return *this; - } -#endif - - //! Exchange the contents of this document with those of another. - /*! - \param rhs Another document. - \note Constant complexity. - \see GenericValue::Swap - */ - GenericDocument& Swap(GenericDocument& rhs) RAPIDJSON_NOEXCEPT { - ValueType::Swap(rhs); - stack_.Swap(rhs.stack_); - internal::Swap(allocator_, rhs.allocator_); - internal::Swap(ownAllocator_, rhs.ownAllocator_); - internal::Swap(parseResult_, rhs.parseResult_); - return *this; - } - - //! free-standing swap function helper - /*! - Helper function to enable support for common swap implementation pattern based on \c std::swap: - \code - void swap(MyClass& a, MyClass& b) { - using std::swap; - swap(a.doc, b.doc); - // ... - } - \endcode - \see Swap() - */ - friend inline void swap(GenericDocument& a, GenericDocument& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } - - //! Populate this document by a generator which produces SAX events. - /*! \tparam Generator A functor with <tt>bool f(Handler)</tt> prototype. - \param g Generator functor which sends SAX events to the parameter. - \return The document itself for fluent API. - */ - template <typename Generator> - GenericDocument& Populate(Generator& g) { - ClearStackOnExit scope(*this); - if (g(*this)) { - RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object - ValueType::operator=(*stack_.template Pop<ValueType>(1));// Move value from stack to document - } - return *this; - } - - //!@name Parse from stream - //!@{ - - //! Parse JSON text from an input stream (with Encoding conversion) - /*! \tparam parseFlags Combination of \ref ParseFlag. - \tparam SourceEncoding Encoding of input stream - \tparam InputStream Type of input stream, implementing Stream concept - \param is Input stream to be parsed. - \return The document itself for fluent API. - */ - template <unsigned parseFlags, typename SourceEncoding, typename InputStream> - GenericDocument& ParseStream(InputStream& is) { - GenericReader<SourceEncoding, Encoding, StackAllocator> reader( - stack_.HasAllocator() ? 
&stack_.GetAllocator() : 0); - ClearStackOnExit scope(*this); - parseResult_ = reader.template Parse<parseFlags>(is, *this); - if (parseResult_) { - RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object - ValueType::operator=(*stack_.template Pop<ValueType>(1));// Move value from stack to document - } - return *this; - } - - //! Parse JSON text from an input stream - /*! \tparam parseFlags Combination of \ref ParseFlag. - \tparam InputStream Type of input stream, implementing Stream concept - \param is Input stream to be parsed. - \return The document itself for fluent API. - */ - template <unsigned parseFlags, typename InputStream> - GenericDocument& ParseStream(InputStream& is) { - return ParseStream<parseFlags, Encoding, InputStream>(is); - } - - //! Parse JSON text from an input stream (with \ref kParseDefaultFlags) - /*! \tparam InputStream Type of input stream, implementing Stream concept - \param is Input stream to be parsed. - \return The document itself for fluent API. - */ - template <typename InputStream> - GenericDocument& ParseStream(InputStream& is) { - return ParseStream<kParseDefaultFlags, Encoding, InputStream>(is); - } - //!@} - - //!@name Parse in-place from mutable string - //!@{ - - //! Parse JSON text from a mutable string - /*! \tparam parseFlags Combination of \ref ParseFlag. - \param str Mutable zero-terminated string to be parsed. - \return The document itself for fluent API. - */ - template <unsigned parseFlags> - GenericDocument& ParseInsitu(Ch* str) { - GenericInsituStringStream<Encoding> s(str); - return ParseStream<parseFlags | kParseInsituFlag>(s); - } - - //! Parse JSON text from a mutable string (with \ref kParseDefaultFlags) - /*! \param str Mutable zero-terminated string to be parsed. - \return The document itself for fluent API. - */ - GenericDocument& ParseInsitu(Ch* str) { - return ParseInsitu<kParseDefaultFlags>(str); - } - //!@} - - //!@name Parse from read-only string - //!@{ - - //! Parse JSON text from a read-only string (with Encoding conversion) - /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). - \tparam SourceEncoding Transcoding from input Encoding - \param str Read-only zero-terminated string to be parsed. - */ - template <unsigned parseFlags, typename SourceEncoding> - GenericDocument& Parse(const typename SourceEncoding::Ch* str) { - RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); - GenericStringStream<SourceEncoding> s(str); - return ParseStream<parseFlags, SourceEncoding>(s); - } - - //! Parse JSON text from a read-only string - /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). - \param str Read-only zero-terminated string to be parsed. - */ - template <unsigned parseFlags> - GenericDocument& Parse(const Ch* str) { - return Parse<parseFlags, Encoding>(str); - } - - //! Parse JSON text from a read-only string (with \ref kParseDefaultFlags) - /*! \param str Read-only zero-terminated string to be parsed. 
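
The Parse()/ParseInsitu() overloads above differ mainly in buffer ownership: ParseInsitu() mutates the caller's buffer and lets string values point into it, while Parse() copies. A sketch; the JSON text is invented, and the non-default parse flags assume the bundled reader.h defines kParseCommentsFlag and kParseTrailingCommasFlag:

\code
#include "rapidjson/document.h"
using namespace rapidjson;

void ParseExample() {
    // In-situ: the buffer must stay alive as long as the document's strings are used.
    char buffer[] = "{\"iface\":\"tap0\",\"mtu\":2800}";
    Document d1;
    d1.ParseInsitu(buffer);

    // Read-only string, with non-default flags selected at compile time.
    Document d2;
    d2.Parse<kParseCommentsFlag | kParseTrailingCommasFlag>(
        "{ \"iface\": \"tap0\", /* comment */ }");
}
\endcode
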
- */ - GenericDocument& Parse(const Ch* str) { - return Parse<kParseDefaultFlags>(str); - } - - template <unsigned parseFlags, typename SourceEncoding> - GenericDocument& Parse(const typename SourceEncoding::Ch* str, size_t length) { - RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); - MemoryStream ms(static_cast<const char*>(str), length * sizeof(typename SourceEncoding::Ch)); - EncodedInputStream<SourceEncoding, MemoryStream> is(ms); - ParseStream<parseFlags, SourceEncoding>(is); - return *this; - } - - template <unsigned parseFlags> - GenericDocument& Parse(const Ch* str, size_t length) { - return Parse<parseFlags, Encoding>(str, length); - } - - GenericDocument& Parse(const Ch* str, size_t length) { - return Parse<kParseDefaultFlags>(str, length); - } - -#if RAPIDJSON_HAS_STDSTRING - template <unsigned parseFlags, typename SourceEncoding> - GenericDocument& Parse(const std::basic_string<typename SourceEncoding::Ch>& str) { - // c_str() is constant complexity according to standard. Should be faster than Parse(const char*, size_t) - return Parse<parseFlags, SourceEncoding>(str.c_str()); - } - - template <unsigned parseFlags> - GenericDocument& Parse(const std::basic_string<Ch>& str) { - return Parse<parseFlags, Encoding>(str.c_str()); - } - - GenericDocument& Parse(const std::basic_string<Ch>& str) { - return Parse<kParseDefaultFlags>(str); - } -#endif // RAPIDJSON_HAS_STDSTRING - - //!@} - - //!@name Handling parse errors - //!@{ - - //! Whether a parse error has occured in the last parsing. - bool HasParseError() const { return parseResult_.IsError(); } - - //! Get the \ref ParseErrorCode of last parsing. - ParseErrorCode GetParseError() const { return parseResult_.Code(); } - - //! Get the position of last parsing error in input, 0 otherwise. - size_t GetErrorOffset() const { return parseResult_.Offset(); } - - //! Implicit conversion to get the last parse result -#ifndef __clang // -Wdocumentation - /*! \return \ref ParseResult of the last parse operation - - \code - Document doc; - ParseResult ok = doc.Parse(json); - if (!ok) - printf( "JSON parse error: %s (%u)\n", GetParseError_En(ok.Code()), ok.Offset()); - \endcode - */ -#endif - operator ParseResult() const { return parseResult_; } - //!@} - - //! Get the allocator of this document. - Allocator& GetAllocator() { - RAPIDJSON_ASSERT(allocator_); - return *allocator_; - } - - //! Get the capacity of stack in bytes. - size_t GetStackCapacity() const { return stack_.GetCapacity(); } - -private: - // clear stack on any exit from ParseStream, e.g. 
due to exception - struct ClearStackOnExit { - explicit ClearStackOnExit(GenericDocument& d) : d_(d) {} - ~ClearStackOnExit() { d_.ClearStack(); } - private: - ClearStackOnExit(const ClearStackOnExit&); - ClearStackOnExit& operator=(const ClearStackOnExit&); - GenericDocument& d_; - }; - - // callers of the following private Handler functions - // template <typename,typename,typename> friend class GenericReader; // for parsing - template <typename, typename> friend class GenericValue; // for deep copying - -public: - // Implementation of Handler - bool Null() { new (stack_.template Push<ValueType>()) ValueType(); return true; } - bool Bool(bool b) { new (stack_.template Push<ValueType>()) ValueType(b); return true; } - bool Int(int i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; } - bool Uint(unsigned i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; } - bool Int64(int64_t i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; } - bool Uint64(uint64_t i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; } - bool Double(double d) { new (stack_.template Push<ValueType>()) ValueType(d); return true; } - - bool RawNumber(const Ch* str, SizeType length, bool copy) { - if (copy) - new (stack_.template Push<ValueType>()) ValueType(str, length, GetAllocator()); - else - new (stack_.template Push<ValueType>()) ValueType(str, length); - return true; - } - - bool String(const Ch* str, SizeType length, bool copy) { - if (copy) - new (stack_.template Push<ValueType>()) ValueType(str, length, GetAllocator()); - else - new (stack_.template Push<ValueType>()) ValueType(str, length); - return true; - } - - bool StartObject() { new (stack_.template Push<ValueType>()) ValueType(kObjectType); return true; } - - bool Key(const Ch* str, SizeType length, bool copy) { return String(str, length, copy); } - - bool EndObject(SizeType memberCount) { - typename ValueType::Member* members = stack_.template Pop<typename ValueType::Member>(memberCount); - stack_.template Top<ValueType>()->SetObjectRaw(members, memberCount, GetAllocator()); - return true; - } - - bool StartArray() { new (stack_.template Push<ValueType>()) ValueType(kArrayType); return true; } - - bool EndArray(SizeType elementCount) { - ValueType* elements = stack_.template Pop<ValueType>(elementCount); - stack_.template Top<ValueType>()->SetArrayRaw(elements, elementCount, GetAllocator()); - return true; - } - -private: - //! Prohibit copying - GenericDocument(const GenericDocument&); - //! Prohibit assignment - GenericDocument& operator=(const GenericDocument&); - - void ClearStack() { - if (Allocator::kNeedFree) - while (stack_.GetSize() > 0) // Here assumes all elements in stack array are GenericValue (Member is actually 2 GenericValue objects) - (stack_.template Pop<ValueType>(1))->~ValueType(); - else - stack_.Clear(); - stack_.ShrinkToFit(); - } - - void Destroy() { - RAPIDJSON_DELETE(ownAllocator_); - } - - static const size_t kDefaultStackCapacity = 1024; - Allocator* allocator_; - Allocator* ownAllocator_; - internal::Stack<StackAllocator> stack_; - ParseResult parseResult_; -}; - -//! 
GenericDocument with UTF8 encoding -typedef GenericDocument<UTF8<> > Document; - -// defined here due to the dependency on GenericDocument -template <typename Encoding, typename Allocator> -template <typename SourceAllocator> -inline -GenericValue<Encoding,Allocator>::GenericValue(const GenericValue<Encoding,SourceAllocator>& rhs, Allocator& allocator) -{ - switch (rhs.GetType()) { - case kObjectType: - case kArrayType: { // perform deep copy via SAX Handler - GenericDocument<Encoding,Allocator> d(&allocator); - rhs.Accept(d); - RawAssign(*d.stack_.template Pop<GenericValue>(1)); - } - break; - case kStringType: - if (rhs.data_.f.flags == kConstStringFlag) { - data_.f.flags = rhs.data_.f.flags; - data_ = *reinterpret_cast<const Data*>(&rhs.data_); - } else { - SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator); - } - break; - default: - data_.f.flags = rhs.data_.f.flags; - data_ = *reinterpret_cast<const Data*>(&rhs.data_); - break; - } -} - -//! Helper class for accessing Value of array type. -/*! - Instance of this helper class is obtained by \c GenericValue::GetArray(). - In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. -*/ -template <bool Const, typename ValueT> -class GenericArray { -public: - typedef GenericArray<true, ValueT> ConstArray; - typedef GenericArray<false, ValueT> Array; - typedef ValueT PlainType; - typedef typename internal::MaybeAddConst<Const,PlainType>::Type ValueType; - typedef ValueType* ValueIterator; // This may be const or non-const iterator - typedef const ValueT* ConstValueIterator; - typedef typename ValueType::AllocatorType AllocatorType; - typedef typename ValueType::StringRefType StringRefType; - - template <typename, typename> - friend class GenericValue; - - GenericArray(const GenericArray& rhs) : value_(rhs.value_) {} - GenericArray& operator=(const GenericArray& rhs) { value_ = rhs.value_; return *this; } - ~GenericArray() {} - - SizeType Size() const { return value_.Size(); } - SizeType Capacity() const { return value_.Capacity(); } - bool Empty() const { return value_.Empty(); } - void Clear() const { value_.Clear(); } - ValueType& operator[](SizeType index) const { return value_[index]; } - ValueIterator Begin() const { return value_.Begin(); } - ValueIterator End() const { return value_.End(); } - GenericArray Reserve(SizeType newCapacity, AllocatorType &allocator) const { value_.Reserve(newCapacity, allocator); return *this; } - GenericArray PushBack(ValueType& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericArray PushBack(ValueType&& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } -#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericArray PushBack(StringRefType value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } - template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (const GenericArray&)) PushBack(T value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } - GenericArray PopBack() const { value_.PopBack(); return *this; } - ValueIterator Erase(ConstValueIterator pos) const { return value_.Erase(pos); } - ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) const { return value_.Erase(first, last); } - -#if RAPIDJSON_HAS_CXX11_RANGE_FOR - ValueIterator begin() const { 
return value_.Begin(); } - ValueIterator end() const { return value_.End(); } -#endif - -private: - GenericArray(); - GenericArray(ValueType& value) : value_(value) {} - ValueType& value_; -}; - -//! Helper class for accessing Value of object type. -/*! - Instance of this helper class is obtained by \c GenericValue::GetObject(). - In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. -*/ -template <bool Const, typename ValueT> -class GenericObject { -public: - typedef GenericObject<true, ValueT> ConstObject; - typedef GenericObject<false, ValueT> Object; - typedef ValueT PlainType; - typedef typename internal::MaybeAddConst<Const,PlainType>::Type ValueType; - typedef GenericMemberIterator<Const, typename ValueT::EncodingType, typename ValueT::AllocatorType> MemberIterator; // This may be const or non-const iterator - typedef GenericMemberIterator<true, typename ValueT::EncodingType, typename ValueT::AllocatorType> ConstMemberIterator; - typedef typename ValueType::AllocatorType AllocatorType; - typedef typename ValueType::StringRefType StringRefType; - typedef typename ValueType::EncodingType EncodingType; - typedef typename ValueType::Ch Ch; - - template <typename, typename> - friend class GenericValue; - - GenericObject(const GenericObject& rhs) : value_(rhs.value_) {} - GenericObject& operator=(const GenericObject& rhs) { value_ = rhs.value_; return *this; } - ~GenericObject() {} - - SizeType MemberCount() const { return value_.MemberCount(); } - bool ObjectEmpty() const { return value_.ObjectEmpty(); } - template <typename T> ValueType& operator[](T* name) const { return value_[name]; } - template <typename SourceAllocator> ValueType& operator[](const GenericValue<EncodingType, SourceAllocator>& name) const { return value_[name]; } -#if RAPIDJSON_HAS_STDSTRING - ValueType& operator[](const std::basic_string<Ch>& name) const { return value_[name]; } -#endif - MemberIterator MemberBegin() const { return value_.MemberBegin(); } - MemberIterator MemberEnd() const { return value_.MemberEnd(); } - bool HasMember(const Ch* name) const { return value_.HasMember(name); } -#if RAPIDJSON_HAS_STDSTRING - bool HasMember(const std::basic_string<Ch>& name) const { return value_.HasMember(name); } -#endif - template <typename SourceAllocator> bool HasMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.HasMember(name); } - MemberIterator FindMember(const Ch* name) const { return value_.FindMember(name); } - template <typename SourceAllocator> MemberIterator FindMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.FindMember(name); } -#if RAPIDJSON_HAS_STDSTRING - MemberIterator FindMember(const std::basic_string<Ch>& name) const { return value_.FindMember(name); } -#endif - GenericObject AddMember(ValueType& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - GenericObject AddMember(ValueType& name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } -#if RAPIDJSON_HAS_STDSTRING - GenericObject AddMember(ValueType& name, std::basic_string<Ch>& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } -#endif - template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&)) AddMember(ValueType& name, T value, AllocatorType& allocator) const { 
value_.AddMember(name, value, allocator); return *this; } -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericObject AddMember(ValueType&& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - GenericObject AddMember(ValueType&& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - GenericObject AddMember(ValueType& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - GenericObject AddMember(StringRefType name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } -#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericObject AddMember(StringRefType name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - GenericObject AddMember(StringRefType name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericObject)) AddMember(StringRefType name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - void RemoveAllMembers() { return value_.RemoveAllMembers(); } - bool RemoveMember(const Ch* name) const { return value_.RemoveMember(name); } -#if RAPIDJSON_HAS_STDSTRING - bool RemoveMember(const std::basic_string<Ch>& name) const { return value_.RemoveMember(name); } -#endif - template <typename SourceAllocator> bool RemoveMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.RemoveMember(name); } - MemberIterator RemoveMember(MemberIterator m) const { return value_.RemoveMember(m); } - MemberIterator EraseMember(ConstMemberIterator pos) const { return value_.EraseMember(pos); } - MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) const { return value_.EraseMember(first, last); } - bool EraseMember(const Ch* name) const { return value_.EraseMember(name); } -#if RAPIDJSON_HAS_STDSTRING - bool EraseMember(const std::basic_string<Ch>& name) const { return EraseMember(ValueType(StringRef(name))); } -#endif - template <typename SourceAllocator> bool EraseMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.EraseMember(name); } - -#if RAPIDJSON_HAS_CXX11_RANGE_FOR - MemberIterator begin() const { return value_.MemberBegin(); } - MemberIterator end() const { return value_.MemberEnd(); } -#endif - -private: - GenericObject(); - GenericObject(ValueType& value) : value_(value) {} - ValueType& value_; -}; - -RAPIDJSON_NAMESPACE_END -RAPIDJSON_DIAG_POP - -#endif // RAPIDJSON_DOCUMENT_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/encodedstream.h b/ext/librethinkdbxx/src/rapidjson/encodedstream.h deleted file mode 100644 index 14506838..00000000 --- a/ext/librethinkdbxx/src/rapidjson/encodedstream.h +++ /dev/null @@ -1,299 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_ENCODEDSTREAM_H_ -#define RAPIDJSON_ENCODEDSTREAM_H_ - -#include "stream.h" -#include "memorystream.h" - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Input byte stream wrapper with a statically bound encoding. -/*! - \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. - \tparam InputByteStream Type of input byte stream. For example, FileReadStream. -*/ -template <typename Encoding, typename InputByteStream> -class EncodedInputStream { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); -public: - typedef typename Encoding::Ch Ch; - - EncodedInputStream(InputByteStream& is) : is_(is) { - current_ = Encoding::TakeBOM(is_); - } - - Ch Peek() const { return current_; } - Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; } - size_t Tell() const { return is_.Tell(); } - - // Not implemented - void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - -private: - EncodedInputStream(const EncodedInputStream&); - EncodedInputStream& operator=(const EncodedInputStream&); - - InputByteStream& is_; - Ch current_; -}; - -//! Specialized for UTF8 MemoryStream. -template <> -class EncodedInputStream<UTF8<>, MemoryStream> { -public: - typedef UTF8<>::Ch Ch; - - EncodedInputStream(MemoryStream& is) : is_(is) { - if (static_cast<unsigned char>(is_.Peek()) == 0xEFu) is_.Take(); - if (static_cast<unsigned char>(is_.Peek()) == 0xBBu) is_.Take(); - if (static_cast<unsigned char>(is_.Peek()) == 0xBFu) is_.Take(); - } - Ch Peek() const { return is_.Peek(); } - Ch Take() { return is_.Take(); } - size_t Tell() const { return is_.Tell(); } - - // Not implemented - void Put(Ch) {} - void Flush() {} - Ch* PutBegin() { return 0; } - size_t PutEnd(Ch*) { return 0; } - - MemoryStream& is_; - -private: - EncodedInputStream(const EncodedInputStream&); - EncodedInputStream& operator=(const EncodedInputStream&); -}; - -//! Output byte stream wrapper with statically bound encoding. -/*! - \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. - \tparam OutputByteStream Type of input byte stream. For example, FileWriteStream. 
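A minimal usage sketch for the wrapper documented here (the output path "out_utf16le.json", the 256-byte buffer, and the pre-built UTF-8 Document d are assumptions for illustration): EncodedOutputStream is layered over a byte stream such as FileWriteStream, and a Writer whose target encoding matches the wrapper transcodes while serializing.

#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/filewritestream.h"
#include "rapidjson/encodedstream.h"
using namespace rapidjson;

void WriteUtf16Le(const Document& d) {                      // d: assumed pre-built UTF-8 document
    std::FILE* fp = std::fopen("out_utf16le.json", "wb");   // assumed output path
    char writeBuffer[256];
    FileWriteStream bos(fp, writeBuffer, sizeof(writeBuffer));
    typedef EncodedOutputStream<UTF16LE<>, FileWriteStream> OutputStream;
    OutputStream eos(bos, true);                             // true: emit the UTF-16LE BOM first
    Writer<OutputStream, UTF8<>, UTF16LE<> > writer(eos);    // source UTF-8, target UTF-16LE
    d.Accept(writer);                                        // transcodes on the fly
    eos.Flush();
    std::fclose(fp);
}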
-*/ -template <typename Encoding, typename OutputByteStream> -class EncodedOutputStream { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); -public: - typedef typename Encoding::Ch Ch; - - EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) { - if (putBOM) - Encoding::PutBOM(os_); - } - - void Put(Ch c) { Encoding::Put(os_, c); } - void Flush() { os_.Flush(); } - - // Not implemented - Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} - Ch Take() { RAPIDJSON_ASSERT(false); return 0;} - size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - -private: - EncodedOutputStream(const EncodedOutputStream&); - EncodedOutputStream& operator=(const EncodedOutputStream&); - - OutputByteStream& os_; -}; - -#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x - -//! Input stream wrapper with dynamically bound encoding and automatic encoding detection. -/*! - \tparam CharType Type of character for reading. - \tparam InputByteStream type of input byte stream to be wrapped. -*/ -template <typename CharType, typename InputByteStream> -class AutoUTFInputStream { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); -public: - typedef CharType Ch; - - //! Constructor. - /*! - \param is input stream to be wrapped. - \param type UTF encoding type if it is not detected from the stream. - */ - AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) { - RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); - DetectType(); - static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) }; - takeFunc_ = f[type_]; - current_ = takeFunc_(*is_); - } - - UTFType GetType() const { return type_; } - bool HasBOM() const { return hasBOM_; } - - Ch Peek() const { return current_; } - Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; } - size_t Tell() const { return is_->Tell(); } - - // Not implemented - void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - -private: - AutoUTFInputStream(const AutoUTFInputStream&); - AutoUTFInputStream& operator=(const AutoUTFInputStream&); - - // Detect encoding type with BOM or RFC 4627 - void DetectType() { - // BOM (Byte Order Mark): - // 00 00 FE FF UTF-32BE - // FF FE 00 00 UTF-32LE - // FE FF UTF-16BE - // FF FE UTF-16LE - // EF BB BF UTF-8 - - const unsigned char* c = reinterpret_cast<const unsigned char *>(is_->Peek4()); - if (!c) - return; - - unsigned bom = static_cast<unsigned>(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24)); - hasBOM_ = false; - if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } - else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } - else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); } - else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); } - else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); } - - // RFC 4627: Section 3 - // "Since the first two characters of a JSON text will always be ASCII - // characters [RFC0020], it is possible to 
determine whether an octet - // stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking - // at the pattern of nulls in the first four octets." - // 00 00 00 xx UTF-32BE - // 00 xx 00 xx UTF-16BE - // xx 00 00 00 UTF-32LE - // xx 00 xx 00 UTF-16LE - // xx xx xx xx UTF-8 - - if (!hasBOM_) { - unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0); - switch (pattern) { - case 0x08: type_ = kUTF32BE; break; - case 0x0A: type_ = kUTF16BE; break; - case 0x01: type_ = kUTF32LE; break; - case 0x05: type_ = kUTF16LE; break; - case 0x0F: type_ = kUTF8; break; - default: break; // Use type defined by user. - } - } - - // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. - if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); - if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); - } - - typedef Ch (*TakeFunc)(InputByteStream& is); - InputByteStream* is_; - UTFType type_; - Ch current_; - TakeFunc takeFunc_; - bool hasBOM_; -}; - -//! Output stream wrapper with dynamically bound encoding and automatic encoding detection. -/*! - \tparam CharType Type of character for writing. - \tparam OutputByteStream type of output byte stream to be wrapped. -*/ -template <typename CharType, typename OutputByteStream> -class AutoUTFOutputStream { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); -public: - typedef CharType Ch; - - //! Constructor. - /*! - \param os output stream to be wrapped. - \param type UTF encoding type. - \param putBOM Whether to write BOM at the beginning of the stream. - */ - AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) { - RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); - - // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. 
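The BOM and RFC 4627 checks above are what let a caller parse a file whose encoding is not known in advance. A minimal sketch of the usual combination with FileReadStream and Document::ParseStream (the input path "input.json" and the 256-byte buffer are assumptions); the in-memory document stays UTF-8 while AutoUTF transcodes during parsing.

#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/filereadstream.h"
#include "rapidjson/encodedstream.h"
using namespace rapidjson;

int main() {
    std::FILE* fp = std::fopen("input.json", "rb");          // UTF-8/16/32, BOM optional
    char readBuffer[256];
    FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
    AutoUTFInputStream<unsigned, FileReadStream> eis(bis);   // DetectType() runs in this constructor
    Document d;                                              // in-memory representation is UTF-8
    d.ParseStream<0, AutoUTF<unsigned> >(eis);               // 0 = default parse flags
    std::printf("encoding=%d bom=%d\n", (int)eis.GetType(), (int)eis.HasBOM());
    std::fclose(fp);
    return 0;
}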
- if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); - if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); - - static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) }; - putFunc_ = f[type_]; - - if (putBOM) - PutBOM(); - } - - UTFType GetType() const { return type_; } - - void Put(Ch c) { putFunc_(*os_, c); } - void Flush() { os_->Flush(); } - - // Not implemented - Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} - Ch Take() { RAPIDJSON_ASSERT(false); return 0;} - size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - -private: - AutoUTFOutputStream(const AutoUTFOutputStream&); - AutoUTFOutputStream& operator=(const AutoUTFOutputStream&); - - void PutBOM() { - typedef void (*PutBOMFunc)(OutputByteStream&); - static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) }; - f[type_](*os_); - } - - typedef void (*PutFunc)(OutputByteStream&, Ch); - - OutputByteStream* os_; - UTFType type_; - PutFunc putFunc_; -}; - -#undef RAPIDJSON_ENCODINGS_FUNC - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#ifdef __GNUC__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/encodings.h b/ext/librethinkdbxx/src/rapidjson/encodings.h deleted file mode 100644 index baa7c2b1..00000000 --- a/ext/librethinkdbxx/src/rapidjson/encodings.h +++ /dev/null @@ -1,716 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_ENCODINGS_H_ -#define RAPIDJSON_ENCODINGS_H_ - -#include "rapidjson.h" - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data -RAPIDJSON_DIAG_OFF(4702) // unreachable code -#elif defined(__GNUC__) -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -RAPIDJSON_DIAG_OFF(overflow) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// Encoding - -/*! \class rapidjson::Encoding - \brief Concept for encoding of Unicode characters. - -\code -concept Encoding { - typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition. - - enum { supportUnicode = 1 }; // or 0 if not supporting unicode - - //! \brief Encode a Unicode codepoint to an output stream. - //! \param os Output stream. - //! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively. - template<typename OutputStream> - static void Encode(OutputStream& os, unsigned codepoint); - - //! \brief Decode a Unicode codepoint from an input stream. - //! \param is Input stream. - //! \param codepoint Output of the unicode codepoint. - //! \return true if a valid codepoint can be decoded from the stream. 
- template <typename InputStream> - static bool Decode(InputStream& is, unsigned* codepoint); - - //! \brief Validate one Unicode codepoint from an encoded stream. - //! \param is Input stream to obtain codepoint. - //! \param os Output for copying one codepoint. - //! \return true if it is valid. - //! \note This function just validating and copying the codepoint without actually decode it. - template <typename InputStream, typename OutputStream> - static bool Validate(InputStream& is, OutputStream& os); - - // The following functions are deal with byte streams. - - //! Take a character from input byte stream, skip BOM if exist. - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is); - - //! Take a character from input byte stream. - template <typename InputByteStream> - static Ch Take(InputByteStream& is); - - //! Put BOM to output byte stream. - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os); - - //! Put a character to output byte stream. - template <typename OutputByteStream> - static void Put(OutputByteStream& os, Ch c); -}; -\endcode -*/ - -/////////////////////////////////////////////////////////////////////////////// -// UTF8 - -//! UTF-8 encoding. -/*! http://en.wikipedia.org/wiki/UTF-8 - http://tools.ietf.org/html/rfc3629 - \tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char. - \note implements Encoding concept -*/ -template<typename CharType = char> -struct UTF8 { - typedef CharType Ch; - - enum { supportUnicode = 1 }; - - template<typename OutputStream> - static void Encode(OutputStream& os, unsigned codepoint) { - if (codepoint <= 0x7F) - os.Put(static_cast<Ch>(codepoint & 0xFF)); - else if (codepoint <= 0x7FF) { - os.Put(static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF))); - os.Put(static_cast<Ch>(0x80 | ((codepoint & 0x3F)))); - } - else if (codepoint <= 0xFFFF) { - os.Put(static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF))); - os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F))); - os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F))); - } - else { - RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); - os.Put(static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF))); - os.Put(static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F))); - os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F))); - os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F))); - } - } - - template<typename OutputStream> - static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { - if (codepoint <= 0x7F) - PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF)); - else if (codepoint <= 0x7FF) { - PutUnsafe(os, static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF))); - PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint & 0x3F)))); - } - else if (codepoint <= 0xFFFF) { - PutUnsafe(os, static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF))); - PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F))); - PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F))); - } - else { - RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); - PutUnsafe(os, static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF))); - PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F))); - PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F))); - PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F))); - } - } - - template <typename InputStream> - static bool Decode(InputStream& is, unsigned* codepoint) { -#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast<unsigned char>(c) & 0x3Fu) -#define TRANS(mask) result &= ((GetRange(static_cast<unsigned 
char>(c)) & mask) != 0) -#define TAIL() COPY(); TRANS(0x70) - typename InputStream::Ch c = is.Take(); - if (!(c & 0x80)) { - *codepoint = static_cast<unsigned char>(c); - return true; - } - - unsigned char type = GetRange(static_cast<unsigned char>(c)); - if (type >= 32) { - *codepoint = 0; - } else { - *codepoint = (0xFF >> type) & static_cast<unsigned char>(c); - } - bool result = true; - switch (type) { - case 2: TAIL(); return result; - case 3: TAIL(); TAIL(); return result; - case 4: COPY(); TRANS(0x50); TAIL(); return result; - case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; - case 6: TAIL(); TAIL(); TAIL(); return result; - case 10: COPY(); TRANS(0x20); TAIL(); return result; - case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; - default: return false; - } -#undef COPY -#undef TRANS -#undef TAIL - } - - template <typename InputStream, typename OutputStream> - static bool Validate(InputStream& is, OutputStream& os) { -#define COPY() os.Put(c = is.Take()) -#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0) -#define TAIL() COPY(); TRANS(0x70) - Ch c; - COPY(); - if (!(c & 0x80)) - return true; - - bool result = true; - switch (GetRange(static_cast<unsigned char>(c))) { - case 2: TAIL(); return result; - case 3: TAIL(); TAIL(); return result; - case 4: COPY(); TRANS(0x50); TAIL(); return result; - case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; - case 6: TAIL(); TAIL(); TAIL(); return result; - case 10: COPY(); TRANS(0x20); TAIL(); return result; - case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; - default: return false; - } -#undef COPY -#undef TRANS -#undef TAIL - } - - static unsigned char GetRange(unsigned char c) { - // Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ - // With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types. 
- static const unsigned char type[] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10, - 0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40, - 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, - 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, - 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, - 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, - }; - return type[c]; - } - - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - typename InputByteStream::Ch c = Take(is); - if (static_cast<unsigned char>(c) != 0xEFu) return c; - c = is.Take(); - if (static_cast<unsigned char>(c) != 0xBBu) return c; - c = is.Take(); - if (static_cast<unsigned char>(c) != 0xBFu) return c; - c = is.Take(); - return c; - } - - template <typename InputByteStream> - static Ch Take(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - return static_cast<Ch>(is.Take()); - } - - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(0xEFu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xBBu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xBFu)); - } - - template <typename OutputByteStream> - static void Put(OutputByteStream& os, Ch c) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(c)); - } -}; - -/////////////////////////////////////////////////////////////////////////////// -// UTF16 - -//! UTF-16 encoding. -/*! http://en.wikipedia.org/wiki/UTF-16 - http://tools.ietf.org/html/rfc2781 - \tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead. - \note implements Encoding concept - - \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. - For streaming, use UTF16LE and UTF16BE, which handle endianness. 
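The surrogate-pair arithmetic implemented by Encode() below can be checked with a small worked example (the helper function name is only illustrative): for U+1F600, v = 0x1F600 - 0x10000 = 0xF600, so the high surrogate is 0xD800 | (v >> 10) = 0xD83D and the low surrogate is 0xDC00 | (v & 0x3FF) = 0xDE00.

#include <cassert>
#include "rapidjson/encodings.h"
#include "rapidjson/stringbuffer.h"
using namespace rapidjson;

void SurrogatePairExample() {
    GenericStringBuffer<UTF16<> > sb;   // code units are wchar_t by default
    UTF16<>::Encode(sb, 0x1F600);       // U+1F600 lies outside the BMP
    assert(sb.GetString()[0] == 0xD83D);
    assert(sb.GetString()[1] == 0xDE00);
}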
-*/ -template<typename CharType = wchar_t> -struct UTF16 { - typedef CharType Ch; - RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2); - - enum { supportUnicode = 1 }; - - template<typename OutputStream> - static void Encode(OutputStream& os, unsigned codepoint) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); - if (codepoint <= 0xFFFF) { - RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair - os.Put(static_cast<typename OutputStream::Ch>(codepoint)); - } - else { - RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); - unsigned v = codepoint - 0x10000; - os.Put(static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800)); - os.Put((v & 0x3FF) | 0xDC00); - } - } - - - template<typename OutputStream> - static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); - if (codepoint <= 0xFFFF) { - RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair - PutUnsafe(os, static_cast<typename OutputStream::Ch>(codepoint)); - } - else { - RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); - unsigned v = codepoint - 0x10000; - PutUnsafe(os, static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800)); - PutUnsafe(os, (v & 0x3FF) | 0xDC00); - } - } - - template <typename InputStream> - static bool Decode(InputStream& is, unsigned* codepoint) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); - typename InputStream::Ch c = is.Take(); - if (c < 0xD800 || c > 0xDFFF) { - *codepoint = static_cast<unsigned>(c); - return true; - } - else if (c <= 0xDBFF) { - *codepoint = (static_cast<unsigned>(c) & 0x3FF) << 10; - c = is.Take(); - *codepoint |= (static_cast<unsigned>(c) & 0x3FF); - *codepoint += 0x10000; - return c >= 0xDC00 && c <= 0xDFFF; - } - return false; - } - - template <typename InputStream, typename OutputStream> - static bool Validate(InputStream& is, OutputStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); - typename InputStream::Ch c; - os.Put(static_cast<typename OutputStream::Ch>(c = is.Take())); - if (c < 0xD800 || c > 0xDFFF) - return true; - else if (c <= 0xDBFF) { - os.Put(c = is.Take()); - return c >= 0xDC00 && c <= 0xDFFF; - } - return false; - } -}; - -//! UTF-16 little endian encoding. -template<typename CharType = wchar_t> -struct UTF16LE : UTF16<CharType> { - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - CharType c = Take(is); - return static_cast<uint16_t>(c) == 0xFEFFu ? 
Take(is) : c; - } - - template <typename InputByteStream> - static CharType Take(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - unsigned c = static_cast<uint8_t>(is.Take()); - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8; - return static_cast<CharType>(c); - } - - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu)); - } - - template <typename OutputByteStream> - static void Put(OutputByteStream& os, CharType c) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu)); - } -}; - -//! UTF-16 big endian encoding. -template<typename CharType = wchar_t> -struct UTF16BE : UTF16<CharType> { - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - CharType c = Take(is); - return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c; - } - - template <typename InputByteStream> - static CharType Take(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8; - c |= static_cast<uint8_t>(is.Take()); - return static_cast<CharType>(c); - } - - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu)); - } - - template <typename OutputByteStream> - static void Put(OutputByteStream& os, CharType c) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu)); - } -}; - -/////////////////////////////////////////////////////////////////////////////// -// UTF32 - -//! UTF-32 encoding. -/*! http://en.wikipedia.org/wiki/UTF-32 - \tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead. - \note implements Encoding concept - - \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. - For streaming, use UTF32LE and UTF32BE, which handle endianness. 
-*/ -template<typename CharType = unsigned> -struct UTF32 { - typedef CharType Ch; - RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4); - - enum { supportUnicode = 1 }; - - template<typename OutputStream> - static void Encode(OutputStream& os, unsigned codepoint) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); - RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); - os.Put(codepoint); - } - - template<typename OutputStream> - static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); - RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); - PutUnsafe(os, codepoint); - } - - template <typename InputStream> - static bool Decode(InputStream& is, unsigned* codepoint) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); - Ch c = is.Take(); - *codepoint = c; - return c <= 0x10FFFF; - } - - template <typename InputStream, typename OutputStream> - static bool Validate(InputStream& is, OutputStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); - Ch c; - os.Put(c = is.Take()); - return c <= 0x10FFFF; - } -}; - -//! UTF-32 little endian enocoding. -template<typename CharType = unsigned> -struct UTF32LE : UTF32<CharType> { - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - CharType c = Take(is); - return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c; - } - - template <typename InputByteStream> - static CharType Take(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - unsigned c = static_cast<uint8_t>(is.Take()); - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8; - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16; - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24; - return static_cast<CharType>(c); - } - - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0x00u)); - os.Put(static_cast<typename OutputByteStream::Ch>(0x00u)); - } - - template <typename OutputByteStream> - static void Put(OutputByteStream& os, CharType c) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu)); - } -}; - -//! UTF-32 big endian encoding. -template<typename CharType = unsigned> -struct UTF32BE : UTF32<CharType> { - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - CharType c = Take(is); - return static_cast<uint32_t>(c) == 0x0000FEFFu ? 
Take(is) : c; - } - - template <typename InputByteStream> - static CharType Take(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24; - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16; - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8; - c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())); - return static_cast<CharType>(c); - } - - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(0x00u)); - os.Put(static_cast<typename OutputByteStream::Ch>(0x00u)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu)); - os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu)); - } - - template <typename OutputByteStream> - static void Put(OutputByteStream& os, CharType c) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu)); - os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu)); - } -}; - -/////////////////////////////////////////////////////////////////////////////// -// ASCII - -//! ASCII encoding. -/*! http://en.wikipedia.org/wiki/ASCII - \tparam CharType Code unit for storing 7-bit ASCII data. Default is char. - \note implements Encoding concept -*/ -template<typename CharType = char> -struct ASCII { - typedef CharType Ch; - - enum { supportUnicode = 0 }; - - template<typename OutputStream> - static void Encode(OutputStream& os, unsigned codepoint) { - RAPIDJSON_ASSERT(codepoint <= 0x7F); - os.Put(static_cast<Ch>(codepoint & 0xFF)); - } - - template<typename OutputStream> - static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { - RAPIDJSON_ASSERT(codepoint <= 0x7F); - PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF)); - } - - template <typename InputStream> - static bool Decode(InputStream& is, unsigned* codepoint) { - uint8_t c = static_cast<uint8_t>(is.Take()); - *codepoint = c; - return c <= 0X7F; - } - - template <typename InputStream, typename OutputStream> - static bool Validate(InputStream& is, OutputStream& os) { - uint8_t c = static_cast<uint8_t>(is.Take()); - os.Put(static_cast<typename OutputStream::Ch>(c)); - return c <= 0x7F; - } - - template <typename InputByteStream> - static CharType TakeBOM(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - uint8_t c = static_cast<uint8_t>(Take(is)); - return static_cast<Ch>(c); - } - - template <typename InputByteStream> - static Ch Take(InputByteStream& is) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); - return static_cast<Ch>(is.Take()); - } - - template <typename OutputByteStream> - static void PutBOM(OutputByteStream& os) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - (void)os; - } - - template <typename OutputByteStream> - static void Put(OutputByteStream& os, Ch c) { - RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); - os.Put(static_cast<typename OutputByteStream::Ch>(c)); - } -}; - -/////////////////////////////////////////////////////////////////////////////// -// AutoUTF - -//! Runtime-specified UTF encoding type of a stream. 
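These enum values are what the AutoUTFInputStream/AutoUTFOutputStream wrappers from encodedstream.h accept at run time. A minimal sketch, assuming a pre-built UTF-8 Document d, an output path of "out.json", and a 256-byte buffer; the encoding is picked by the caller rather than fixed at compile time.

#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/filewritestream.h"
#include "rapidjson/encodedstream.h"
using namespace rapidjson;

void WriteWithRuntimeEncoding(const Document& d, UTFType type) { // e.g. kUTF16LE, chosen at run time
    std::FILE* fp = std::fopen("out.json", "wb");                // assumed output path
    char writeBuffer[256];
    FileWriteStream bos(fp, writeBuffer, sizeof(writeBuffer));
    typedef AutoUTFOutputStream<unsigned, FileWriteStream> OutputStream;
    OutputStream eos(bos, type, true);                           // true: write the BOM for the chosen type
    Writer<OutputStream, UTF8<>, AutoUTF<unsigned> > writer(eos);
    d.Accept(writer);
    eos.Flush();
    std::fclose(fp);
}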
-enum UTFType { - kUTF8 = 0, //!< UTF-8. - kUTF16LE = 1, //!< UTF-16 little endian. - kUTF16BE = 2, //!< UTF-16 big endian. - kUTF32LE = 3, //!< UTF-32 little endian. - kUTF32BE = 4 //!< UTF-32 big endian. -}; - -//! Dynamically select encoding according to stream's runtime-specified UTF encoding type. -/*! \note This class can be used with AutoUTFInputtStream and AutoUTFOutputStream, which provides GetType(). -*/ -template<typename CharType> -struct AutoUTF { - typedef CharType Ch; - - enum { supportUnicode = 1 }; - -#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x - - template<typename OutputStream> - RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) { - typedef void (*EncodeFunc)(OutputStream&, unsigned); - static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) }; - (*f[os.GetType()])(os, codepoint); - } - - template<typename OutputStream> - RAPIDJSON_FORCEINLINE static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { - typedef void (*EncodeFunc)(OutputStream&, unsigned); - static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) }; - (*f[os.GetType()])(os, codepoint); - } - - template <typename InputStream> - RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) { - typedef bool (*DecodeFunc)(InputStream&, unsigned*); - static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) }; - return (*f[is.GetType()])(is, codepoint); - } - - template <typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { - typedef bool (*ValidateFunc)(InputStream&, OutputStream&); - static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) }; - return (*f[is.GetType()])(is, os); - } - -#undef RAPIDJSON_ENCODINGS_FUNC -}; - -/////////////////////////////////////////////////////////////////////////////// -// Transcoder - -//! Encoding conversion. -template<typename SourceEncoding, typename TargetEncoding> -struct Transcoder { - //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream. - template<typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { - unsigned codepoint; - if (!SourceEncoding::Decode(is, &codepoint)) - return false; - TargetEncoding::Encode(os, codepoint); - return true; - } - - template<typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { - unsigned codepoint; - if (!SourceEncoding::Decode(is, &codepoint)) - return false; - TargetEncoding::EncodeUnsafe(os, codepoint); - return true; - } - - //! Validate one Unicode codepoint from an encoded stream. - template<typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { - return Transcode(is, os); // Since source/target encoding is different, must transcode. - } -}; - -// Forward declaration. -template<typename Stream> -inline void PutUnsafe(Stream& stream, typename Stream::Ch c); - -//! Specialization of Transcoder with same source and target encoding. -template<typename Encoding> -struct Transcoder<Encoding, Encoding> { - template<typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { - os.Put(is.Take()); // Just copy one code unit. 
This semantic is different from primary template class. - return true; - } - - template<typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { - PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class. - return true; - } - - template<typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { - return Encoding::Validate(is, os); // source/target encoding are the same - } -}; - -RAPIDJSON_NAMESPACE_END - -#if defined(__GNUC__) || defined(_MSC_VER) -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/error/en.h b/ext/librethinkdbxx/src/rapidjson/error/en.h deleted file mode 100644 index 2db838bf..00000000 --- a/ext/librethinkdbxx/src/rapidjson/error/en.h +++ /dev/null @@ -1,74 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_ERROR_EN_H_ -#define RAPIDJSON_ERROR_EN_H_ - -#include "error.h" - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(switch-enum) -RAPIDJSON_DIAG_OFF(covered-switch-default) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Maps error code of parsing into error message. -/*! - \ingroup RAPIDJSON_ERRORS - \param parseErrorCode Error code obtained in parsing. - \return the error message. - \note User can make a copy of this function for localization. - Using switch-case is safer for future modification of error codes. 
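A typical call site pairs Document::Parse with GetParseError_En (defined just below) to report failures; a minimal sketch, where the helper name and the json argument are assumptions.

#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/error/en.h"
using namespace rapidjson;

void ParseOrReport(const char* json) {
    Document d;
    if (d.Parse(json).HasParseError()) {
        std::fprintf(stderr, "JSON parse error at offset %u: %s\n",
                     static_cast<unsigned>(d.GetErrorOffset()),
                     GetParseError_En(d.GetParseError()));
    }
}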
-*/ -inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) { - switch (parseErrorCode) { - case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error."); - - case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty."); - case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values."); - - case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value."); - - case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member."); - case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member."); - case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member."); - - case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element."); - - case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string."); - case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid."); - case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string."); - case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string."); - case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string."); - - case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double."); - case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number."); - case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number."); - - case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error."); - case kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error."); - - default: return RAPIDJSON_ERROR_STRING("Unknown error."); - } -} - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_ERROR_EN_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/error/error.h b/ext/librethinkdbxx/src/rapidjson/error/error.h deleted file mode 100644 index 95cb31a7..00000000 --- a/ext/librethinkdbxx/src/rapidjson/error/error.h +++ /dev/null @@ -1,155 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_ERROR_ERROR_H_ -#define RAPIDJSON_ERROR_ERROR_H_ - -#include "../rapidjson.h" - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -#endif - -/*! \file error.h */ - -/*! 
\defgroup RAPIDJSON_ERRORS RapidJSON error handling */ - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_ERROR_CHARTYPE - -//! Character type of error messages. -/*! \ingroup RAPIDJSON_ERRORS - The default character type is \c char. - On Windows, user can define this macro as \c TCHAR for supporting both - unicode/non-unicode settings. -*/ -#ifndef RAPIDJSON_ERROR_CHARTYPE -#define RAPIDJSON_ERROR_CHARTYPE char -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_ERROR_STRING - -//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[]. -/*! \ingroup RAPIDJSON_ERRORS - By default this conversion macro does nothing. - On Windows, user can define this macro as \c _T(x) for supporting both - unicode/non-unicode settings. -*/ -#ifndef RAPIDJSON_ERROR_STRING -#define RAPIDJSON_ERROR_STRING(x) x -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// ParseErrorCode - -//! Error code of parsing. -/*! \ingroup RAPIDJSON_ERRORS - \see GenericReader::Parse, GenericReader::GetParseErrorCode -*/ -enum ParseErrorCode { - kParseErrorNone = 0, //!< No error. - - kParseErrorDocumentEmpty, //!< The document is empty. - kParseErrorDocumentRootNotSingular, //!< The document root must not follow by other values. - - kParseErrorValueInvalid, //!< Invalid value. - - kParseErrorObjectMissName, //!< Missing a name for object member. - kParseErrorObjectMissColon, //!< Missing a colon after a name of object member. - kParseErrorObjectMissCommaOrCurlyBracket, //!< Missing a comma or '}' after an object member. - - kParseErrorArrayMissCommaOrSquareBracket, //!< Missing a comma or ']' after an array element. - - kParseErrorStringUnicodeEscapeInvalidHex, //!< Incorrect hex digit after \\u escape in string. - kParseErrorStringUnicodeSurrogateInvalid, //!< The surrogate pair in string is invalid. - kParseErrorStringEscapeInvalid, //!< Invalid escape character in string. - kParseErrorStringMissQuotationMark, //!< Missing a closing quotation mark in string. - kParseErrorStringInvalidEncoding, //!< Invalid encoding in string. - - kParseErrorNumberTooBig, //!< Number too big to be stored in double. - kParseErrorNumberMissFraction, //!< Miss fraction part in number. - kParseErrorNumberMissExponent, //!< Miss exponent in number. - - kParseErrorTermination, //!< Parsing was terminated. - kParseErrorUnspecificSyntaxError //!< Unspecific syntax error. -}; - -//! Result of parsing (wraps ParseErrorCode) -/*! - \ingroup RAPIDJSON_ERRORS - \code - Document doc; - ParseResult ok = doc.Parse("[42]"); - if (!ok) { - fprintf(stderr, "JSON parse error: %s (%u)", - GetParseError_En(ok.Code()), ok.Offset()); - exit(EXIT_FAILURE); - } - \endcode - \see GenericReader::Parse, GenericDocument::Parse -*/ -struct ParseResult { -public: - //! Default constructor, no error. - ParseResult() : code_(kParseErrorNone), offset_(0) {} - //! Constructor to set an error. - ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {} - - //! Get the error code. - ParseErrorCode Code() const { return code_; } - //! Get the error offset, if \ref IsError(), 0 otherwise. - size_t Offset() const { return offset_; } - - //! Conversion to \c bool, returns \c true, iff !\ref IsError(). - operator bool() const { return !IsError(); } - //! Whether the result is an error. 
- bool IsError() const { return code_ != kParseErrorNone; } - - bool operator==(const ParseResult& that) const { return code_ == that.code_; } - bool operator==(ParseErrorCode code) const { return code_ == code; } - friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; } - - //! Reset error code. - void Clear() { Set(kParseErrorNone); } - //! Update error code and offset. - void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; } - -private: - ParseErrorCode code_; - size_t offset_; -}; - -//! Function pointer type of GetParseError(). -/*! \ingroup RAPIDJSON_ERRORS - - This is the prototype for \c GetParseError_X(), where \c X is a locale. - User can dynamically change locale in runtime, e.g.: -\code - GetParseErrorFunc GetParseError = GetParseError_En; // or whatever - const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode()); -\endcode -*/ -typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode); - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_ERROR_ERROR_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/filereadstream.h b/ext/librethinkdbxx/src/rapidjson/filereadstream.h deleted file mode 100644 index b56ea13b..00000000 --- a/ext/librethinkdbxx/src/rapidjson/filereadstream.h +++ /dev/null @@ -1,99 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_FILEREADSTREAM_H_ -#define RAPIDJSON_FILEREADSTREAM_H_ - -#include "stream.h" -#include <cstdio> - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -RAPIDJSON_DIAG_OFF(unreachable-code) -RAPIDJSON_DIAG_OFF(missing-noreturn) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! File byte stream for input using fread(). -/*! - \note implements Stream concept -*/ -class FileReadStream { -public: - typedef char Ch; //!< Character type (byte). - - //! Constructor. - /*! - \param fp File pointer opened for read. - \param buffer user-supplied buffer. - \param bufferSize size of buffer in bytes. Must >=4 bytes. - */ - FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { - RAPIDJSON_ASSERT(fp_ != 0); - RAPIDJSON_ASSERT(bufferSize >= 4); - Read(); - } - - Ch Peek() const { return *current_; } - Ch Take() { Ch c = *current_; Read(); return c; } - size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); } - - // Not implemented - void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - - // For encoding detection only. - const Ch* Peek4() const { - return (current_ + 4 <= bufferLast_) ? 
current_ : 0; - } - -private: - void Read() { - if (current_ < bufferLast_) - ++current_; - else if (!eof_) { - count_ += readCount_; - readCount_ = fread(buffer_, 1, bufferSize_, fp_); - bufferLast_ = buffer_ + readCount_ - 1; - current_ = buffer_; - - if (readCount_ < bufferSize_) { - buffer_[readCount_] = '\0'; - ++bufferLast_; - eof_ = true; - } - } - } - - std::FILE* fp_; - Ch *buffer_; - size_t bufferSize_; - Ch *bufferLast_; - Ch *current_; - size_t readCount_; - size_t count_; //!< Number of characters read - bool eof_; -}; - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/filewritestream.h b/ext/librethinkdbxx/src/rapidjson/filewritestream.h deleted file mode 100644 index 6378dd60..00000000 --- a/ext/librethinkdbxx/src/rapidjson/filewritestream.h +++ /dev/null @@ -1,104 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_FILEWRITESTREAM_H_ -#define RAPIDJSON_FILEWRITESTREAM_H_ - -#include "stream.h" -#include <cstdio> - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(unreachable-code) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Wrapper of C file stream for input using fread(). -/*! - \note implements Stream concept -*/ -class FileWriteStream { -public: - typedef char Ch; //!< Character type. Only support char. - - FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) { - RAPIDJSON_ASSERT(fp_ != 0); - } - - void Put(char c) { - if (current_ >= bufferEnd_) - Flush(); - - *current_++ = c; - } - - void PutN(char c, size_t n) { - size_t avail = static_cast<size_t>(bufferEnd_ - current_); - while (n > avail) { - std::memset(current_, c, avail); - current_ += avail; - Flush(); - n -= avail; - avail = static_cast<size_t>(bufferEnd_ - current_); - } - - if (n > 0) { - std::memset(current_, c, n); - current_ += n; - } - } - - void Flush() { - if (current_ != buffer_) { - size_t result = fwrite(buffer_, 1, static_cast<size_t>(current_ - buffer_), fp_); - if (result < static_cast<size_t>(current_ - buffer_)) { - // failure deliberately ignored at this time - // added to avoid warn_unused_result build errors - } - current_ = buffer_; - } - } - - // Not implemented - char Peek() const { RAPIDJSON_ASSERT(false); return 0; } - char Take() { RAPIDJSON_ASSERT(false); return 0; } - size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } - char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } - -private: - // Prohibit copy constructor & assignment operator. - FileWriteStream(const FileWriteStream&); - FileWriteStream& operator=(const FileWriteStream&); - - std::FILE* fp_; - char *buffer_; - char *bufferEnd_; - char *current_; -}; - -//! 
Implement specialized version of PutN() with memset() for better performance. -template<> -inline void PutN(FileWriteStream& stream, char c, size_t n) { - stream.PutN(c, n); -} - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/fwd.h b/ext/librethinkdbxx/src/rapidjson/fwd.h deleted file mode 100644 index e8104e84..00000000 --- a/ext/librethinkdbxx/src/rapidjson/fwd.h +++ /dev/null @@ -1,151 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_FWD_H_ -#define RAPIDJSON_FWD_H_ - -#include "rapidjson.h" - -RAPIDJSON_NAMESPACE_BEGIN - -// encodings.h - -template<typename CharType> struct UTF8; -template<typename CharType> struct UTF16; -template<typename CharType> struct UTF16BE; -template<typename CharType> struct UTF16LE; -template<typename CharType> struct UTF32; -template<typename CharType> struct UTF32BE; -template<typename CharType> struct UTF32LE; -template<typename CharType> struct ASCII; -template<typename CharType> struct AutoUTF; - -template<typename SourceEncoding, typename TargetEncoding> -struct Transcoder; - -// allocators.h - -class CrtAllocator; - -template <typename BaseAllocator> -class MemoryPoolAllocator; - -// stream.h - -template <typename Encoding> -struct GenericStringStream; - -typedef GenericStringStream<UTF8<char> > StringStream; - -template <typename Encoding> -struct GenericInsituStringStream; - -typedef GenericInsituStringStream<UTF8<char> > InsituStringStream; - -// stringbuffer.h - -template <typename Encoding, typename Allocator> -class GenericStringBuffer; - -typedef GenericStringBuffer<UTF8<char>, CrtAllocator> StringBuffer; - -// filereadstream.h - -class FileReadStream; - -// filewritestream.h - -class FileWriteStream; - -// memorybuffer.h - -template <typename Allocator> -struct GenericMemoryBuffer; - -typedef GenericMemoryBuffer<CrtAllocator> MemoryBuffer; - -// memorystream.h - -struct MemoryStream; - -// reader.h - -template<typename Encoding, typename Derived> -struct BaseReaderHandler; - -template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator> -class GenericReader; - -typedef GenericReader<UTF8<char>, UTF8<char>, CrtAllocator> Reader; - -// writer.h - -template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags> -class Writer; - -// prettywriter.h - -template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags> -class PrettyWriter; - -// document.h - -template <typename Encoding, typename Allocator> -struct GenericMember; - -template <bool Const, typename Encoding, typename Allocator> -class GenericMemberIterator; - -template<typename CharType> -struct GenericStringRef; - -template <typename Encoding, 
typename Allocator> -class GenericValue; - -typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value; - -template <typename Encoding, typename Allocator, typename StackAllocator> -class GenericDocument; - -typedef GenericDocument<UTF8<char>, MemoryPoolAllocator<CrtAllocator>, CrtAllocator> Document; - -// pointer.h - -template <typename ValueType, typename Allocator> -class GenericPointer; - -typedef GenericPointer<Value, CrtAllocator> Pointer; - -// schema.h - -template <typename SchemaDocumentType> -class IGenericRemoteSchemaDocumentProvider; - -template <typename ValueT, typename Allocator> -class GenericSchemaDocument; - -typedef GenericSchemaDocument<Value, CrtAllocator> SchemaDocument; -typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider; - -template < - typename SchemaDocumentType, - typename OutputHandler, - typename StateAllocator> -class GenericSchemaValidator; - -typedef GenericSchemaValidator<SchemaDocument, BaseReaderHandler<UTF8<char>, void>, CrtAllocator> SchemaValidator; - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_RAPIDJSONFWD_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/biginteger.h b/ext/librethinkdbxx/src/rapidjson/internal/biginteger.h deleted file mode 100644 index 9d3e88c9..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/biginteger.h +++ /dev/null @@ -1,290 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
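Note on the fwd.h header removed just above: it exists so that an interface header can mention RapidJSON types (Value, Document, Writer, and so on) without paying for the full template definitions in every translation unit. A minimal sketch of that pattern follows, assuming RapidJSON is still on the include path; the file names json_report.h / json_report.cpp and the function FirstMemberName are made up for illustration and are not part of the removed code.

// --- json_report.h (interface) ---
#include <string>
#include "rapidjson/fwd.h"                     // forward declarations only: cheap to include

std::string FirstMemberName(const rapidjson::Value& v);

// --- json_report.cpp (implementation) ---
#include "rapidjson/document.h"                // full GenericValue definition needed here

std::string FirstMemberName(const rapidjson::Value& v) {
    if (!v.IsObject() || v.MemberBegin() == v.MemberEnd())
        return std::string();                  // empty result for non-objects and empty objects
    return v.MemberBegin()->name.GetString();  // name of the first member
}

Only the implementation file needs the heavyweight header; callers that merely pass a Value& around can get by with fwd.h.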
- -#ifndef RAPIDJSON_BIGINTEGER_H_ -#define RAPIDJSON_BIGINTEGER_H_ - -#include "../rapidjson.h" - -#if defined(_MSC_VER) && defined(_M_AMD64) -#include <intrin.h> // for _umul128 -#pragma intrinsic(_umul128) -#endif - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -class BigInteger { -public: - typedef uint64_t Type; - - BigInteger(const BigInteger& rhs) : count_(rhs.count_) { - std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); - } - - explicit BigInteger(uint64_t u) : count_(1) { - digits_[0] = u; - } - - BigInteger(const char* decimals, size_t length) : count_(1) { - RAPIDJSON_ASSERT(length > 0); - digits_[0] = 0; - size_t i = 0; - const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19 - while (length >= kMaxDigitPerIteration) { - AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration); - length -= kMaxDigitPerIteration; - i += kMaxDigitPerIteration; - } - - if (length > 0) - AppendDecimal64(decimals + i, decimals + i + length); - } - - BigInteger& operator=(const BigInteger &rhs) - { - if (this != &rhs) { - count_ = rhs.count_; - std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); - } - return *this; - } - - BigInteger& operator=(uint64_t u) { - digits_[0] = u; - count_ = 1; - return *this; - } - - BigInteger& operator+=(uint64_t u) { - Type backup = digits_[0]; - digits_[0] += u; - for (size_t i = 0; i < count_ - 1; i++) { - if (digits_[i] >= backup) - return *this; // no carry - backup = digits_[i + 1]; - digits_[i + 1] += 1; - } - - // Last carry - if (digits_[count_ - 1] < backup) - PushBack(1); - - return *this; - } - - BigInteger& operator*=(uint64_t u) { - if (u == 0) return *this = 0; - if (u == 1) return *this; - if (*this == 1) return *this = u; - - uint64_t k = 0; - for (size_t i = 0; i < count_; i++) { - uint64_t hi; - digits_[i] = MulAdd64(digits_[i], u, k, &hi); - k = hi; - } - - if (k > 0) - PushBack(k); - - return *this; - } - - BigInteger& operator*=(uint32_t u) { - if (u == 0) return *this = 0; - if (u == 1) return *this; - if (*this == 1) return *this = u; - - uint64_t k = 0; - for (size_t i = 0; i < count_; i++) { - const uint64_t c = digits_[i] >> 32; - const uint64_t d = digits_[i] & 0xFFFFFFFF; - const uint64_t uc = u * c; - const uint64_t ud = u * d; - const uint64_t p0 = ud + k; - const uint64_t p1 = uc + (p0 >> 32); - digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32); - k = p1 >> 32; - } - - if (k > 0) - PushBack(k); - - return *this; - } - - BigInteger& operator<<=(size_t shift) { - if (IsZero() || shift == 0) return *this; - - size_t offset = shift / kTypeBit; - size_t interShift = shift % kTypeBit; - RAPIDJSON_ASSERT(count_ + offset <= kCapacity); - - if (interShift == 0) { - std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type)); - count_ += offset; - } - else { - digits_[count_] = 0; - for (size_t i = count_; i > 0; i--) - digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift)); - digits_[offset] = digits_[0] << interShift; - count_ += offset; - if (digits_[count_]) - count_++; - } - - std::memset(digits_, 0, offset * sizeof(Type)); - - return *this; - } - - bool operator==(const BigInteger& rhs) const { - return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0; - } - - bool operator==(const Type rhs) const { - return count_ == 1 && digits_[0] == rhs; - } - - BigInteger& MultiplyPow5(unsigned exp) { - static const uint32_t kPow5[12] = { - 5, - 5 * 5, - 5 * 5 * 5, - 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 
5, - 5 * 5 * 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, - 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 - }; - if (exp == 0) return *this; - for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27 - for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13 - if (exp > 0) *this *= kPow5[exp - 1]; - return *this; - } - - // Compute absolute difference of this and rhs. - // Assume this != rhs - bool Difference(const BigInteger& rhs, BigInteger* out) const { - int cmp = Compare(rhs); - RAPIDJSON_ASSERT(cmp != 0); - const BigInteger *a, *b; // Makes a > b - bool ret; - if (cmp < 0) { a = &rhs; b = this; ret = true; } - else { a = this; b = &rhs; ret = false; } - - Type borrow = 0; - for (size_t i = 0; i < a->count_; i++) { - Type d = a->digits_[i] - borrow; - if (i < b->count_) - d -= b->digits_[i]; - borrow = (d > a->digits_[i]) ? 1 : 0; - out->digits_[i] = d; - if (d != 0) - out->count_ = i + 1; - } - - return ret; - } - - int Compare(const BigInteger& rhs) const { - if (count_ != rhs.count_) - return count_ < rhs.count_ ? -1 : 1; - - for (size_t i = count_; i-- > 0;) - if (digits_[i] != rhs.digits_[i]) - return digits_[i] < rhs.digits_[i] ? -1 : 1; - - return 0; - } - - size_t GetCount() const { return count_; } - Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; } - bool IsZero() const { return count_ == 1 && digits_[0] == 0; } - -private: - void AppendDecimal64(const char* begin, const char* end) { - uint64_t u = ParseUint64(begin, end); - if (IsZero()) - *this = u; - else { - unsigned exp = static_cast<unsigned>(end - begin); - (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u - } - } - - void PushBack(Type digit) { - RAPIDJSON_ASSERT(count_ < kCapacity); - digits_[count_++] = digit; - } - - static uint64_t ParseUint64(const char* begin, const char* end) { - uint64_t r = 0; - for (const char* p = begin; p != end; ++p) { - RAPIDJSON_ASSERT(*p >= '0' && *p <= '9'); - r = r * 10u + static_cast<unsigned>(*p - '0'); - } - return r; - } - - // Assume a * b + k < 2^128 - static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) { -#if defined(_MSC_VER) && defined(_M_AMD64) - uint64_t low = _umul128(a, b, outHigh) + k; - if (low < k) - (*outHigh)++; - return low; -#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) - __extension__ typedef unsigned __int128 uint128; - uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b); - p += k; - *outHigh = static_cast<uint64_t>(p >> 64); - return static_cast<uint64_t>(p); -#else - const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32; - uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1; - x1 += (x0 >> 32); // can't give carry - x1 += x2; - if (x1 < x2) - x3 += (static_cast<uint64_t>(1) << 32); - uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF); - uint64_t hi = x3 + (x1 >> 32); - - lo += k; - if (lo < k) - hi++; - *outHigh = hi; - return lo; -#endif - } - - static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000 - static const size_t kCapacity = kBitCount / sizeof(Type); - static const size_t kTypeBit = sizeof(Type) * 8; - - Type digits_[kCapacity]; - size_t count_; -}; - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_BIGINTEGER_H_ diff --git 
a/ext/librethinkdbxx/src/rapidjson/internal/diyfp.h b/ext/librethinkdbxx/src/rapidjson/internal/diyfp.h deleted file mode 100644 index c9fefdc6..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/diyfp.h +++ /dev/null @@ -1,258 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -// This is a C++ header-only implementation of Grisu2 algorithm from the publication: -// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with -// integers." ACM Sigplan Notices 45.6 (2010): 233-243. - -#ifndef RAPIDJSON_DIYFP_H_ -#define RAPIDJSON_DIYFP_H_ - -#include "../rapidjson.h" - -#if defined(_MSC_VER) && defined(_M_AMD64) -#include <intrin.h> -#pragma intrinsic(_BitScanReverse64) -#pragma intrinsic(_umul128) -#endif - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -#endif - -struct DiyFp { - DiyFp() : f(), e() {} - - DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {} - - explicit DiyFp(double d) { - union { - double d; - uint64_t u64; - } u = { d }; - - int biased_e = static_cast<int>((u.u64 & kDpExponentMask) >> kDpSignificandSize); - uint64_t significand = (u.u64 & kDpSignificandMask); - if (biased_e != 0) { - f = significand + kDpHiddenBit; - e = biased_e - kDpExponentBias; - } - else { - f = significand; - e = kDpMinExponent + 1; - } - } - - DiyFp operator-(const DiyFp& rhs) const { - return DiyFp(f - rhs.f, e); - } - - DiyFp operator*(const DiyFp& rhs) const { -#if defined(_MSC_VER) && defined(_M_AMD64) - uint64_t h; - uint64_t l = _umul128(f, rhs.f, &h); - if (l & (uint64_t(1) << 63)) // rounding - h++; - return DiyFp(h, e + rhs.e + 64); -#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) - __extension__ typedef unsigned __int128 uint128; - uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f); - uint64_t h = static_cast<uint64_t>(p >> 64); - uint64_t l = static_cast<uint64_t>(p); - if (l & (uint64_t(1) << 63)) // rounding - h++; - return DiyFp(h, e + rhs.e + 64); -#else - const uint64_t M32 = 0xFFFFFFFF; - const uint64_t a = f >> 32; - const uint64_t b = f & M32; - const uint64_t c = rhs.f >> 32; - const uint64_t d = rhs.f & M32; - const uint64_t ac = a * c; - const uint64_t bc = b * c; - const uint64_t ad = a * d; - const uint64_t bd = b * d; - uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32); - tmp += 1U << 31; /// mult_round - return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64); -#endif - } - - DiyFp Normalize() const { -#if defined(_MSC_VER) && defined(_M_AMD64) - unsigned long index; - _BitScanReverse64(&index, f); - return DiyFp(f << (63 - index), e - (63 - index)); -#elif defined(__GNUC__) && __GNUC__ >= 4 - int s = __builtin_clzll(f); - return DiyFp(f << s, e - s); -#else - DiyFp 
res = *this; - while (!(res.f & (static_cast<uint64_t>(1) << 63))) { - res.f <<= 1; - res.e--; - } - return res; -#endif - } - - DiyFp NormalizeBoundary() const { - DiyFp res = *this; - while (!(res.f & (kDpHiddenBit << 1))) { - res.f <<= 1; - res.e--; - } - res.f <<= (kDiySignificandSize - kDpSignificandSize - 2); - res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2); - return res; - } - - void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const { - DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary(); - DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1); - mi.f <<= mi.e - pl.e; - mi.e = pl.e; - *plus = pl; - *minus = mi; - } - - double ToDouble() const { - union { - double d; - uint64_t u64; - }u; - const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 : - static_cast<uint64_t>(e + kDpExponentBias); - u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize); - return u.d; - } - - static const int kDiySignificandSize = 64; - static const int kDpSignificandSize = 52; - static const int kDpExponentBias = 0x3FF + kDpSignificandSize; - static const int kDpMaxExponent = 0x7FF - kDpExponentBias; - static const int kDpMinExponent = -kDpExponentBias; - static const int kDpDenormalExponent = -kDpExponentBias + 1; - static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); - static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); - static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); - - uint64_t f; - int e; -}; - -inline DiyFp GetCachedPowerByIndex(size_t index) { - // 10^-348, 10^-340, ..., 10^340 - static const uint64_t kCachedPowers_F[] = { - RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76), - RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea), - RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df), - RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f), - RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c), - RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5), - RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d), - RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637), - RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7), - RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5), - RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b), - RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996), - RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6), - RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8), - RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053), - RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd), - RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94), - RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b), - RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac), - RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), 
RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3), - RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb), - RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c), - RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000), - RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984), - RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70), - RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245), - RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8), - RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a), - RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea), - RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85), - RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2), - RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3), - RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25), - RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece), - RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5), - RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a), - RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c), - RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a), - RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129), - RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429), - RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d), - RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841), - RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9), - RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b) - }; - static const int16_t kCachedPowers_E[] = { - -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, - -954, -927, -901, -874, -847, -821, -794, -768, -741, -715, - -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, - -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, - -157, -130, -103, -77, -50, -24, 3, 30, 56, 83, - 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, - 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, - 641, 667, 694, 720, 747, 774, 800, 827, 853, 880, - 907, 933, 960, 986, 1013, 1039, 1066 - }; - return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]); -} - -inline DiyFp GetCachedPower(int e, int* K) { - - //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374; - double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so can do ceiling in positive - int k = static_cast<int>(dk); - if (dk - k > 0.0) - k++; - - unsigned index = static_cast<unsigned>((k >> 3) + 1); - *K = -(-348 + static_cast<int>(index << 3)); // decimal exponent no need lookup table - - return GetCachedPowerByIndex(index); -} - -inline DiyFp GetCachedPower10(int exp, int *outExp) { - unsigned index = (static_cast<unsigned>(exp) + 348u) / 8u; - *outExp = -348 + static_cast<int>(index) * 8; - return GetCachedPowerByIndex(index); - } - -#ifdef __GNUC__ 
-RAPIDJSON_DIAG_POP -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -RAPIDJSON_DIAG_OFF(padded) -#endif - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_DIYFP_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/dtoa.h b/ext/librethinkdbxx/src/rapidjson/internal/dtoa.h deleted file mode 100644 index 8d6350e6..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/dtoa.h +++ /dev/null @@ -1,245 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -// This is a C++ header-only implementation of Grisu2 algorithm from the publication: -// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with -// integers." ACM Sigplan Notices 45.6 (2010): 233-243. - -#ifndef RAPIDJSON_DTOA_ -#define RAPIDJSON_DTOA_ - -#include "itoa.h" // GetDigitsLut() -#include "diyfp.h" -#include "ieee754.h" - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124 -#endif - -inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) { - while (rest < wp_w && delta - rest >= ten_kappa && - (rest + ten_kappa < wp_w || /// closer - wp_w - rest > rest + ten_kappa - wp_w)) { - buffer[len - 1]--; - rest += ten_kappa; - } -} - -inline unsigned CountDecimalDigit32(uint32_t n) { - // Simple pure C++ implementation was faster than __builtin_clz version in this situation. 
- if (n < 10) return 1; - if (n < 100) return 2; - if (n < 1000) return 3; - if (n < 10000) return 4; - if (n < 100000) return 5; - if (n < 1000000) return 6; - if (n < 10000000) return 7; - if (n < 100000000) return 8; - // Will not reach 10 digits in DigitGen() - //if (n < 1000000000) return 9; - //return 10; - return 9; -} - -inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) { - static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; - const DiyFp one(uint64_t(1) << -Mp.e, Mp.e); - const DiyFp wp_w = Mp - W; - uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e); - uint64_t p2 = Mp.f & (one.f - 1); - unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9] - *len = 0; - - while (kappa > 0) { - uint32_t d = 0; - switch (kappa) { - case 9: d = p1 / 100000000; p1 %= 100000000; break; - case 8: d = p1 / 10000000; p1 %= 10000000; break; - case 7: d = p1 / 1000000; p1 %= 1000000; break; - case 6: d = p1 / 100000; p1 %= 100000; break; - case 5: d = p1 / 10000; p1 %= 10000; break; - case 4: d = p1 / 1000; p1 %= 1000; break; - case 3: d = p1 / 100; p1 %= 100; break; - case 2: d = p1 / 10; p1 %= 10; break; - case 1: d = p1; p1 = 0; break; - default:; - } - if (d || *len) - buffer[(*len)++] = static_cast<char>('0' + static_cast<char>(d)); - kappa--; - uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2; - if (tmp <= delta) { - *K += kappa; - GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f); - return; - } - } - - // kappa = 0 - for (;;) { - p2 *= 10; - delta *= 10; - char d = static_cast<char>(p2 >> -one.e); - if (d || *len) - buffer[(*len)++] = static_cast<char>('0' + d); - p2 &= one.f - 1; - kappa--; - if (p2 < delta) { - *K += kappa; - int index = -static_cast<int>(kappa); - GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[-static_cast<int>(kappa)] : 0)); - return; - } - } -} - -inline void Grisu2(double value, char* buffer, int* length, int* K) { - const DiyFp v(value); - DiyFp w_m, w_p; - v.NormalizedBoundaries(&w_m, &w_p); - - const DiyFp c_mk = GetCachedPower(w_p.e, K); - const DiyFp W = v.Normalize() * c_mk; - DiyFp Wp = w_p * c_mk; - DiyFp Wm = w_m * c_mk; - Wm.f++; - Wp.f--; - DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K); -} - -inline char* WriteExponent(int K, char* buffer) { - if (K < 0) { - *buffer++ = '-'; - K = -K; - } - - if (K >= 100) { - *buffer++ = static_cast<char>('0' + static_cast<char>(K / 100)); - K %= 100; - const char* d = GetDigitsLut() + K * 2; - *buffer++ = d[0]; - *buffer++ = d[1]; - } - else if (K >= 10) { - const char* d = GetDigitsLut() + K * 2; - *buffer++ = d[0]; - *buffer++ = d[1]; - } - else - *buffer++ = static_cast<char>('0' + static_cast<char>(K)); - - return buffer; -} - -inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) { - const int kk = length + k; // 10^(kk-1) <= v < 10^kk - - if (0 <= k && kk <= 21) { - // 1234e7 -> 12340000000 - for (int i = length; i < kk; i++) - buffer[i] = '0'; - buffer[kk] = '.'; - buffer[kk + 1] = '0'; - return &buffer[kk + 2]; - } - else if (0 < kk && kk <= 21) { - // 1234e-2 -> 12.34 - std::memmove(&buffer[kk + 1], &buffer[kk], static_cast<size_t>(length - kk)); - buffer[kk] = '.'; - if (0 > k + maxDecimalPlaces) { - // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1 - // Remove extra trailing zeros (at least one) after truncation. 
- for (int i = kk + maxDecimalPlaces; i > kk + 1; i--) - if (buffer[i] != '0') - return &buffer[i + 1]; - return &buffer[kk + 2]; // Reserve one zero - } - else - return &buffer[length + 1]; - } - else if (-6 < kk && kk <= 0) { - // 1234e-6 -> 0.001234 - const int offset = 2 - kk; - std::memmove(&buffer[offset], &buffer[0], static_cast<size_t>(length)); - buffer[0] = '0'; - buffer[1] = '.'; - for (int i = 2; i < offset; i++) - buffer[i] = '0'; - if (length - kk > maxDecimalPlaces) { - // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1 - // Remove extra trailing zeros (at least one) after truncation. - for (int i = maxDecimalPlaces + 1; i > 2; i--) - if (buffer[i] != '0') - return &buffer[i + 1]; - return &buffer[3]; // Reserve one zero - } - else - return &buffer[length + offset]; - } - else if (kk < -maxDecimalPlaces) { - // Truncate to zero - buffer[0] = '0'; - buffer[1] = '.'; - buffer[2] = '0'; - return &buffer[3]; - } - else if (length == 1) { - // 1e30 - buffer[1] = 'e'; - return WriteExponent(kk - 1, &buffer[2]); - } - else { - // 1234e30 -> 1.234e33 - std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1)); - buffer[1] = '.'; - buffer[length + 1] = 'e'; - return WriteExponent(kk - 1, &buffer[0 + length + 2]); - } -} - -inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) { - RAPIDJSON_ASSERT(maxDecimalPlaces >= 1); - Double d(value); - if (d.IsZero()) { - if (d.Sign()) - *buffer++ = '-'; // -0.0, Issue #289 - buffer[0] = '0'; - buffer[1] = '.'; - buffer[2] = '0'; - return &buffer[3]; - } - else { - if (value < 0) { - *buffer++ = '-'; - value = -value; - } - int length, K; - Grisu2(value, buffer, &length, &K); - return Prettify(buffer, length, K, maxDecimalPlaces); - } -} - -#ifdef __GNUC__ -RAPIDJSON_DIAG_POP -#endif - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_DTOA_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/ieee754.h b/ext/librethinkdbxx/src/rapidjson/internal/ieee754.h deleted file mode 100644 index 82bb0b99..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/ieee754.h +++ /dev/null @@ -1,78 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
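Context for the diyfp.h/dtoa.h pair deleted above: together they implement Grisu2, the shortest-round-trip double formatter that RapidJSON's Writer calls when a double is serialized. A minimal usage sketch, assuming RapidJSON is still available on the include path (nothing below is part of the removed code):

#include <cstdio>
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"

int main() {
    rapidjson::StringBuffer sb;
    rapidjson::Writer<rapidjson::StringBuffer> writer(sb);

    writer.StartObject();
    writer.Key("pi");    writer.Double(3.14159);  // formatted by internal::dtoa (Grisu2)
    writer.Key("tenth"); writer.Double(0.1);      // shortest form that round-trips
    writer.EndObject();

    std::puts(sb.GetString());                    // {"pi":3.14159,"tenth":0.1}
    return 0;
}

The shortest-digits property is why 0.1 comes out as "0.1" rather than "0.10000000000000001".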
- -#ifndef RAPIDJSON_IEEE754_ -#define RAPIDJSON_IEEE754_ - -#include "../rapidjson.h" - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -class Double { -public: - Double() {} - Double(double d) : d_(d) {} - Double(uint64_t u) : u_(u) {} - - double Value() const { return d_; } - uint64_t Uint64Value() const { return u_; } - - double NextPositiveDouble() const { - RAPIDJSON_ASSERT(!Sign()); - return Double(u_ + 1).Value(); - } - - bool Sign() const { return (u_ & kSignMask) != 0; } - uint64_t Significand() const { return u_ & kSignificandMask; } - int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); } - - bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; } - bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; } - bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; } - bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; } - bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; } - - uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); } - int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; } - uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; } - - static unsigned EffectiveSignificandSize(int order) { - if (order >= -1021) - return 53; - else if (order <= -1074) - return 0; - else - return static_cast<unsigned>(order) + 1074; - } - -private: - static const int kSignificandSize = 52; - static const int kExponentBias = 0x3FF; - static const int kDenormalExponent = 1 - kExponentBias; - static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000); - static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); - static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); - static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); - - union { - double d_; - uint64_t u_; - }; -}; - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_IEEE754_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/itoa.h b/ext/librethinkdbxx/src/rapidjson/internal/itoa.h deleted file mode 100644 index 01a4e7e7..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/itoa.h +++ /dev/null @@ -1,304 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
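The Double helper just removed is a thin bit-level view of an IEEE-754 binary64 value. A standalone sketch of the same trick (NextAfterPositive is a made-up name, not part of the removed header): reinterpret the bits through memcpy and add one, which steps to the next representable double, exactly how NextPositiveDouble() above is implemented.

#include <cstdint>
#include <cstdio>
#include <cstring>

// One unit in the last place: bump the raw bit pattern of a positive,
// finite double by one to get the next representable value.
static double NextAfterPositive(double d) {
    std::uint64_t u;
    std::memcpy(&u, &d, sizeof u);   // well-defined type punning
    ++u;
    std::memcpy(&d, &u, sizeof d);
    return d;
}

int main() {
    const double x = 1.0;
    std::printf("%.17g -> %.17g\n", x, NextAfterPositive(x));
    // prints: 1 -> 1.0000000000000002
    return 0;
}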
- -#ifndef RAPIDJSON_ITOA_ -#define RAPIDJSON_ITOA_ - -#include "../rapidjson.h" - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -inline const char* GetDigitsLut() { - static const char cDigitsLut[200] = { - '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', - '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', - '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', - '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', - '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', - '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', - '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', - '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', - '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', - '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' - }; - return cDigitsLut; -} - -inline char* u32toa(uint32_t value, char* buffer) { - const char* cDigitsLut = GetDigitsLut(); - - if (value < 10000) { - const uint32_t d1 = (value / 100) << 1; - const uint32_t d2 = (value % 100) << 1; - - if (value >= 1000) - *buffer++ = cDigitsLut[d1]; - if (value >= 100) - *buffer++ = cDigitsLut[d1 + 1]; - if (value >= 10) - *buffer++ = cDigitsLut[d2]; - *buffer++ = cDigitsLut[d2 + 1]; - } - else if (value < 100000000) { - // value = bbbbcccc - const uint32_t b = value / 10000; - const uint32_t c = value % 10000; - - const uint32_t d1 = (b / 100) << 1; - const uint32_t d2 = (b % 100) << 1; - - const uint32_t d3 = (c / 100) << 1; - const uint32_t d4 = (c % 100) << 1; - - if (value >= 10000000) - *buffer++ = cDigitsLut[d1]; - if (value >= 1000000) - *buffer++ = cDigitsLut[d1 + 1]; - if (value >= 100000) - *buffer++ = cDigitsLut[d2]; - *buffer++ = cDigitsLut[d2 + 1]; - - *buffer++ = cDigitsLut[d3]; - *buffer++ = cDigitsLut[d3 + 1]; - *buffer++ = cDigitsLut[d4]; - *buffer++ = cDigitsLut[d4 + 1]; - } - else { - // value = aabbbbcccc in decimal - - const uint32_t a = value / 100000000; // 1 to 42 - value %= 100000000; - - if (a >= 10) { - const unsigned i = a << 1; - *buffer++ = cDigitsLut[i]; - *buffer++ = cDigitsLut[i + 1]; - } - else - *buffer++ = static_cast<char>('0' + static_cast<char>(a)); - - const uint32_t b = value / 10000; // 0 to 9999 - const uint32_t c = value % 10000; // 0 to 9999 - - const uint32_t d1 = (b / 100) << 1; - const uint32_t d2 = (b % 100) << 1; - - const uint32_t d3 = (c / 100) << 1; - const uint32_t d4 = (c % 100) << 1; - - *buffer++ = cDigitsLut[d1]; - *buffer++ = cDigitsLut[d1 + 1]; - *buffer++ = cDigitsLut[d2]; - *buffer++ = cDigitsLut[d2 + 1]; - *buffer++ = cDigitsLut[d3]; - *buffer++ = cDigitsLut[d3 + 1]; - *buffer++ = cDigitsLut[d4]; - *buffer++ = cDigitsLut[d4 + 1]; - } - return buffer; -} - -inline char* i32toa(int32_t value, char* buffer) { - uint32_t u = static_cast<uint32_t>(value); - if (value < 0) { - *buffer++ = '-'; - u = ~u + 1; - } - - return u32toa(u, buffer); -} - -inline char* u64toa(uint64_t value, char* buffer) { - const char* cDigitsLut = GetDigitsLut(); - const uint64_t kTen8 = 100000000; - const uint64_t kTen9 = kTen8 * 10; - const uint64_t kTen10 = kTen8 * 100; - const uint64_t kTen11 = kTen8 * 1000; - const uint64_t kTen12 = kTen8 * 10000; - const uint64_t kTen13 = kTen8 * 100000; - const uint64_t kTen14 = kTen8 * 1000000; - const uint64_t kTen15 = kTen8 * 10000000; - const uint64_t 
kTen16 = kTen8 * kTen8; - - if (value < kTen8) { - uint32_t v = static_cast<uint32_t>(value); - if (v < 10000) { - const uint32_t d1 = (v / 100) << 1; - const uint32_t d2 = (v % 100) << 1; - - if (v >= 1000) - *buffer++ = cDigitsLut[d1]; - if (v >= 100) - *buffer++ = cDigitsLut[d1 + 1]; - if (v >= 10) - *buffer++ = cDigitsLut[d2]; - *buffer++ = cDigitsLut[d2 + 1]; - } - else { - // value = bbbbcccc - const uint32_t b = v / 10000; - const uint32_t c = v % 10000; - - const uint32_t d1 = (b / 100) << 1; - const uint32_t d2 = (b % 100) << 1; - - const uint32_t d3 = (c / 100) << 1; - const uint32_t d4 = (c % 100) << 1; - - if (value >= 10000000) - *buffer++ = cDigitsLut[d1]; - if (value >= 1000000) - *buffer++ = cDigitsLut[d1 + 1]; - if (value >= 100000) - *buffer++ = cDigitsLut[d2]; - *buffer++ = cDigitsLut[d2 + 1]; - - *buffer++ = cDigitsLut[d3]; - *buffer++ = cDigitsLut[d3 + 1]; - *buffer++ = cDigitsLut[d4]; - *buffer++ = cDigitsLut[d4 + 1]; - } - } - else if (value < kTen16) { - const uint32_t v0 = static_cast<uint32_t>(value / kTen8); - const uint32_t v1 = static_cast<uint32_t>(value % kTen8); - - const uint32_t b0 = v0 / 10000; - const uint32_t c0 = v0 % 10000; - - const uint32_t d1 = (b0 / 100) << 1; - const uint32_t d2 = (b0 % 100) << 1; - - const uint32_t d3 = (c0 / 100) << 1; - const uint32_t d4 = (c0 % 100) << 1; - - const uint32_t b1 = v1 / 10000; - const uint32_t c1 = v1 % 10000; - - const uint32_t d5 = (b1 / 100) << 1; - const uint32_t d6 = (b1 % 100) << 1; - - const uint32_t d7 = (c1 / 100) << 1; - const uint32_t d8 = (c1 % 100) << 1; - - if (value >= kTen15) - *buffer++ = cDigitsLut[d1]; - if (value >= kTen14) - *buffer++ = cDigitsLut[d1 + 1]; - if (value >= kTen13) - *buffer++ = cDigitsLut[d2]; - if (value >= kTen12) - *buffer++ = cDigitsLut[d2 + 1]; - if (value >= kTen11) - *buffer++ = cDigitsLut[d3]; - if (value >= kTen10) - *buffer++ = cDigitsLut[d3 + 1]; - if (value >= kTen9) - *buffer++ = cDigitsLut[d4]; - if (value >= kTen8) - *buffer++ = cDigitsLut[d4 + 1]; - - *buffer++ = cDigitsLut[d5]; - *buffer++ = cDigitsLut[d5 + 1]; - *buffer++ = cDigitsLut[d6]; - *buffer++ = cDigitsLut[d6 + 1]; - *buffer++ = cDigitsLut[d7]; - *buffer++ = cDigitsLut[d7 + 1]; - *buffer++ = cDigitsLut[d8]; - *buffer++ = cDigitsLut[d8 + 1]; - } - else { - const uint32_t a = static_cast<uint32_t>(value / kTen16); // 1 to 1844 - value %= kTen16; - - if (a < 10) - *buffer++ = static_cast<char>('0' + static_cast<char>(a)); - else if (a < 100) { - const uint32_t i = a << 1; - *buffer++ = cDigitsLut[i]; - *buffer++ = cDigitsLut[i + 1]; - } - else if (a < 1000) { - *buffer++ = static_cast<char>('0' + static_cast<char>(a / 100)); - - const uint32_t i = (a % 100) << 1; - *buffer++ = cDigitsLut[i]; - *buffer++ = cDigitsLut[i + 1]; - } - else { - const uint32_t i = (a / 100) << 1; - const uint32_t j = (a % 100) << 1; - *buffer++ = cDigitsLut[i]; - *buffer++ = cDigitsLut[i + 1]; - *buffer++ = cDigitsLut[j]; - *buffer++ = cDigitsLut[j + 1]; - } - - const uint32_t v0 = static_cast<uint32_t>(value / kTen8); - const uint32_t v1 = static_cast<uint32_t>(value % kTen8); - - const uint32_t b0 = v0 / 10000; - const uint32_t c0 = v0 % 10000; - - const uint32_t d1 = (b0 / 100) << 1; - const uint32_t d2 = (b0 % 100) << 1; - - const uint32_t d3 = (c0 / 100) << 1; - const uint32_t d4 = (c0 % 100) << 1; - - const uint32_t b1 = v1 / 10000; - const uint32_t c1 = v1 % 10000; - - const uint32_t d5 = (b1 / 100) << 1; - const uint32_t d6 = (b1 % 100) << 1; - - const uint32_t d7 = (c1 / 100) << 1; - const uint32_t d8 = (c1 % 100) << 
1; - - *buffer++ = cDigitsLut[d1]; - *buffer++ = cDigitsLut[d1 + 1]; - *buffer++ = cDigitsLut[d2]; - *buffer++ = cDigitsLut[d2 + 1]; - *buffer++ = cDigitsLut[d3]; - *buffer++ = cDigitsLut[d3 + 1]; - *buffer++ = cDigitsLut[d4]; - *buffer++ = cDigitsLut[d4 + 1]; - *buffer++ = cDigitsLut[d5]; - *buffer++ = cDigitsLut[d5 + 1]; - *buffer++ = cDigitsLut[d6]; - *buffer++ = cDigitsLut[d6 + 1]; - *buffer++ = cDigitsLut[d7]; - *buffer++ = cDigitsLut[d7 + 1]; - *buffer++ = cDigitsLut[d8]; - *buffer++ = cDigitsLut[d8 + 1]; - } - - return buffer; -} - -inline char* i64toa(int64_t value, char* buffer) { - uint64_t u = static_cast<uint64_t>(value); - if (value < 0) { - *buffer++ = '-'; - u = ~u + 1; - } - - return u64toa(u, buffer); -} - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_ITOA_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/meta.h b/ext/librethinkdbxx/src/rapidjson/internal/meta.h deleted file mode 100644 index 5a9aaa42..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/meta.h +++ /dev/null @@ -1,181 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
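The itoa.h routines deleted above all lean on one idea: a 200-byte table holding the strings "00" through "99", so two decimal digits are emitted per division. A simplified standalone sketch of the same technique follows (U32ToA and kDigits are illustrative names; the removed code avoids the scratch buffer by branching on magnitude instead):

#include <cstdint>
#include <cstdio>
#include <cstring>

static const char kDigits[201] =
    "00010203040506070809101112131415161718192021222324"
    "25262728293031323334353637383940414243444546474849"
    "50515253545556575859606162636465666768697071727374"
    "75767778798081828384858687888990919293949596979899";

// Writes the decimal form of v into out (at least 11 bytes) and returns the
// number of characters written, not counting the terminating NUL.
static int U32ToA(std::uint32_t v, char* out) {
    char tmp[10];
    char* p = tmp + 10;                        // fill the scratch buffer backwards
    while (v >= 100) {
        const unsigned idx = (v % 100) * 2;    // two digits per table lookup
        v /= 100;
        *--p = kDigits[idx + 1];
        *--p = kDigits[idx];
    }
    if (v >= 10) {
        const unsigned idx = v * 2;
        *--p = kDigits[idx + 1];
        *--p = kDigits[idx];
    } else {
        *--p = static_cast<char>('0' + v);
    }
    const int len = static_cast<int>(tmp + 10 - p);
    std::memcpy(out, p, static_cast<size_t>(len));
    out[len] = '\0';
    return len;
}

int main() {
    char buf[11];
    U32ToA(4294967295u, buf);
    std::puts(buf);                            // 4294967295
    return 0;
}

Halving the number of divisions is where the speedup over a naive one-digit-at-a-time loop comes from.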
- -#ifndef RAPIDJSON_INTERNAL_META_H_ -#define RAPIDJSON_INTERNAL_META_H_ - -#include "../rapidjson.h" - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -#endif -#if defined(_MSC_VER) -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(6334) -#endif - -#if RAPIDJSON_HAS_CXX11_TYPETRAITS -#include <type_traits> -#endif - -//@cond RAPIDJSON_INTERNAL -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching -template <typename T> struct Void { typedef void Type; }; - -/////////////////////////////////////////////////////////////////////////////// -// BoolType, TrueType, FalseType -// -template <bool Cond> struct BoolType { - static const bool Value = Cond; - typedef BoolType Type; -}; -typedef BoolType<true> TrueType; -typedef BoolType<false> FalseType; - - -/////////////////////////////////////////////////////////////////////////////// -// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr -// - -template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; }; -template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; }; -template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {}; -template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {}; - -template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {}; -template <> struct AndExprCond<true, true> : TrueType {}; -template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {}; -template <> struct OrExprCond<false, false> : FalseType {}; - -template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {}; -template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {}; -template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {}; -template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {}; - - -/////////////////////////////////////////////////////////////////////////////// -// AddConst, MaybeAddConst, RemoveConst -template <typename T> struct AddConst { typedef const T Type; }; -template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {}; -template <typename T> struct RemoveConst { typedef T Type; }; -template <typename T> struct RemoveConst<const T> { typedef T Type; }; - - -/////////////////////////////////////////////////////////////////////////////// -// IsSame, IsConst, IsMoreConst, IsPointer -// -template <typename T, typename U> struct IsSame : FalseType {}; -template <typename T> struct IsSame<T, T> : TrueType {}; - -template <typename T> struct IsConst : FalseType {}; -template <typename T> struct IsConst<const T> : TrueType {}; - -template <typename CT, typename T> -struct IsMoreConst - : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>, - BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {}; - -template <typename T> struct IsPointer : FalseType {}; -template <typename T> struct IsPointer<T*> : TrueType {}; - -/////////////////////////////////////////////////////////////////////////////// -// IsBaseOf -// -#if RAPIDJSON_HAS_CXX11_TYPETRAITS - -template <typename B, typename D> struct IsBaseOf - : BoolType< ::std::is_base_of<B,D>::value> {}; - -#else // simplified version adopted from Boost - -template<typename B, typename D> struct IsBaseOfImpl { - RAPIDJSON_STATIC_ASSERT(sizeof(B) 
!= 0); - RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0); - - typedef char (&Yes)[1]; - typedef char (&No) [2]; - - template <typename T> - static Yes Check(const D*, T); - static No Check(const B*, int); - - struct Host { - operator const B*() const; - operator const D*(); - }; - - enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) }; -}; - -template <typename B, typename D> struct IsBaseOf - : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {}; - -#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS - - -////////////////////////////////////////////////////////////////////////// -// EnableIf / DisableIf -// -template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; }; -template <typename T> struct EnableIfCond<false, T> { /* empty */ }; - -template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; }; -template <typename T> struct DisableIfCond<true, T> { /* empty */ }; - -template <typename Condition, typename T = void> -struct EnableIf : EnableIfCond<Condition::Value, T> {}; - -template <typename Condition, typename T = void> -struct DisableIf : DisableIfCond<Condition::Value, T> {}; - -// SFINAE helpers -struct SfinaeTag {}; -template <typename T> struct RemoveSfinaeTag; -template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; }; - -#define RAPIDJSON_REMOVEFPTR_(type) \ - typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \ - < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type - -#define RAPIDJSON_ENABLEIF(cond) \ - typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \ - <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL - -#define RAPIDJSON_DISABLEIF(cond) \ - typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \ - <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL - -#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \ - typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \ - <RAPIDJSON_REMOVEFPTR_(cond), \ - RAPIDJSON_REMOVEFPTR_(returntype)>::Type - -#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \ - typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \ - <RAPIDJSON_REMOVEFPTR_(cond), \ - RAPIDJSON_REMOVEFPTR_(returntype)>::Type - -} // namespace internal -RAPIDJSON_NAMESPACE_END -//@endcond - -#if defined(__GNUC__) || defined(_MSC_VER) -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_INTERNAL_META_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/pow10.h b/ext/librethinkdbxx/src/rapidjson/internal/pow10.h deleted file mode 100644 index 02f475d7..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/pow10.h +++ /dev/null @@ -1,55 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_POW10_ -#define RAPIDJSON_POW10_ - -#include "../rapidjson.h" - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -//! Computes integer powers of 10 in double (10.0^n). -/*! 
This function uses lookup table for fast and accurate results. - \param n non-negative exponent. Must <= 308. - \return 10.0^n -*/ -inline double Pow10(int n) { - static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes - 1e+0, - 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, - 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, - 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, - 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, - 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, - 1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120, - 1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140, - 1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160, - 1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180, - 1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200, - 1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220, - 1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240, - 1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260, - 1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280, - 1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300, - 1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308 - }; - RAPIDJSON_ASSERT(n >= 0 && n <= 308); - return e[n]; -} - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_POW10_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/regex.h b/ext/librethinkdbxx/src/rapidjson/internal/regex.h deleted file mode 100644 index 422a5240..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/regex.h +++ /dev/null @@ -1,701 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
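The Pow10() helper removed above is nothing more than a bounds-checked table of exact double powers of ten, used when scaling a parsed significand by its decimal exponent. A scaled-down sketch under the same contract (Pow10Small is an illustrative name, and the table is deliberately short):

#include <cassert>
#include <cstdio>

static double Pow10Small(int n) {
    static const double e[] = {               // 1e0 .. 1e15, all exactly representable
        1e+0, 1e+1, 1e+2,  1e+3,  1e+4,  1e+5,  1e+6,  1e+7,
        1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15
    };
    assert(n >= 0 && n <= 15);                 // caller guarantees the range
    return e[n];
}

int main() {
    // A toy use, mirroring how a number parser scales "123e5":
    const double significand = 123.0;
    std::printf("%.17g\n", significand * Pow10Small(5));   // 12300000
    return 0;
}

The caller, not the function, guarantees that the exponent stays inside the table, which is why the removed version only asserts.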
- -#ifndef RAPIDJSON_INTERNAL_REGEX_H_ -#define RAPIDJSON_INTERNAL_REGEX_H_ - -#include "../allocators.h" -#include "../stream.h" -#include "stack.h" - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -RAPIDJSON_DIAG_OFF(switch-enum) -RAPIDJSON_DIAG_OFF(implicit-fallthrough) -#endif - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated -#endif - -#ifndef RAPIDJSON_REGEX_VERBOSE -#define RAPIDJSON_REGEX_VERBOSE 0 -#endif - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -/////////////////////////////////////////////////////////////////////////////// -// GenericRegex - -static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1 -static const SizeType kRegexInvalidRange = ~SizeType(0); - -//! Regular expression engine with subset of ECMAscript grammar. -/*! - Supported regular expression syntax: - - \c ab Concatenation - - \c a|b Alternation - - \c a? Zero or one - - \c a* Zero or more - - \c a+ One or more - - \c a{3} Exactly 3 times - - \c a{3,} At least 3 times - - \c a{3,5} 3 to 5 times - - \c (ab) Grouping - - \c ^a At the beginning - - \c a$ At the end - - \c . Any character - - \c [abc] Character classes - - \c [a-c] Character class range - - \c [a-z0-9_] Character class combination - - \c [^abc] Negated character classes - - \c [^a-c] Negated character class range - - \c [\b] Backspace (U+0008) - - \c \\| \\\\ ... Escape characters - - \c \\f Form feed (U+000C) - - \c \\n Line feed (U+000A) - - \c \\r Carriage return (U+000D) - - \c \\t Tab (U+0009) - - \c \\v Vertical tab (U+000B) - - \note This is a Thompson NFA engine, implemented with reference to - Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).", - https://swtch.com/~rsc/regexp/regexp1.html -*/ -template <typename Encoding, typename Allocator = CrtAllocator> -class GenericRegex { -public: - typedef typename Encoding::Ch Ch; - - GenericRegex(const Ch* source, Allocator* allocator = 0) : - states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), - stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_() - { - GenericStringStream<Encoding> ss(source); - DecodedStream<GenericStringStream<Encoding> > ds(ss); - Parse(ds); - } - - ~GenericRegex() { - Allocator::Free(stateSet_); - } - - bool IsValid() const { - return root_ != kRegexInvalidState; - } - - template <typename InputStream> - bool Match(InputStream& is) const { - return SearchWithAnchoring(is, true, true); - } - - bool Match(const Ch* s) const { - GenericStringStream<Encoding> is(s); - return Match(is); - } - - template <typename InputStream> - bool Search(InputStream& is) const { - return SearchWithAnchoring(is, anchorBegin_, anchorEnd_); - } - - bool Search(const Ch* s) const { - GenericStringStream<Encoding> is(s); - return Search(is); - } - -private: - enum Operator { - kZeroOrOne, - kZeroOrMore, - kOneOrMore, - kConcatenation, - kAlternation, - kLeftParenthesis - }; - - static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.' 
- static const unsigned kRangeCharacterClass = 0xFFFFFFFE; - static const unsigned kRangeNegationFlag = 0x80000000; - - struct Range { - unsigned start; // - unsigned end; - SizeType next; - }; - - struct State { - SizeType out; //!< Equals to kInvalid for matching state - SizeType out1; //!< Equals to non-kInvalid for split - SizeType rangeStart; - unsigned codepoint; - }; - - struct Frag { - Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {} - SizeType start; - SizeType out; //!< link-list of all output states - SizeType minIndex; - }; - - template <typename SourceStream> - class DecodedStream { - public: - DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); } - unsigned Peek() { return codepoint_; } - unsigned Take() { - unsigned c = codepoint_; - if (c) // No further decoding when '\0' - Decode(); - return c; - } - - private: - void Decode() { - if (!Encoding::Decode(ss_, &codepoint_)) - codepoint_ = 0; - } - - SourceStream& ss_; - unsigned codepoint_; - }; - - State& GetState(SizeType index) { - RAPIDJSON_ASSERT(index < stateCount_); - return states_.template Bottom<State>()[index]; - } - - const State& GetState(SizeType index) const { - RAPIDJSON_ASSERT(index < stateCount_); - return states_.template Bottom<State>()[index]; - } - - Range& GetRange(SizeType index) { - RAPIDJSON_ASSERT(index < rangeCount_); - return ranges_.template Bottom<Range>()[index]; - } - - const Range& GetRange(SizeType index) const { - RAPIDJSON_ASSERT(index < rangeCount_); - return ranges_.template Bottom<Range>()[index]; - } - - template <typename InputStream> - void Parse(DecodedStream<InputStream>& ds) { - Allocator allocator; - Stack<Allocator> operandStack(&allocator, 256); // Frag - Stack<Allocator> operatorStack(&allocator, 256); // Operator - Stack<Allocator> atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis) - - *atomCountStack.template Push<unsigned>() = 0; - - unsigned codepoint; - while (ds.Peek() != 0) { - switch (codepoint = ds.Take()) { - case '^': - anchorBegin_ = true; - break; - - case '$': - anchorEnd_ = true; - break; - - case '|': - while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() < kAlternation) - if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1))) - return; - *operatorStack.template Push<Operator>() = kAlternation; - *atomCountStack.template Top<unsigned>() = 0; - break; - - case '(': - *operatorStack.template Push<Operator>() = kLeftParenthesis; - *atomCountStack.template Push<unsigned>() = 0; - break; - - case ')': - while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() != kLeftParenthesis) - if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1))) - return; - if (operatorStack.Empty()) - return; - operatorStack.template Pop<Operator>(1); - atomCountStack.template Pop<unsigned>(1); - ImplicitConcatenation(atomCountStack, operatorStack); - break; - - case '?': - if (!Eval(operandStack, kZeroOrOne)) - return; - break; - - case '*': - if (!Eval(operandStack, kZeroOrMore)) - return; - break; - - case '+': - if (!Eval(operandStack, kOneOrMore)) - return; - break; - - case '{': - { - unsigned n, m; - if (!ParseUnsigned(ds, &n)) - return; - - if (ds.Peek() == ',') { - ds.Take(); - if (ds.Peek() == '}') - m = kInfinityQuantifier; - else if (!ParseUnsigned(ds, &m) || m < n) - return; - } - else - m = n; - - if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}') - return; - ds.Take(); - } - break; - - case '.': - PushOperand(operandStack, kAnyCharacterClass); - 
ImplicitConcatenation(atomCountStack, operatorStack); - break; - - case '[': - { - SizeType range; - if (!ParseRange(ds, &range)) - return; - SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass); - GetState(s).rangeStart = range; - *operandStack.template Push<Frag>() = Frag(s, s, s); - } - ImplicitConcatenation(atomCountStack, operatorStack); - break; - - case '\\': // Escape character - if (!CharacterEscape(ds, &codepoint)) - return; // Unsupported escape character - // fall through to default - - default: // Pattern character - PushOperand(operandStack, codepoint); - ImplicitConcatenation(atomCountStack, operatorStack); - } - } - - while (!operatorStack.Empty()) - if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1))) - return; - - // Link the operand to matching state. - if (operandStack.GetSize() == sizeof(Frag)) { - Frag* e = operandStack.template Pop<Frag>(1); - Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0)); - root_ = e->start; - -#if RAPIDJSON_REGEX_VERBOSE - printf("root: %d\n", root_); - for (SizeType i = 0; i < stateCount_ ; i++) { - State& s = GetState(i); - printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint); - } - printf("\n"); -#endif - } - - // Preallocate buffer for SearchWithAnchoring() - RAPIDJSON_ASSERT(stateSet_ == 0); - if (stateCount_ > 0) { - stateSet_ = static_cast<unsigned*>(states_.GetAllocator().Malloc(GetStateSetSize())); - state0_.template Reserve<SizeType>(stateCount_); - state1_.template Reserve<SizeType>(stateCount_); - } - } - - SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) { - State* s = states_.template Push<State>(); - s->out = out; - s->out1 = out1; - s->codepoint = codepoint; - s->rangeStart = kRegexInvalidRange; - return stateCount_++; - } - - void PushOperand(Stack<Allocator>& operandStack, unsigned codepoint) { - SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint); - *operandStack.template Push<Frag>() = Frag(s, s, s); - } - - void ImplicitConcatenation(Stack<Allocator>& atomCountStack, Stack<Allocator>& operatorStack) { - if (*atomCountStack.template Top<unsigned>()) - *operatorStack.template Push<Operator>() = kConcatenation; - (*atomCountStack.template Top<unsigned>())++; - } - - SizeType Append(SizeType l1, SizeType l2) { - SizeType old = l1; - while (GetState(l1).out != kRegexInvalidState) - l1 = GetState(l1).out; - GetState(l1).out = l2; - return old; - } - - void Patch(SizeType l, SizeType s) { - for (SizeType next; l != kRegexInvalidState; l = next) { - next = GetState(l).out; - GetState(l).out = s; - } - } - - bool Eval(Stack<Allocator>& operandStack, Operator op) { - switch (op) { - case kConcatenation: - RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2); - { - Frag e2 = *operandStack.template Pop<Frag>(1); - Frag e1 = *operandStack.template Pop<Frag>(1); - Patch(e1.out, e2.start); - *operandStack.template Push<Frag>() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex)); - } - return true; - - case kAlternation: - if (operandStack.GetSize() >= sizeof(Frag) * 2) { - Frag e2 = *operandStack.template Pop<Frag>(1); - Frag e1 = *operandStack.template Pop<Frag>(1); - SizeType s = NewState(e1.start, e2.start, 0); - *operandStack.template Push<Frag>() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex)); - return true; - } - return false; - - case kZeroOrOne: - if (operandStack.GetSize() >= sizeof(Frag)) { - Frag e = *operandStack.template Pop<Frag>(1); - SizeType s = 
NewState(kRegexInvalidState, e.start, 0); - *operandStack.template Push<Frag>() = Frag(s, Append(e.out, s), e.minIndex); - return true; - } - return false; - - case kZeroOrMore: - if (operandStack.GetSize() >= sizeof(Frag)) { - Frag e = *operandStack.template Pop<Frag>(1); - SizeType s = NewState(kRegexInvalidState, e.start, 0); - Patch(e.out, s); - *operandStack.template Push<Frag>() = Frag(s, s, e.minIndex); - return true; - } - return false; - - default: - RAPIDJSON_ASSERT(op == kOneOrMore); - if (operandStack.GetSize() >= sizeof(Frag)) { - Frag e = *operandStack.template Pop<Frag>(1); - SizeType s = NewState(kRegexInvalidState, e.start, 0); - Patch(e.out, s); - *operandStack.template Push<Frag>() = Frag(e.start, s, e.minIndex); - return true; - } - return false; - } - } - - bool EvalQuantifier(Stack<Allocator>& operandStack, unsigned n, unsigned m) { - RAPIDJSON_ASSERT(n <= m); - RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag)); - - if (n == 0) { - if (m == 0) // a{0} not support - return false; - else if (m == kInfinityQuantifier) - Eval(operandStack, kZeroOrMore); // a{0,} -> a* - else { - Eval(operandStack, kZeroOrOne); // a{0,5} -> a? - for (unsigned i = 0; i < m - 1; i++) - CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a? - for (unsigned i = 0; i < m - 1; i++) - Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a? - } - return true; - } - - for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a - CloneTopOperand(operandStack); - - if (m == kInfinityQuantifier) - Eval(operandStack, kOneOrMore); // a{3,} -> a a a+ - else if (m > n) { - CloneTopOperand(operandStack); // a{3,5} -> a a a a - Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a? - for (unsigned i = n; i < m - 1; i++) - CloneTopOperand(operandStack); // a{3,5} -> a a a a? a? - for (unsigned i = n; i < m; i++) - Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a? - } - - for (unsigned i = 0; i < n - 1; i++) - Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3.5} -> aaaa?a? - - return true; - } - - static SizeType Min(SizeType a, SizeType b) { return a < b ? 
a : b; } - - void CloneTopOperand(Stack<Allocator>& operandStack) { - const Frag src = *operandStack.template Top<Frag>(); // Copy constructor to prevent invalidation - SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src->minIndex, stateCount_) - State* s = states_.template Push<State>(count); - memcpy(s, &GetState(src.minIndex), count * sizeof(State)); - for (SizeType j = 0; j < count; j++) { - if (s[j].out != kRegexInvalidState) - s[j].out += count; - if (s[j].out1 != kRegexInvalidState) - s[j].out1 += count; - } - *operandStack.template Push<Frag>() = Frag(src.start + count, src.out + count, src.minIndex + count); - stateCount_ += count; - } - - template <typename InputStream> - bool ParseUnsigned(DecodedStream<InputStream>& ds, unsigned* u) { - unsigned r = 0; - if (ds.Peek() < '0' || ds.Peek() > '9') - return false; - while (ds.Peek() >= '0' && ds.Peek() <= '9') { - if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295 - return false; // overflow - r = r * 10 + (ds.Take() - '0'); - } - *u = r; - return true; - } - - template <typename InputStream> - bool ParseRange(DecodedStream<InputStream>& ds, SizeType* range) { - bool isBegin = true; - bool negate = false; - int step = 0; - SizeType start = kRegexInvalidRange; - SizeType current = kRegexInvalidRange; - unsigned codepoint; - while ((codepoint = ds.Take()) != 0) { - if (isBegin) { - isBegin = false; - if (codepoint == '^') { - negate = true; - continue; - } - } - - switch (codepoint) { - case ']': - if (start == kRegexInvalidRange) - return false; // Error: nothing inside [] - if (step == 2) { // Add trailing '-' - SizeType r = NewRange('-'); - RAPIDJSON_ASSERT(current != kRegexInvalidRange); - GetRange(current).next = r; - } - if (negate) - GetRange(start).start |= kRangeNegationFlag; - *range = start; - return true; - - case '\\': - if (ds.Peek() == 'b') { - ds.Take(); - codepoint = 0x0008; // Escape backspace character - } - else if (!CharacterEscape(ds, &codepoint)) - return false; - // fall through to default - - default: - switch (step) { - case 1: - if (codepoint == '-') { - step++; - break; - } - // fall through to step 0 for other characters - - case 0: - { - SizeType r = NewRange(codepoint); - if (current != kRegexInvalidRange) - GetRange(current).next = r; - if (start == kRegexInvalidRange) - start = r; - current = r; - } - step = 1; - break; - - default: - RAPIDJSON_ASSERT(step == 2); - GetRange(current).end = codepoint; - step = 0; - } - } - } - return false; - } - - SizeType NewRange(unsigned codepoint) { - Range* r = ranges_.template Push<Range>(); - r->start = r->end = codepoint; - r->next = kRegexInvalidRange; - return rangeCount_++; - } - - template <typename InputStream> - bool CharacterEscape(DecodedStream<InputStream>& ds, unsigned* escapedCodepoint) { - unsigned codepoint; - switch (codepoint = ds.Take()) { - case '^': - case '$': - case '|': - case '(': - case ')': - case '?': - case '*': - case '+': - case '.': - case '[': - case ']': - case '{': - case '}': - case '\\': - *escapedCodepoint = codepoint; return true; - case 'f': *escapedCodepoint = 0x000C; return true; - case 'n': *escapedCodepoint = 0x000A; return true; - case 'r': *escapedCodepoint = 0x000D; return true; - case 't': *escapedCodepoint = 0x0009; return true; - case 'v': *escapedCodepoint = 0x000B; return true; - default: - return false; // Unsupported escape character - } - } - - template <typename InputStream> - bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const 
{ - RAPIDJSON_ASSERT(IsValid()); - DecodedStream<InputStream> ds(is); - - state0_.Clear(); - Stack<Allocator> *current = &state0_, *next = &state1_; - const size_t stateSetSize = GetStateSetSize(); - std::memset(stateSet_, 0, stateSetSize); - - bool matched = AddState(*current, root_); - unsigned codepoint; - while (!current->Empty() && (codepoint = ds.Take()) != 0) { - std::memset(stateSet_, 0, stateSetSize); - next->Clear(); - matched = false; - for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) { - const State& sr = GetState(*s); - if (sr.codepoint == codepoint || - sr.codepoint == kAnyCharacterClass || - (sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint))) - { - matched = AddState(*next, sr.out) || matched; - if (!anchorEnd && matched) - return true; - } - if (!anchorBegin) - AddState(*next, root_); - } - internal::Swap(current, next); - } - - return matched; - } - - size_t GetStateSetSize() const { - return (stateCount_ + 31) / 32 * 4; - } - - // Return whether the added states is a match state - bool AddState(Stack<Allocator>& l, SizeType index) const { - RAPIDJSON_ASSERT(index != kRegexInvalidState); - - const State& s = GetState(index); - if (s.out1 != kRegexInvalidState) { // Split - bool matched = AddState(l, s.out); - return AddState(l, s.out1) || matched; - } - else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) { - stateSet_[index >> 5] |= (1 << (index & 31)); - *l.template PushUnsafe<SizeType>() = index; - } - return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation. - } - - bool MatchRange(SizeType rangeIndex, unsigned codepoint) const { - bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0; - while (rangeIndex != kRegexInvalidRange) { - const Range& r = GetRange(rangeIndex); - if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end) - return yes; - rangeIndex = r.next; - } - return !yes; - } - - Stack<Allocator> states_; - Stack<Allocator> ranges_; - SizeType root_; - SizeType stateCount_; - SizeType rangeCount_; - - static const unsigned kInfinityQuantifier = ~0u; - - // For SearchWithAnchoring() - uint32_t* stateSet_; // allocated by states_.GetAllocator() - mutable Stack<Allocator> state0_; - mutable Stack<Allocator> state1_; - bool anchorBegin_; - bool anchorEnd_; -}; - -typedef GenericRegex<UTF8<> > Regex; - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_INTERNAL_REGEX_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/stack.h b/ext/librethinkdbxx/src/rapidjson/internal/stack.h deleted file mode 100644 index 022c9aab..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/stack.h +++ /dev/null @@ -1,230 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_INTERNAL_STACK_H_ -#define RAPIDJSON_INTERNAL_STACK_H_ - -#include "../allocators.h" -#include "swap.h" - -#if defined(__clang__) -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(c++98-compat) -#endif - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -/////////////////////////////////////////////////////////////////////////////// -// Stack - -//! A type-unsafe stack for storing different types of data. -/*! \tparam Allocator Allocator for allocating stack memory. -*/ -template <typename Allocator> -class Stack { -public: - // Optimization note: Do not allocate memory for stack_ in constructor. - // Do it lazily when first Push() -> Expand() -> Resize(). - Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) { - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - Stack(Stack&& rhs) - : allocator_(rhs.allocator_), - ownAllocator_(rhs.ownAllocator_), - stack_(rhs.stack_), - stackTop_(rhs.stackTop_), - stackEnd_(rhs.stackEnd_), - initialCapacity_(rhs.initialCapacity_) - { - rhs.allocator_ = 0; - rhs.ownAllocator_ = 0; - rhs.stack_ = 0; - rhs.stackTop_ = 0; - rhs.stackEnd_ = 0; - rhs.initialCapacity_ = 0; - } -#endif - - ~Stack() { - Destroy(); - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - Stack& operator=(Stack&& rhs) { - if (&rhs != this) - { - Destroy(); - - allocator_ = rhs.allocator_; - ownAllocator_ = rhs.ownAllocator_; - stack_ = rhs.stack_; - stackTop_ = rhs.stackTop_; - stackEnd_ = rhs.stackEnd_; - initialCapacity_ = rhs.initialCapacity_; - - rhs.allocator_ = 0; - rhs.ownAllocator_ = 0; - rhs.stack_ = 0; - rhs.stackTop_ = 0; - rhs.stackEnd_ = 0; - rhs.initialCapacity_ = 0; - } - return *this; - } -#endif - - void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT { - internal::Swap(allocator_, rhs.allocator_); - internal::Swap(ownAllocator_, rhs.ownAllocator_); - internal::Swap(stack_, rhs.stack_); - internal::Swap(stackTop_, rhs.stackTop_); - internal::Swap(stackEnd_, rhs.stackEnd_); - internal::Swap(initialCapacity_, rhs.initialCapacity_); - } - - void Clear() { stackTop_ = stack_; } - - void ShrinkToFit() { - if (Empty()) { - // If the stack is empty, completely deallocate the memory. - Allocator::Free(stack_); - stack_ = 0; - stackTop_ = 0; - stackEnd_ = 0; - } - else - Resize(GetSize()); - } - - // Optimization note: try to minimize the size of this function for force inline. - // Expansion is run very infrequently, so it is moved to another (probably non-inline) function. 
- template<typename T> - RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) { - // Expand the stack if needed - if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_)) - Expand<T>(count); - } - - template<typename T> - RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) { - Reserve<T>(count); - return PushUnsafe<T>(count); - } - - template<typename T> - RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) { - RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_); - T* ret = reinterpret_cast<T*>(stackTop_); - stackTop_ += sizeof(T) * count; - return ret; - } - - template<typename T> - T* Pop(size_t count) { - RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T)); - stackTop_ -= count * sizeof(T); - return reinterpret_cast<T*>(stackTop_); - } - - template<typename T> - T* Top() { - RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); - return reinterpret_cast<T*>(stackTop_ - sizeof(T)); - } - - template<typename T> - const T* Top() const { - RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); - return reinterpret_cast<T*>(stackTop_ - sizeof(T)); - } - - template<typename T> - T* End() { return reinterpret_cast<T*>(stackTop_); } - - template<typename T> - const T* End() const { return reinterpret_cast<T*>(stackTop_); } - - template<typename T> - T* Bottom() { return reinterpret_cast<T*>(stack_); } - - template<typename T> - const T* Bottom() const { return reinterpret_cast<T*>(stack_); } - - bool HasAllocator() const { - return allocator_ != 0; - } - - Allocator& GetAllocator() { - RAPIDJSON_ASSERT(allocator_); - return *allocator_; - } - - bool Empty() const { return stackTop_ == stack_; } - size_t GetSize() const { return static_cast<size_t>(stackTop_ - stack_); } - size_t GetCapacity() const { return static_cast<size_t>(stackEnd_ - stack_); } - -private: - template<typename T> - void Expand(size_t count) { - // Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity. - size_t newCapacity; - if (stack_ == 0) { - if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); - newCapacity = initialCapacity_; - } else { - newCapacity = GetCapacity(); - newCapacity += (newCapacity + 1) / 2; - } - size_t newSize = GetSize() + sizeof(T) * count; - if (newCapacity < newSize) - newCapacity = newSize; - - Resize(newCapacity); - } - - void Resize(size_t newCapacity) { - const size_t size = GetSize(); // Backup the current size - stack_ = static_cast<char*>(allocator_->Realloc(stack_, GetCapacity(), newCapacity)); - stackTop_ = stack_ + size; - stackEnd_ = stack_ + newCapacity; - } - - void Destroy() { - Allocator::Free(stack_); - RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack - } - - // Prohibit copy constructor & assignment operator. - Stack(const Stack&); - Stack& operator=(const Stack&); - - Allocator* allocator_; - Allocator* ownAllocator_; - char *stack_; - char *stackTop_; - char *stackEnd_; - size_t initialCapacity_; -}; - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#if defined(__clang__) -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_STACK_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/strfunc.h b/ext/librethinkdbxx/src/rapidjson/internal/strfunc.h deleted file mode 100644 index 2edfae52..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/strfunc.h +++ /dev/null @@ -1,55 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 
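// A minimal usage sketch for the type-unsafe Stack<Allocator> from stack.h
// above: it stores raw bytes and lets the caller push and pop typed slots,
// which is how the regex engine keeps its Frag/Operator stacks. CrtAllocator
// and the include paths are assumptions taken from the surrounding headers.
#include <cassert>
#include "rapidjson/allocators.h"
#include "rapidjson/internal/stack.h"

int main() {
    using namespace rapidjson;

    CrtAllocator allocator;
    internal::Stack<CrtAllocator> stack(&allocator, 256);  // memory is allocated lazily on first Push()

    // Push two ints, then a double, into the same byte buffer.
    *stack.Push<int>() = 7;
    *stack.Push<int>() = 42;
    *stack.Push<double>() = 3.14;

    assert(stack.GetSize() == 2 * sizeof(int) + sizeof(double));  // GetSize() is in bytes

    // Pop in reverse order with the matching types -- the stack itself does
    // not remember what was pushed, so the caller must.
    double d = *stack.Pop<double>(1);
    int    b = *stack.Pop<int>(1);
    int    a = *stack.Pop<int>(1);
    assert(d == 3.14 && b == 42 && a == 7);
    assert(stack.Empty());
    return 0;
}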
-// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_ -#define RAPIDJSON_INTERNAL_STRFUNC_H_ - -#include "../stream.h" - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -//! Custom strlen() which works on different character types. -/*! \tparam Ch Character type (e.g. char, wchar_t, short) - \param s Null-terminated input string. - \return Number of characters in the string. - \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints. -*/ -template <typename Ch> -inline SizeType StrLen(const Ch* s) { - const Ch* p = s; - while (*p) ++p; - return SizeType(p - s); -} - -//! Returns number of code points in a encoded string. -template<typename Encoding> -bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) { - GenericStringStream<Encoding> is(s); - const typename Encoding::Ch* end = s + length; - SizeType count = 0; - while (is.src_ < end) { - unsigned codepoint; - if (!Encoding::Decode(is, &codepoint)) - return false; - count++; - } - *outCount = count; - return true; -} - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_INTERNAL_STRFUNC_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/strtod.h b/ext/librethinkdbxx/src/rapidjson/internal/strtod.h deleted file mode 100644 index 289c413b..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/strtod.h +++ /dev/null @@ -1,269 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
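// A minimal usage sketch for the helpers in strfunc.h above: StrLen() counts
// Ch units, while CountStringCodePoint() decodes the string and counts
// Unicode code points, so the two differ for multi-byte UTF-8 text. The
// include paths are assumptions; UTF8<> comes from the encodings header.
#include <cassert>
#include "rapidjson/encodings.h"
#include "rapidjson/internal/strfunc.h"

int main() {
    using namespace rapidjson;

    const char* s = "caf\xC3\xA9";                  // "café" encoded as UTF-8 (5 bytes)
    SizeType units = internal::StrLen(s);           // number of char units, like strlen()
    assert(units == 5);

    SizeType codepoints = 0;
    bool ok = internal::CountStringCodePoint<UTF8<> >(s, units, &codepoints);
    assert(ok && codepoints == 4);                  // 'c' 'a' 'f' U+00E9
    return 0;
}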
- -#ifndef RAPIDJSON_STRTOD_ -#define RAPIDJSON_STRTOD_ - -#include "ieee754.h" -#include "biginteger.h" -#include "diyfp.h" -#include "pow10.h" - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -inline double FastPath(double significand, int exp) { - if (exp < -308) - return 0.0; - else if (exp >= 0) - return significand * internal::Pow10(exp); - else - return significand / internal::Pow10(-exp); -} - -inline double StrtodNormalPrecision(double d, int p) { - if (p < -308) { - // Prevent expSum < -308, making Pow10(p) = 0 - d = FastPath(d, -308); - d = FastPath(d, p + 308); - } - else - d = FastPath(d, p); - return d; -} - -template <typename T> -inline T Min3(T a, T b, T c) { - T m = a; - if (m > b) m = b; - if (m > c) m = c; - return m; -} - -inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) { - const Double db(b); - const uint64_t bInt = db.IntegerSignificand(); - const int bExp = db.IntegerExponent(); - const int hExp = bExp - 1; - - int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0; - - // Adjust for decimal exponent - if (dExp >= 0) { - dS_Exp2 += dExp; - dS_Exp5 += dExp; - } - else { - bS_Exp2 -= dExp; - bS_Exp5 -= dExp; - hS_Exp2 -= dExp; - hS_Exp5 -= dExp; - } - - // Adjust for binary exponent - if (bExp >= 0) - bS_Exp2 += bExp; - else { - dS_Exp2 -= bExp; - hS_Exp2 -= bExp; - } - - // Adjust for half ulp exponent - if (hExp >= 0) - hS_Exp2 += hExp; - else { - dS_Exp2 -= hExp; - bS_Exp2 -= hExp; - } - - // Remove common power of two factor from all three scaled values - int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2); - dS_Exp2 -= common_Exp2; - bS_Exp2 -= common_Exp2; - hS_Exp2 -= common_Exp2; - - BigInteger dS = d; - dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2); - - BigInteger bS(bInt); - bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2); - - BigInteger hS(1); - hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2); - - BigInteger delta(0); - dS.Difference(bS, &delta); - - return delta.Compare(hS); -} - -inline bool StrtodFast(double d, int p, double* result) { - // Use fast path for string-to-double conversion if possible - // see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ - if (p > 22 && p < 22 + 16) { - // Fast Path Cases In Disguise - d *= internal::Pow10(p - 22); - p = 22; - } - - if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1 - *result = FastPath(d, p); - return true; - } - else - return false; -} - -// Compute an approximation and see if it is within 1/2 ULP -inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) { - uint64_t significand = 0; - size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999 - for (; i < length; i++) { - if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || - (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5')) - break; - significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0'); - } - - if (i < length && decimals[i] >= '5') // Rounding - significand++; - - size_t remaining = length - i; - const unsigned kUlpShift = 3; - const unsigned kUlp = 1 << kUlpShift; - int64_t error = (remaining == 0) ? 
0 : kUlp / 2; - - DiyFp v(significand, 0); - v = v.Normalize(); - error <<= -v.e; - - const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(i) + exp; - - int actualExp; - DiyFp cachedPower = GetCachedPower10(dExp, &actualExp); - if (actualExp != dExp) { - static const DiyFp kPow10[] = { - DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1 - DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2 - DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3 - DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4 - DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5 - DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6 - DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7 - }; - int adjustment = dExp - actualExp - 1; - RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7); - v = v * kPow10[adjustment]; - if (length + static_cast<unsigned>(adjustment)> 19u) // has more digits than decimal digits in 64-bit - error += kUlp / 2; - } - - v = v * cachedPower; - - error += kUlp + (error == 0 ? 0 : 1); - - const int oldExp = v.e; - v = v.Normalize(); - error <<= oldExp - v.e; - - const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e); - unsigned precisionSize = 64 - effectiveSignificandSize; - if (precisionSize + kUlpShift >= 64) { - unsigned scaleExp = (precisionSize + kUlpShift) - 63; - v.f >>= scaleExp; - v.e += scaleExp; - error = (error >> scaleExp) + 1 + static_cast<int>(kUlp); - precisionSize -= scaleExp; - } - - DiyFp rounded(v.f >> precisionSize, v.e + static_cast<int>(precisionSize)); - const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp; - const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp; - if (precisionBits >= halfWay + static_cast<unsigned>(error)) { - rounded.f++; - if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340) - rounded.f >>= 1; - rounded.e++; - } - } - - *result = rounded.ToDouble(); - - return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error); -} - -inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) { - const BigInteger dInt(decimals, length); - const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(length) + exp; - Double a(approx); - int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp); - if (cmp < 0) - return a.Value(); // within half ULP - else if (cmp == 0) { - // Round towards even - if (a.Significand() & 1) - return a.NextPositiveDouble(); - else - return a.Value(); - } - else // adjustment - return a.NextPositiveDouble(); -} - -inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) { - RAPIDJSON_ASSERT(d >= 0.0); - RAPIDJSON_ASSERT(length >= 1); - - double result; - if (StrtodFast(d, p, &result)) - return result; - - // Trim leading zeros - while (*decimals == '0' && length > 1) { - length--; - decimals++; - decimalPosition--; - } - - // Trim trailing zeros - while (decimals[length - 1] == '0' && length > 1) { - length--; - decimalPosition--; - exp++; - } - - // Trim right-most digits - const int kMaxDecimalDigit = 780; - if (static_cast<int>(length) > kMaxDecimalDigit) { - int delta = (static_cast<int>(length) - kMaxDecimalDigit); - exp += delta; - decimalPosition -= static_cast<unsigned>(delta); - length = kMaxDecimalDigit; - } - - // If too small, underflow to zero - 
if (int(length) + exp < -324) - return 0.0; - - if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result)) - return result; - - // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison - return StrtodBigInteger(result, decimals, length, decimalPosition, exp); -} - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_STRTOD_ diff --git a/ext/librethinkdbxx/src/rapidjson/internal/swap.h b/ext/librethinkdbxx/src/rapidjson/internal/swap.h deleted file mode 100644 index 666e49f9..00000000 --- a/ext/librethinkdbxx/src/rapidjson/internal/swap.h +++ /dev/null @@ -1,46 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_INTERNAL_SWAP_H_ -#define RAPIDJSON_INTERNAL_SWAP_H_ - -#include "../rapidjson.h" - -#if defined(__clang__) -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(c++98-compat) -#endif - -RAPIDJSON_NAMESPACE_BEGIN -namespace internal { - -//! Custom swap() to avoid dependency on C++ <algorithm> header -/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only. - \note This has the same semantics as std::swap(). -*/ -template <typename T> -inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT { - T tmp = a; - a = b; - b = tmp; -} - -} // namespace internal -RAPIDJSON_NAMESPACE_END - -#if defined(__clang__) -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_INTERNAL_SWAP_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/istreamwrapper.h b/ext/librethinkdbxx/src/rapidjson/istreamwrapper.h deleted file mode 100644 index f5fe2897..00000000 --- a/ext/librethinkdbxx/src/rapidjson/istreamwrapper.h +++ /dev/null @@ -1,115 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_ISTREAMWRAPPER_H_ -#define RAPIDJSON_ISTREAMWRAPPER_H_ - -#include "stream.h" -#include <iosfwd> - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept. -/*! 
- The classes can be wrapped including but not limited to: - - - \c std::istringstream - - \c std::stringstream - - \c std::wistringstream - - \c std::wstringstream - - \c std::ifstream - - \c std::fstream - - \c std::wifstream - - \c std::wfstream - - \tparam StreamType Class derived from \c std::basic_istream. -*/ - -template <typename StreamType> -class BasicIStreamWrapper { -public: - typedef typename StreamType::char_type Ch; - BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {} - - Ch Peek() const { - typename StreamType::int_type c = stream_.peek(); - return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast<Ch>(c) : '\0'; - } - - Ch Take() { - typename StreamType::int_type c = stream_.get(); - if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) { - count_++; - return static_cast<Ch>(c); - } - else - return '\0'; - } - - // tellg() may return -1 when failed. So we count by ourself. - size_t Tell() const { return count_; } - - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - - // For encoding detection only. - const Ch* Peek4() const { - RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream. - int i; - bool hasError = false; - for (i = 0; i < 4; ++i) { - typename StreamType::int_type c = stream_.get(); - if (c == StreamType::traits_type::eof()) { - hasError = true; - stream_.clear(); - break; - } - peekBuffer_[i] = static_cast<Ch>(c); - } - for (--i; i >= 0; --i) - stream_.putback(peekBuffer_[i]); - return !hasError ? peekBuffer_ : 0; - } - -private: - BasicIStreamWrapper(const BasicIStreamWrapper&); - BasicIStreamWrapper& operator=(const BasicIStreamWrapper&); - - StreamType& stream_; - size_t count_; //!< Number of characters read. Note: - mutable Ch peekBuffer_[4]; -}; - -typedef BasicIStreamWrapper<std::istream> IStreamWrapper; -typedef BasicIStreamWrapper<std::wistream> WIStreamWrapper; - -#if defined(__clang__) || defined(_MSC_VER) -RAPIDJSON_DIAG_POP -#endif - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_ISTREAMWRAPPER_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/memorybuffer.h b/ext/librethinkdbxx/src/rapidjson/memorybuffer.h deleted file mode 100644 index 39bee1de..00000000 --- a/ext/librethinkdbxx/src/rapidjson/memorybuffer.h +++ /dev/null @@ -1,70 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_MEMORYBUFFER_H_ -#define RAPIDJSON_MEMORYBUFFER_H_ - -#include "stream.h" -#include "internal/stack.h" - -RAPIDJSON_NAMESPACE_BEGIN - -//! Represents an in-memory output byte stream. -/*! - This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream. 
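// A minimal usage sketch for BasicIStreamWrapper from istreamwrapper.h above:
// it adapts any std::basic_istream to RapidJSON's Stream concept
// (Peek/Take/Tell), so standard streams can feed the parser. Here it is
// driven by hand on a std::istringstream; the include path is an assumption.
#include <cassert>
#include <sstream>
#include "rapidjson/istreamwrapper.h"

int main() {
    std::istringstream src("{}");
    rapidjson::IStreamWrapper is(src);   // BasicIStreamWrapper<std::istream>

    assert(is.Peek() == '{');            // Peek() does not consume
    assert(is.Take() == '{');            // Take() consumes and counts
    assert(is.Take() == '}');
    assert(is.Peek() == '\0');           // '\0' signals end of input
    assert(is.Tell() == 2);              // characters consumed so far
    return 0;
}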
- - It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file. - - Differences between MemoryBuffer and StringBuffer: - 1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer. - 2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator. - - \tparam Allocator type for allocating memory buffer. - \note implements Stream concept -*/ -template <typename Allocator = CrtAllocator> -struct GenericMemoryBuffer { - typedef char Ch; // byte - - GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} - - void Put(Ch c) { *stack_.template Push<Ch>() = c; } - void Flush() {} - - void Clear() { stack_.Clear(); } - void ShrinkToFit() { stack_.ShrinkToFit(); } - Ch* Push(size_t count) { return stack_.template Push<Ch>(count); } - void Pop(size_t count) { stack_.template Pop<Ch>(count); } - - const Ch* GetBuffer() const { - return stack_.template Bottom<Ch>(); - } - - size_t GetSize() const { return stack_.GetSize(); } - - static const size_t kDefaultCapacity = 256; - mutable internal::Stack<Allocator> stack_; -}; - -typedef GenericMemoryBuffer<> MemoryBuffer; - -//! Implement specialized version of PutN() with memset() for better performance. -template<> -inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) { - std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c)); -} - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_MEMORYBUFFER_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/memorystream.h b/ext/librethinkdbxx/src/rapidjson/memorystream.h deleted file mode 100644 index 1d71d8a4..00000000 --- a/ext/librethinkdbxx/src/rapidjson/memorystream.h +++ /dev/null @@ -1,71 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_MEMORYSTREAM_H_ -#define RAPIDJSON_MEMORYSTREAM_H_ - -#include "stream.h" - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(unreachable-code) -RAPIDJSON_DIAG_OFF(missing-noreturn) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Represents an in-memory input byte stream. -/*! - This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream. - - It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file. - - Differences between MemoryStream and StringStream: - 1. StringStream has encoding but MemoryStream is a byte stream. - 2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source. - 3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4(). 
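// A minimal usage sketch for MemoryBuffer from memorybuffer.h above: a
// byte-oriented output stream backed by the internal Stack. Bytes written via
// Put()/PutN() accumulate in memory and are read back with
// GetBuffer()/GetSize() (no null terminator). The include path is an assumption.
#include <cassert>
#include <cstring>
#include "rapidjson/memorybuffer.h"

int main() {
    rapidjson::MemoryBuffer buf;

    buf.Put('o');
    buf.Put('k');
    rapidjson::PutN(buf, '!', 3);                        // specialized PutN() fills with memset()

    assert(buf.GetSize() == 5);                          // size in bytes, not null-terminated
    assert(std::memcmp(buf.GetBuffer(), "ok!!!", 5) == 0);

    buf.Clear();                                         // reuse the same backing storage
    assert(buf.GetSize() == 0);
    return 0;
}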
- \note implements Stream concept -*/ -struct MemoryStream { - typedef char Ch; // byte - - MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {} - - Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; } - Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; } - size_t Tell() const { return static_cast<size_t>(src_ - begin_); } - - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - - // For encoding detection only. - const Ch* Peek4() const { - return Tell() + 4 <= size_ ? src_ : 0; - } - - const Ch* src_; //!< Current read position. - const Ch* begin_; //!< Original head of the string. - const Ch* end_; //!< End of stream. - size_t size_; //!< Size of the stream. -}; - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_MEMORYBUFFER_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/msinttypes/inttypes.h b/ext/librethinkdbxx/src/rapidjson/msinttypes/inttypes.h deleted file mode 100644 index 18111286..00000000 --- a/ext/librethinkdbxx/src/rapidjson/msinttypes/inttypes.h +++ /dev/null @@ -1,316 +0,0 @@ -// ISO C9x compliant inttypes.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2013 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the product nor the names of its contributors may -// be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -// The above software in this distribution may have been modified by -// THL A29 Limited ("Tencent Modifications"). -// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" 
-#endif // _MSC_VER ] - -#ifndef _MSC_INTTYPES_H_ // [ -#define _MSC_INTTYPES_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#include "stdint.h" - -// miloyip: VC supports inttypes.h since VC2013 -#if _MSC_VER >= 1800 -#include <inttypes.h> -#else - -// 7.8 Format conversion of integer types - -typedef struct { - intmax_t quot; - intmax_t rem; -} imaxdiv_t; - -// 7.8.1 Macros for format specifiers - -#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 - -// The fprintf macros for signed integers are: -#define PRId8 "d" -#define PRIi8 "i" -#define PRIdLEAST8 "d" -#define PRIiLEAST8 "i" -#define PRIdFAST8 "d" -#define PRIiFAST8 "i" - -#define PRId16 "hd" -#define PRIi16 "hi" -#define PRIdLEAST16 "hd" -#define PRIiLEAST16 "hi" -#define PRIdFAST16 "hd" -#define PRIiFAST16 "hi" - -#define PRId32 "I32d" -#define PRIi32 "I32i" -#define PRIdLEAST32 "I32d" -#define PRIiLEAST32 "I32i" -#define PRIdFAST32 "I32d" -#define PRIiFAST32 "I32i" - -#define PRId64 "I64d" -#define PRIi64 "I64i" -#define PRIdLEAST64 "I64d" -#define PRIiLEAST64 "I64i" -#define PRIdFAST64 "I64d" -#define PRIiFAST64 "I64i" - -#define PRIdMAX "I64d" -#define PRIiMAX "I64i" - -#define PRIdPTR "Id" -#define PRIiPTR "Ii" - -// The fprintf macros for unsigned integers are: -#define PRIo8 "o" -#define PRIu8 "u" -#define PRIx8 "x" -#define PRIX8 "X" -#define PRIoLEAST8 "o" -#define PRIuLEAST8 "u" -#define PRIxLEAST8 "x" -#define PRIXLEAST8 "X" -#define PRIoFAST8 "o" -#define PRIuFAST8 "u" -#define PRIxFAST8 "x" -#define PRIXFAST8 "X" - -#define PRIo16 "ho" -#define PRIu16 "hu" -#define PRIx16 "hx" -#define PRIX16 "hX" -#define PRIoLEAST16 "ho" -#define PRIuLEAST16 "hu" -#define PRIxLEAST16 "hx" -#define PRIXLEAST16 "hX" -#define PRIoFAST16 "ho" -#define PRIuFAST16 "hu" -#define PRIxFAST16 "hx" -#define PRIXFAST16 "hX" - -#define PRIo32 "I32o" -#define PRIu32 "I32u" -#define PRIx32 "I32x" -#define PRIX32 "I32X" -#define PRIoLEAST32 "I32o" -#define PRIuLEAST32 "I32u" -#define PRIxLEAST32 "I32x" -#define PRIXLEAST32 "I32X" -#define PRIoFAST32 "I32o" -#define PRIuFAST32 "I32u" -#define PRIxFAST32 "I32x" -#define PRIXFAST32 "I32X" - -#define PRIo64 "I64o" -#define PRIu64 "I64u" -#define PRIx64 "I64x" -#define PRIX64 "I64X" -#define PRIoLEAST64 "I64o" -#define PRIuLEAST64 "I64u" -#define PRIxLEAST64 "I64x" -#define PRIXLEAST64 "I64X" -#define PRIoFAST64 "I64o" -#define PRIuFAST64 "I64u" -#define PRIxFAST64 "I64x" -#define PRIXFAST64 "I64X" - -#define PRIoMAX "I64o" -#define PRIuMAX "I64u" -#define PRIxMAX "I64x" -#define PRIXMAX "I64X" - -#define PRIoPTR "Io" -#define PRIuPTR "Iu" -#define PRIxPTR "Ix" -#define PRIXPTR "IX" - -// The fscanf macros for signed integers are: -#define SCNd8 "d" -#define SCNi8 "i" -#define SCNdLEAST8 "d" -#define SCNiLEAST8 "i" -#define SCNdFAST8 "d" -#define SCNiFAST8 "i" - -#define SCNd16 "hd" -#define SCNi16 "hi" -#define SCNdLEAST16 "hd" -#define SCNiLEAST16 "hi" -#define SCNdFAST16 "hd" -#define SCNiFAST16 "hi" - -#define SCNd32 "ld" -#define SCNi32 "li" -#define SCNdLEAST32 "ld" -#define SCNiLEAST32 "li" -#define SCNdFAST32 "ld" -#define SCNiFAST32 "li" - -#define SCNd64 "I64d" -#define SCNi64 "I64i" -#define SCNdLEAST64 "I64d" -#define SCNiLEAST64 "I64i" -#define SCNdFAST64 "I64d" -#define SCNiFAST64 "I64i" - -#define SCNdMAX "I64d" -#define SCNiMAX "I64i" - -#ifdef _WIN64 // [ -# define SCNdPTR "I64d" -# define SCNiPTR "I64i" -#else // _WIN64 ][ -# define SCNdPTR "ld" -# define SCNiPTR "li" -#endif // _WIN64 ] - -// The fscanf macros for unsigned integers are: 
-#define SCNo8 "o" -#define SCNu8 "u" -#define SCNx8 "x" -#define SCNX8 "X" -#define SCNoLEAST8 "o" -#define SCNuLEAST8 "u" -#define SCNxLEAST8 "x" -#define SCNXLEAST8 "X" -#define SCNoFAST8 "o" -#define SCNuFAST8 "u" -#define SCNxFAST8 "x" -#define SCNXFAST8 "X" - -#define SCNo16 "ho" -#define SCNu16 "hu" -#define SCNx16 "hx" -#define SCNX16 "hX" -#define SCNoLEAST16 "ho" -#define SCNuLEAST16 "hu" -#define SCNxLEAST16 "hx" -#define SCNXLEAST16 "hX" -#define SCNoFAST16 "ho" -#define SCNuFAST16 "hu" -#define SCNxFAST16 "hx" -#define SCNXFAST16 "hX" - -#define SCNo32 "lo" -#define SCNu32 "lu" -#define SCNx32 "lx" -#define SCNX32 "lX" -#define SCNoLEAST32 "lo" -#define SCNuLEAST32 "lu" -#define SCNxLEAST32 "lx" -#define SCNXLEAST32 "lX" -#define SCNoFAST32 "lo" -#define SCNuFAST32 "lu" -#define SCNxFAST32 "lx" -#define SCNXFAST32 "lX" - -#define SCNo64 "I64o" -#define SCNu64 "I64u" -#define SCNx64 "I64x" -#define SCNX64 "I64X" -#define SCNoLEAST64 "I64o" -#define SCNuLEAST64 "I64u" -#define SCNxLEAST64 "I64x" -#define SCNXLEAST64 "I64X" -#define SCNoFAST64 "I64o" -#define SCNuFAST64 "I64u" -#define SCNxFAST64 "I64x" -#define SCNXFAST64 "I64X" - -#define SCNoMAX "I64o" -#define SCNuMAX "I64u" -#define SCNxMAX "I64x" -#define SCNXMAX "I64X" - -#ifdef _WIN64 // [ -# define SCNoPTR "I64o" -# define SCNuPTR "I64u" -# define SCNxPTR "I64x" -# define SCNXPTR "I64X" -#else // _WIN64 ][ -# define SCNoPTR "lo" -# define SCNuPTR "lu" -# define SCNxPTR "lx" -# define SCNXPTR "lX" -#endif // _WIN64 ] - -#endif // __STDC_FORMAT_MACROS ] - -// 7.8.2 Functions for greatest-width integer types - -// 7.8.2.1 The imaxabs function -#define imaxabs _abs64 - -// 7.8.2.2 The imaxdiv function - -// This is modified version of div() function from Microsoft's div.c found -// in %MSVC.NET%\crt\src\div.c -#ifdef STATIC_IMAXDIV // [ -static -#else // STATIC_IMAXDIV ][ -_inline -#endif // STATIC_IMAXDIV ] -imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) -{ - imaxdiv_t result; - - result.quot = numer / denom; - result.rem = numer % denom; - - if (numer < 0 && result.rem > 0) { - // did division wrong; must fix up - ++result.quot; - result.rem -= denom; - } - - return result; -} - -// 7.8.2.3 The strtoimax and strtoumax functions -#define strtoimax _strtoi64 -#define strtoumax _strtoui64 - -// 7.8.2.4 The wcstoimax and wcstoumax functions -#define wcstoimax _wcstoi64 -#define wcstoumax _wcstoui64 - -#endif // _MSC_VER >= 1800 - -#endif // _MSC_INTTYPES_H_ ] diff --git a/ext/librethinkdbxx/src/rapidjson/msinttypes/stdint.h b/ext/librethinkdbxx/src/rapidjson/msinttypes/stdint.h deleted file mode 100644 index 3d4477b9..00000000 --- a/ext/librethinkdbxx/src/rapidjson/msinttypes/stdint.h +++ /dev/null @@ -1,300 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2013 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. 
Neither the name of the product nor the names of its contributors may -// be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -// The above software in this distribution may have been modified by -// THL A29 Limited ("Tencent Modifications"). -// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010. -#if _MSC_VER >= 1600 // [ -#include <stdint.h> - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -#undef INT8_C -#undef INT16_C -#undef INT32_C -#undef INT64_C -#undef UINT8_C -#undef UINT16_C -#undef UINT32_C -#undef UINT64_C - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>. -// Check out Issue 9 for the details. -#ifndef INTMAX_C // [ -# define INTMAX_C INT64_C -#endif // INTMAX_C ] -#ifndef UINTMAX_C // [ -# define UINTMAX_C UINT64_C -#endif // UINTMAX_C ] - -#endif // __STDC_CONSTANT_MACROS ] - -#else // ] _MSC_VER >= 1700 [ - -#include <limits.h> - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we have to wrap <wchar.h> include with 'extern "C++" {}' -// or compiler would give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#if defined(__cplusplus) && !defined(_M_ARM) -extern "C" { -#endif -# include <wchar.h> -#if defined(__cplusplus) && !defined(_M_ARM) -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. 
-#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer 
types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>. -// Check out Issue 9 for the details. -#ifndef INTMAX_C // [ -# define INTMAX_C INT64_C -#endif // INTMAX_C ] -#ifndef UINTMAX_C // [ -# define UINTMAX_C UINT64_C -#endif // UINTMAX_C ] - -#endif // __STDC_CONSTANT_MACROS ] - -#endif // _MSC_VER >= 1600 ] - -#endif // _MSC_STDINT_H_ ] diff --git a/ext/librethinkdbxx/src/rapidjson/ostreamwrapper.h b/ext/librethinkdbxx/src/rapidjson/ostreamwrapper.h deleted file mode 100644 index 6f4667c0..00000000 --- a/ext/librethinkdbxx/src/rapidjson/ostreamwrapper.h +++ /dev/null @@ -1,81 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_OSTREAMWRAPPER_H_ -#define RAPIDJSON_OSTREAMWRAPPER_H_ - -#include "stream.h" -#include <iosfwd> - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept. -/*! - The classes can be wrapped including but not limited to: - - - \c std::ostringstream - - \c std::stringstream - - \c std::wpstringstream - - \c std::wstringstream - - \c std::ifstream - - \c std::fstream - - \c std::wofstream - - \c std::wfstream - - \tparam StreamType Class derived from \c std::basic_ostream. 
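A minimal usage sketch of the wrapper defined just below, combined with the bundled SAX Writer (writer.h is assumed to sit alongside this header); only standard RapidJSON calls are used:

    #include <iostream>
    #include <sstream>
    #include "rapidjson/ostreamwrapper.h"
    #include "rapidjson/writer.h"

    int main() {
        std::ostringstream ss;
        rapidjson::OStreamWrapper osw(ss);                          // adapt the ostream to the Stream concept
        rapidjson::Writer<rapidjson::OStreamWrapper> writer(osw);   // any SAX writer can target the wrapper
        writer.StartObject();
        writer.Key("removed");
        writer.Bool(true);
        writer.EndObject();
        std::cout << ss.str() << "\n";                              // {"removed":true}
        return 0;
    }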
-*/ - -template <typename StreamType> -class BasicOStreamWrapper { -public: - typedef typename StreamType::char_type Ch; - BasicOStreamWrapper(StreamType& stream) : stream_(stream) {} - - void Put(Ch c) { - stream_.put(c); - } - - void Flush() { - stream_.flush(); - } - - // Not implemented - char Peek() const { RAPIDJSON_ASSERT(false); return 0; } - char Take() { RAPIDJSON_ASSERT(false); return 0; } - size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } - char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } - -private: - BasicOStreamWrapper(const BasicOStreamWrapper&); - BasicOStreamWrapper& operator=(const BasicOStreamWrapper&); - - StreamType& stream_; -}; - -typedef BasicOStreamWrapper<std::ostream> OStreamWrapper; -typedef BasicOStreamWrapper<std::wostream> WOStreamWrapper; - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_OSTREAMWRAPPER_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/pointer.h b/ext/librethinkdbxx/src/rapidjson/pointer.h deleted file mode 100644 index 0206ac1c..00000000 --- a/ext/librethinkdbxx/src/rapidjson/pointer.h +++ /dev/null @@ -1,1358 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_POINTER_H_ -#define RAPIDJSON_POINTER_H_ - -#include "document.h" -#include "internal/itoa.h" - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(switch-enum) -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -static const SizeType kPointerInvalidIndex = ~SizeType(0); //!< Represents an invalid index in GenericPointer::Token - -//! Error code of parsing. -/*! \ingroup RAPIDJSON_ERRORS - \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode -*/ -enum PointerParseErrorCode { - kPointerParseErrorNone = 0, //!< The parse is successful - - kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/' - kPointerParseErrorInvalidEscape, //!< Invalid escape - kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment - kPointerParseErrorCharacterMustPercentEncode //!< A character must percent encoded in URI fragment -}; - -/////////////////////////////////////////////////////////////////////////////// -// GenericPointer - -//! Represents a JSON Pointer. Use Pointer for UTF8 encoding and default allocator. -/*! - This class implements RFC 6901 "JavaScript Object Notation (JSON) Pointer" - (https://tools.ietf.org/html/rfc6901). - - A JSON pointer is for identifying a specific value in a JSON document - (GenericDocument). It can simplify coding of DOM tree manipulation, because it - can access multiple-level depth of DOM tree with single API call. - - After it parses a string representation (e.g. 
"/foo/0" or URI fragment - representation (e.g. "#/foo/0") into its internal representation (tokens), - it can be used to resolve a specific value in multiple documents, or sub-tree - of documents. - - Contrary to GenericValue, Pointer can be copy constructed and copy assigned. - Apart from assignment, a Pointer cannot be modified after construction. - - Although Pointer is very convenient, please aware that constructing Pointer - involves parsing and dynamic memory allocation. A special constructor with user- - supplied tokens eliminates these. - - GenericPointer depends on GenericDocument and GenericValue. - - \tparam ValueType The value type of the DOM tree. E.g. GenericValue<UTF8<> > - \tparam Allocator The allocator type for allocating memory for internal representation. - - \note GenericPointer uses same encoding of ValueType. - However, Allocator of GenericPointer is independent of Allocator of Value. -*/ -template <typename ValueType, typename Allocator = CrtAllocator> -class GenericPointer { -public: - typedef typename ValueType::EncodingType EncodingType; //!< Encoding type from Value - typedef typename ValueType::Ch Ch; //!< Character type from Value - - //! A token is the basic units of internal representation. - /*! - A JSON pointer string representation "/foo/123" is parsed to two tokens: - "foo" and 123. 123 will be represented in both numeric form and string form. - They are resolved according to the actual value type (object or array). - - For token that are not numbers, or the numeric value is out of bound - (greater than limits of SizeType), they are only treated as string form - (i.e. the token's index will be equal to kPointerInvalidIndex). - - This struct is public so that user can create a Pointer without parsing and - allocation, using a special constructor. - */ - struct Token { - const Ch* name; //!< Name of the token. It has null character at the end but it can contain null character. - SizeType length; //!< Length of the name. - SizeType index; //!< A valid array index, if it is not equal to kPointerInvalidIndex. - }; - - //!@name Constructors and destructor. - //@{ - - //! Default constructor. - GenericPointer(Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} - - //! Constructor that parses a string or URI fragment representation. - /*! - \param source A null-terminated, string or URI fragment representation of JSON pointer. - \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. - */ - explicit GenericPointer(const Ch* source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { - Parse(source, internal::StrLen(source)); - } - -#if RAPIDJSON_HAS_STDSTRING - //! Constructor that parses a string or URI fragment representation. - /*! - \param source A string or URI fragment representation of JSON pointer. - \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. - \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. 
- */ - explicit GenericPointer(const std::basic_string<Ch>& source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { - Parse(source.c_str(), source.size()); - } -#endif - - //! Constructor that parses a string or URI fragment representation, with length of the source string. - /*! - \param source A string or URI fragment representation of JSON pointer. - \param length Length of source. - \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. - \note Slightly faster than the overload without length. - */ - GenericPointer(const Ch* source, size_t length, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { - Parse(source, length); - } - - //! Constructor with user-supplied tokens. - /*! - This constructor let user supplies const array of tokens. - This prevents the parsing process and eliminates allocation. - This is preferred for memory constrained environments. - - \param tokens An constant array of tokens representing the JSON pointer. - \param tokenCount Number of tokens. - - \b Example - \code - #define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex } - #define INDEX(i) { #i, sizeof(#i) - 1, i } - - static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) }; - static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0])); - // Equivalent to static const Pointer p("/foo/123"); - - #undef NAME - #undef INDEX - \endcode - */ - GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast<Token*>(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} - - //! Copy constructor. - GenericPointer(const GenericPointer& rhs, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { - *this = rhs; - } - - //! Destructor. - ~GenericPointer() { - if (nameBuffer_) // If user-supplied tokens constructor is used, nameBuffer_ is nullptr and tokens_ are not deallocated. - Allocator::Free(tokens_); - RAPIDJSON_DELETE(ownAllocator_); - } - - //! Assignment operator. - GenericPointer& operator=(const GenericPointer& rhs) { - if (this != &rhs) { - // Do not delete ownAllcator - if (nameBuffer_) - Allocator::Free(tokens_); - - tokenCount_ = rhs.tokenCount_; - parseErrorOffset_ = rhs.parseErrorOffset_; - parseErrorCode_ = rhs.parseErrorCode_; - - if (rhs.nameBuffer_) - CopyFromRaw(rhs); // Normally parsed tokens. - else { - tokens_ = rhs.tokens_; // User supplied const tokens. - nameBuffer_ = 0; - } - } - return *this; - } - - //@} - - //!@name Append token - //@{ - - //! Append a token and return a new Pointer - /*! - \param token Token to be appended. - \param allocator Allocator for the newly return Pointer. - \return A new Pointer with appended token. - */ - GenericPointer Append(const Token& token, Allocator* allocator = 0) const { - GenericPointer r; - r.allocator_ = allocator; - Ch *p = r.CopyFromRaw(*this, 1, token.length + 1); - std::memcpy(p, token.name, (token.length + 1) * sizeof(Ch)); - r.tokens_[tokenCount_].name = p; - r.tokens_[tokenCount_].length = token.length; - r.tokens_[tokenCount_].index = token.index; - return r; - } - - //! 
Append a name token with length, and return a new Pointer - /*! - \param name Name to be appended. - \param length Length of name. - \param allocator Allocator for the newly return Pointer. - \return A new Pointer with appended token. - */ - GenericPointer Append(const Ch* name, SizeType length, Allocator* allocator = 0) const { - Token token = { name, length, kPointerInvalidIndex }; - return Append(token, allocator); - } - - //! Append a name token without length, and return a new Pointer - /*! - \param name Name (const Ch*) to be appended. - \param allocator Allocator for the newly return Pointer. - \return A new Pointer with appended token. - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >), (GenericPointer)) - Append(T* name, Allocator* allocator = 0) const { - return Append(name, StrLen(name), allocator); - } - -#if RAPIDJSON_HAS_STDSTRING - //! Append a name token, and return a new Pointer - /*! - \param name Name to be appended. - \param allocator Allocator for the newly return Pointer. - \return A new Pointer with appended token. - */ - GenericPointer Append(const std::basic_string<Ch>& name, Allocator* allocator = 0) const { - return Append(name.c_str(), static_cast<SizeType>(name.size()), allocator); - } -#endif - - //! Append a index token, and return a new Pointer - /*! - \param index Index to be appended. - \param allocator Allocator for the newly return Pointer. - \return A new Pointer with appended token. - */ - GenericPointer Append(SizeType index, Allocator* allocator = 0) const { - char buffer[21]; - char* end = sizeof(SizeType) == 4 ? internal::u32toa(index, buffer) : internal::u64toa(index, buffer); - SizeType length = static_cast<SizeType>(end - buffer); - buffer[length] = '\0'; - - if (sizeof(Ch) == 1) { - Token token = { reinterpret_cast<Ch*>(buffer), length, index }; - return Append(token, allocator); - } - else { - Ch name[21]; - for (size_t i = 0; i <= length; i++) - name[i] = buffer[i]; - Token token = { name, length, index }; - return Append(token, allocator); - } - } - - //! Append a token by value, and return a new Pointer - /*! - \param token token to be appended. - \param allocator Allocator for the newly return Pointer. - \return A new Pointer with appended token. - */ - GenericPointer Append(const ValueType& token, Allocator* allocator = 0) const { - if (token.IsString()) - return Append(token.GetString(), token.GetStringLength(), allocator); - else { - RAPIDJSON_ASSERT(token.IsUint64()); - RAPIDJSON_ASSERT(token.GetUint64() <= SizeType(~0)); - return Append(static_cast<SizeType>(token.GetUint64()), allocator); - } - } - - //!@name Handling Parse Error - //@{ - - //! Check whether this is a valid pointer. - bool IsValid() const { return parseErrorCode_ == kPointerParseErrorNone; } - - //! Get the parsing error offset in code unit. - size_t GetParseErrorOffset() const { return parseErrorOffset_; } - - //! Get the parsing error code. - PointerParseErrorCode GetParseErrorCode() const { return parseErrorCode_; } - - //@} - - //! Get the allocator of this pointer. - Allocator& GetAllocator() { return *allocator_; } - - //!@name Tokens - //@{ - - //! Get the token array (const version only). - const Token* GetTokens() const { return tokens_; } - - //! Get the number of tokens. - size_t GetTokenCount() const { return tokenCount_; } - - //@} - - //!@name Equality/inequality operators - //@{ - - //! Equality operator. - /*! 
- \note When any pointers are invalid, always returns false. - */ - bool operator==(const GenericPointer& rhs) const { - if (!IsValid() || !rhs.IsValid() || tokenCount_ != rhs.tokenCount_) - return false; - - for (size_t i = 0; i < tokenCount_; i++) { - if (tokens_[i].index != rhs.tokens_[i].index || - tokens_[i].length != rhs.tokens_[i].length || - (tokens_[i].length != 0 && std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch)* tokens_[i].length) != 0)) - { - return false; - } - } - - return true; - } - - //! Inequality operator. - /*! - \note When any pointers are invalid, always returns true. - */ - bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); } - - //@} - - //!@name Stringify - //@{ - - //! Stringify the pointer into string representation. - /*! - \tparam OutputStream Type of output stream. - \param os The output stream. - */ - template<typename OutputStream> - bool Stringify(OutputStream& os) const { - return Stringify<false, OutputStream>(os); - } - - //! Stringify the pointer into URI fragment representation. - /*! - \tparam OutputStream Type of output stream. - \param os The output stream. - */ - template<typename OutputStream> - bool StringifyUriFragment(OutputStream& os) const { - return Stringify<true, OutputStream>(os); - } - - //@} - - //!@name Create value - //@{ - - //! Create a value in a subtree. - /*! - If the value is not exist, it creates all parent values and a JSON Null value. - So it always succeed and return the newly created or existing value. - - Remind that it may change types of parents according to tokens, so it - potentially removes previously stored values. For example, if a document - was an array, and "/foo" is used to create a value, then the document - will be changed to an object, and all existing array elements are lost. - - \param root Root value of a DOM subtree to be resolved. It can be any value other than document root. - \param allocator Allocator for creating the values if the specified value or its parents are not exist. - \param alreadyExist If non-null, it stores whether the resolved value is already exist. - \return The resolved newly created (a JSON Null value), or already exists value. - */ - ValueType& Create(ValueType& root, typename ValueType::AllocatorType& allocator, bool* alreadyExist = 0) const { - RAPIDJSON_ASSERT(IsValid()); - ValueType* v = &root; - bool exist = true; - for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { - if (v->IsArray() && t->name[0] == '-' && t->length == 1) { - v->PushBack(ValueType().Move(), allocator); - v = &((*v)[v->Size() - 1]); - exist = false; - } - else { - if (t->index == kPointerInvalidIndex) { // must be object name - if (!v->IsObject()) - v->SetObject(); // Change to Object - } - else { // object name or array index - if (!v->IsArray() && !v->IsObject()) - v->SetArray(); // Change to Array - } - - if (v->IsArray()) { - if (t->index >= v->Size()) { - v->Reserve(t->index + 1, allocator); - while (t->index >= v->Size()) - v->PushBack(ValueType().Move(), allocator); - exist = false; - } - v = &((*v)[t->index]); - } - else { - typename ValueType::MemberIterator m = v->FindMember(GenericStringRef<Ch>(t->name, t->length)); - if (m == v->MemberEnd()) { - v->AddMember(ValueType(t->name, t->length, allocator).Move(), ValueType().Move(), allocator); - v = &(--v->MemberEnd())->value; // Assumes AddMember() appends at the end - exist = false; - } - else - v = &m->value; - } - } - } - - if (alreadyExist) - *alreadyExist = exist; - - return *v; - } - - //! 
Creates a value in a document. - /*! - \param document A document to be resolved. - \param alreadyExist If non-null, it stores whether the resolved value is already exist. - \return The resolved newly created, or already exists value. - */ - template <typename stackAllocator> - ValueType& Create(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, bool* alreadyExist = 0) const { - return Create(document, document.GetAllocator(), alreadyExist); - } - - //@} - - //!@name Query value - //@{ - - //! Query a value in a subtree. - /*! - \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. - \param unresolvedTokenIndex If the pointer cannot resolve a token in the pointer, this parameter can obtain the index of unresolved token. - \return Pointer to the value if it can be resolved. Otherwise null. - - \note - There are only 3 situations when a value cannot be resolved: - 1. A value in the path is not an array nor object. - 2. An object value does not contain the token. - 3. A token is out of range of an array value. - - Use unresolvedTokenIndex to retrieve the token index. - */ - ValueType* Get(ValueType& root, size_t* unresolvedTokenIndex = 0) const { - RAPIDJSON_ASSERT(IsValid()); - ValueType* v = &root; - for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { - switch (v->GetType()) { - case kObjectType: - { - typename ValueType::MemberIterator m = v->FindMember(GenericStringRef<Ch>(t->name, t->length)); - if (m == v->MemberEnd()) - break; - v = &m->value; - } - continue; - case kArrayType: - if (t->index == kPointerInvalidIndex || t->index >= v->Size()) - break; - v = &((*v)[t->index]); - continue; - default: - break; - } - - // Error: unresolved token - if (unresolvedTokenIndex) - *unresolvedTokenIndex = static_cast<size_t>(t - tokens_); - return 0; - } - return v; - } - - //! Query a const value in a const subtree. - /*! - \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. - \return Pointer to the value if it can be resolved. Otherwise null. - */ - const ValueType* Get(const ValueType& root, size_t* unresolvedTokenIndex = 0) const { - return Get(const_cast<ValueType&>(root), unresolvedTokenIndex); - } - - //@} - - //!@name Query a value with default - //@{ - - //! Query a value in a subtree with default value. - /*! - Similar to Get(), but if the specified value do not exists, it creates all parents and clone the default value. - So that this function always succeed. - - \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. - \param defaultValue Default value to be cloned if the value was not exists. - \param allocator Allocator for creating the values if the specified value or its parents are not exist. - \see Create() - */ - ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const { - bool alreadyExist; - Value& v = Create(root, allocator, &alreadyExist); - return alreadyExist ? v : v.CopyFrom(defaultValue, allocator); - } - - //! Query a value in a subtree with default null-terminated string. - ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const { - bool alreadyExist; - Value& v = Create(root, allocator, &alreadyExist); - return alreadyExist ? v : v.SetString(defaultValue, allocator); - } - -#if RAPIDJSON_HAS_STDSTRING - //! 
Query a value in a subtree with default std::basic_string. - ValueType& GetWithDefault(ValueType& root, const std::basic_string<Ch>& defaultValue, typename ValueType::AllocatorType& allocator) const { - bool alreadyExist; - Value& v = Create(root, allocator, &alreadyExist); - return alreadyExist ? v : v.SetString(defaultValue, allocator); - } -#endif - - //! Query a value in a subtree with default primitive value. - /*! - \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&)) - GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const { - return GetWithDefault(root, ValueType(defaultValue).Move(), allocator); - } - - //! Query a value in a document with default value. - template <typename stackAllocator> - ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& defaultValue) const { - return GetWithDefault(document, defaultValue, document.GetAllocator()); - } - - //! Query a value in a document with default null-terminated string. - template <typename stackAllocator> - ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* defaultValue) const { - return GetWithDefault(document, defaultValue, document.GetAllocator()); - } - -#if RAPIDJSON_HAS_STDSTRING - //! Query a value in a document with default std::basic_string. - template <typename stackAllocator> - ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& defaultValue) const { - return GetWithDefault(document, defaultValue, document.GetAllocator()); - } -#endif - - //! Query a value in a document with default primitive value. - /*! - \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool - */ - template <typename T, typename stackAllocator> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&)) - GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T defaultValue) const { - return GetWithDefault(document, defaultValue, document.GetAllocator()); - } - - //@} - - //!@name Set a value - //@{ - - //! Set a value in a subtree, with move semantics. - /*! - It creates all parents if they are not exist or types are different to the tokens. - So this function always succeeds but potentially remove existing values. - - \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. - \param value Value to be set. - \param allocator Allocator for creating the values if the specified value or its parents are not exist. - \see Create() - */ - ValueType& Set(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { - return Create(root, allocator) = value; - } - - //! Set a value in a subtree, with copy semantics. - ValueType& Set(ValueType& root, const ValueType& value, typename ValueType::AllocatorType& allocator) const { - return Create(root, allocator).CopyFrom(value, allocator); - } - - //! Set a null-terminated string in a subtree. 
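A short sketch of the GetWithDefault() and Set() behaviour described in this section: missing values and their parents are created on demand, so both calls below succeed against an empty object. The path and numbers are illustrative only:

    #include <cstdio>
    #include "rapidjson/document.h"
    #include "rapidjson/pointer.h"

    int main() {
        rapidjson::Document d;
        d.Parse("{}");

        // The default is cloned in when the value does not yet exist.
        rapidjson::Value& port = rapidjson::Pointer("/net/port").GetWithDefault(d, 9993);
        std::printf("%d\n", port.GetInt());              // 9993

        // Set() overwrites, creating (and possibly retyping) parents as needed.
        rapidjson::Pointer("/net/port").Set(d, 1234);
        std::printf("%d\n", d["net"]["port"].GetInt());  // 1234
        return 0;
    }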
- ValueType& Set(ValueType& root, const Ch* value, typename ValueType::AllocatorType& allocator) const { - return Create(root, allocator) = ValueType(value, allocator).Move(); - } - -#if RAPIDJSON_HAS_STDSTRING - //! Set a std::basic_string in a subtree. - ValueType& Set(ValueType& root, const std::basic_string<Ch>& value, typename ValueType::AllocatorType& allocator) const { - return Create(root, allocator) = ValueType(value, allocator).Move(); - } -#endif - - //! Set a primitive value in a subtree. - /*! - \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool - */ - template <typename T> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&)) - Set(ValueType& root, T value, typename ValueType::AllocatorType& allocator) const { - return Create(root, allocator) = ValueType(value).Move(); - } - - //! Set a value in a document, with move semantics. - template <typename stackAllocator> - ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const { - return Create(document) = value; - } - - //! Set a value in a document, with copy semantics. - template <typename stackAllocator> - ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& value) const { - return Create(document).CopyFrom(value, document.GetAllocator()); - } - - //! Set a null-terminated string in a document. - template <typename stackAllocator> - ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* value) const { - return Create(document) = ValueType(value, document.GetAllocator()).Move(); - } - -#if RAPIDJSON_HAS_STDSTRING - //! Sets a std::basic_string in a document. - template <typename stackAllocator> - ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& value) const { - return Create(document) = ValueType(value, document.GetAllocator()).Move(); - } -#endif - - //! Set a primitive value in a document. - /*! - \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool - */ - template <typename T, typename stackAllocator> - RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&)) - Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T value) const { - return Create(document) = value; - } - - //@} - - //!@name Swap a value - //@{ - - //! Swap a value with a value in a subtree. - /*! - It creates all parents if they are not exist or types are different to the tokens. - So this function always succeeds but potentially remove existing values. - - \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. - \param value Value to be swapped. - \param allocator Allocator for creating the values if the specified value or its parents are not exist. - \see Create() - */ - ValueType& Swap(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { - return Create(root, allocator).Swap(value); - } - - //! Swap a value with a value in a document. - template <typename stackAllocator> - ValueType& Swap(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const { - return Create(document).Swap(value); - } - - //@} - - //! 
Erase a value in a subtree. - /*! - \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. - \return Whether the resolved value is found and erased. - - \note Erasing with an empty pointer \c Pointer(""), i.e. the root, always fail and return false. - */ - bool Erase(ValueType& root) const { - RAPIDJSON_ASSERT(IsValid()); - if (tokenCount_ == 0) // Cannot erase the root - return false; - - ValueType* v = &root; - const Token* last = tokens_ + (tokenCount_ - 1); - for (const Token *t = tokens_; t != last; ++t) { - switch (v->GetType()) { - case kObjectType: - { - typename ValueType::MemberIterator m = v->FindMember(GenericStringRef<Ch>(t->name, t->length)); - if (m == v->MemberEnd()) - return false; - v = &m->value; - } - break; - case kArrayType: - if (t->index == kPointerInvalidIndex || t->index >= v->Size()) - return false; - v = &((*v)[t->index]); - break; - default: - return false; - } - } - - switch (v->GetType()) { - case kObjectType: - return v->EraseMember(GenericStringRef<Ch>(last->name, last->length)); - case kArrayType: - if (last->index == kPointerInvalidIndex || last->index >= v->Size()) - return false; - v->Erase(v->Begin() + last->index); - return true; - default: - return false; - } - } - -private: - //! Clone the content from rhs to this. - /*! - \param rhs Source pointer. - \param extraToken Extra tokens to be allocated. - \param extraNameBufferSize Extra name buffer size (in number of Ch) to be allocated. - \return Start of non-occupied name buffer, for storing extra names. - */ - Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) { - if (!allocator_) // allocator is independently owned. - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); - - size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens - for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t) - nameBufferSize += t->length; - - tokenCount_ = rhs.tokenCount_ + extraToken; - tokens_ = static_cast<Token *>(allocator_->Malloc(tokenCount_ * sizeof(Token) + (nameBufferSize + extraNameBufferSize) * sizeof(Ch))); - nameBuffer_ = reinterpret_cast<Ch *>(tokens_ + tokenCount_); - if (rhs.tokenCount_ > 0) { - std::memcpy(tokens_, rhs.tokens_, rhs.tokenCount_ * sizeof(Token)); - } - if (nameBufferSize > 0) { - std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch)); - } - - // Adjust pointers to name buffer - std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_; - for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t) - t->name += diff; - - return nameBuffer_ + nameBufferSize; - } - - //! Check whether a character should be percent-encoded. - /*! - According to RFC 3986 2.3 Unreserved Characters. - \param c The character (code unit) to be tested. - */ - bool NeedPercentEncode(Ch c) const { - return !((c >= '0' && c <= '9') || (c >= 'A' && c <='Z') || (c >= 'a' && c <= 'z') || c == '-' || c == '.' || c == '_' || c =='~'); - } - - //! Parse a JSON String or its URI fragment representation into tokens. -#ifndef __clang__ // -Wdocumentation - /*! - \param source Either a JSON Pointer string, or its URI fragment representation. Not need to be null terminated. - \param length Length of the source string. - \note Source cannot be JSON String Representation of JSON Pointer, e.g. In "/\u0000", \u0000 will not be unescaped. 
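A sketch of Erase() as documented above: it resolves all but the last token, removes the final member or element, and always fails for the root pointer:

    #include <cstdio>
    #include "rapidjson/document.h"
    #include "rapidjson/pointer.h"

    int main() {
        rapidjson::Document d;
        d.Parse("{\"a\":{\"b\":[10,20,30]}}");

        bool ok = rapidjson::Pointer("/a/b/1").Erase(d);              // removes the 20
        std::printf("%d %u\n", int(ok), d["a"]["b"].Size());          // 1 2

        std::printf("%d\n", int(rapidjson::Pointer("").Erase(d)));    // 0: erasing the root always fails
        return 0;
    }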
- */ -#endif - void Parse(const Ch* source, size_t length) { - RAPIDJSON_ASSERT(source != NULL); - RAPIDJSON_ASSERT(nameBuffer_ == 0); - RAPIDJSON_ASSERT(tokens_ == 0); - - // Create own allocator if user did not supply. - if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); - - // Count number of '/' as tokenCount - tokenCount_ = 0; - for (const Ch* s = source; s != source + length; s++) - if (*s == '/') - tokenCount_++; - - Token* token = tokens_ = static_cast<Token *>(allocator_->Malloc(tokenCount_ * sizeof(Token) + length * sizeof(Ch))); - Ch* name = nameBuffer_ = reinterpret_cast<Ch *>(tokens_ + tokenCount_); - size_t i = 0; - - // Detect if it is a URI fragment - bool uriFragment = false; - if (source[i] == '#') { - uriFragment = true; - i++; - } - - if (i != length && source[i] != '/') { - parseErrorCode_ = kPointerParseErrorTokenMustBeginWithSolidus; - goto error; - } - - while (i < length) { - RAPIDJSON_ASSERT(source[i] == '/'); - i++; // consumes '/' - - token->name = name; - bool isNumber = true; - - while (i < length && source[i] != '/') { - Ch c = source[i]; - if (uriFragment) { - // Decoding percent-encoding for URI fragment - if (c == '%') { - PercentDecodeStream is(&source[i], source + length); - GenericInsituStringStream<EncodingType> os(name); - Ch* begin = os.PutBegin(); - if (!Transcoder<UTF8<>, EncodingType>().Validate(is, os) || !is.IsValid()) { - parseErrorCode_ = kPointerParseErrorInvalidPercentEncoding; - goto error; - } - size_t len = os.PutEnd(begin); - i += is.Tell() - 1; - if (len == 1) - c = *name; - else { - name += len; - isNumber = false; - i++; - continue; - } - } - else if (NeedPercentEncode(c)) { - parseErrorCode_ = kPointerParseErrorCharacterMustPercentEncode; - goto error; - } - } - - i++; - - // Escaping "~0" -> '~', "~1" -> '/' - if (c == '~') { - if (i < length) { - c = source[i]; - if (c == '0') c = '~'; - else if (c == '1') c = '/'; - else { - parseErrorCode_ = kPointerParseErrorInvalidEscape; - goto error; - } - i++; - } - else { - parseErrorCode_ = kPointerParseErrorInvalidEscape; - goto error; - } - } - - // First check for index: all of characters are digit - if (c < '0' || c > '9') - isNumber = false; - - *name++ = c; - } - token->length = static_cast<SizeType>(name - token->name); - if (token->length == 0) - isNumber = false; - *name++ = '\0'; // Null terminator - - // Second check for index: more than one digit cannot have leading zero - if (isNumber && token->length > 1 && token->name[0] == '0') - isNumber = false; - - // String to SizeType conversion - SizeType n = 0; - if (isNumber) { - for (size_t j = 0; j < token->length; j++) { - SizeType m = n * 10 + static_cast<SizeType>(token->name[j] - '0'); - if (m < n) { // overflow detection - isNumber = false; - break; - } - n = m; - } - } - - token->index = isNumber ? n : kPointerInvalidIndex; - token++; - } - - RAPIDJSON_ASSERT(name <= nameBuffer_ + length); // Should not overflow buffer - parseErrorCode_ = kPointerParseErrorNone; - return; - - error: - Allocator::Free(tokens_); - nameBuffer_ = 0; - tokens_ = 0; - tokenCount_ = 0; - parseErrorOffset_ = i; - return; - } - - //! Stringify to string or URI fragment representation. - /*! - \tparam uriFragment True for stringifying to URI fragment representation. False for string representation. - \tparam OutputStream type of output stream. - \param os The output stream. 
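The escaping rules implemented by Parse() above ("~1" for '/', "~0" for '~') and the Stringify()/StringifyUriFragment() pair can be exercised with a short round trip (stringbuffer.h is assumed to be available alongside this header):

    #include <cstdio>
    #include "rapidjson/document.h"
    #include "rapidjson/pointer.h"
    #include "rapidjson/stringbuffer.h"

    int main() {
        rapidjson::Document d;
        d.Parse("{\"a/b\":{\"m~n\":1}}");

        // "~1" unescapes to '/' and "~0" to '~', exactly as Parse() above implements.
        const rapidjson::Value* v = rapidjson::Pointer("/a~1b/m~0n").Get(d);
        std::printf("%d\n", v ? v->GetInt() : -1);             // 1

        // Stringify back out, here in URI fragment representation.
        rapidjson::StringBuffer sb;
        rapidjson::Pointer("/a~1b/m~0n").StringifyUriFragment(sb);
        std::printf("%s\n", sb.GetString());                   // #/a~1b/m~0n
        return 0;
    }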
- */ - template<bool uriFragment, typename OutputStream> - bool Stringify(OutputStream& os) const { - RAPIDJSON_ASSERT(IsValid()); - - if (uriFragment) - os.Put('#'); - - for (Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { - os.Put('/'); - for (size_t j = 0; j < t->length; j++) { - Ch c = t->name[j]; - if (c == '~') { - os.Put('~'); - os.Put('0'); - } - else if (c == '/') { - os.Put('~'); - os.Put('1'); - } - else if (uriFragment && NeedPercentEncode(c)) { - // Transcode to UTF8 sequence - GenericStringStream<typename ValueType::EncodingType> source(&t->name[j]); - PercentEncodeStream<OutputStream> target(os); - if (!Transcoder<EncodingType, UTF8<> >().Validate(source, target)) - return false; - j += source.Tell() - 1; - } - else - os.Put(c); - } - } - return true; - } - - //! A helper stream for decoding a percent-encoded sequence into code unit. - /*! - This stream decodes %XY triplet into code unit (0-255). - If it encounters invalid characters, it sets output code unit as 0 and - mark invalid, and to be checked by IsValid(). - */ - class PercentDecodeStream { - public: - typedef typename ValueType::Ch Ch; - - //! Constructor - /*! - \param source Start of the stream - \param end Past-the-end of the stream. - */ - PercentDecodeStream(const Ch* source, const Ch* end) : src_(source), head_(source), end_(end), valid_(true) {} - - Ch Take() { - if (*src_ != '%' || src_ + 3 > end_) { // %XY triplet - valid_ = false; - return 0; - } - src_++; - Ch c = 0; - for (int j = 0; j < 2; j++) { - c = static_cast<Ch>(c << 4); - Ch h = *src_; - if (h >= '0' && h <= '9') c = static_cast<Ch>(c + h - '0'); - else if (h >= 'A' && h <= 'F') c = static_cast<Ch>(c + h - 'A' + 10); - else if (h >= 'a' && h <= 'f') c = static_cast<Ch>(c + h - 'a' + 10); - else { - valid_ = false; - return 0; - } - src_++; - } - return c; - } - - size_t Tell() const { return static_cast<size_t>(src_ - head_); } - bool IsValid() const { return valid_; } - - private: - const Ch* src_; //!< Current read position. - const Ch* head_; //!< Original head of the string. - const Ch* end_; //!< Past-the-end position. - bool valid_; //!< Whether the parsing is valid. - }; - - //! A helper stream to encode character (UTF-8 code unit) into percent-encoded sequence. - template <typename OutputStream> - class PercentEncodeStream { - public: - PercentEncodeStream(OutputStream& os) : os_(os) {} - void Put(char c) { // UTF-8 must be byte - unsigned char u = static_cast<unsigned char>(c); - static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; - os_.Put('%'); - os_.Put(hexDigits[u >> 4]); - os_.Put(hexDigits[u & 15]); - } - private: - OutputStream& os_; - }; - - Allocator* allocator_; //!< The current allocator. It is either user-supplied or equal to ownAllocator_. - Allocator* ownAllocator_; //!< Allocator owned by this Pointer. - Ch* nameBuffer_; //!< A buffer containing all names in tokens. - Token* tokens_; //!< A list of tokens. - size_t tokenCount_; //!< Number of tokens in tokens_. - size_t parseErrorOffset_; //!< Offset in code unit when parsing fail. - PointerParseErrorCode parseErrorCode_; //!< Parsing error code. -}; - -//! GenericPointer for Value (UTF-8, default allocator). 
-typedef GenericPointer<Value> Pointer; - -//!@name Helper functions for GenericPointer -//@{ - -////////////////////////////////////////////////////////////////////////////// - -template <typename T> -typename T::ValueType& CreateValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::AllocatorType& a) { - return pointer.Create(root, a); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType& CreateValueByPointer(T& root, const CharType(&source)[N], typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Create(root, a); -} - -// No allocator parameter - -template <typename DocumentType> -typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer) { - return pointer.Create(document); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const CharType(&source)[N]) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Create(document); -} - -////////////////////////////////////////////////////////////////////////////// - -template <typename T> -typename T::ValueType* GetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, size_t* unresolvedTokenIndex = 0) { - return pointer.Get(root, unresolvedTokenIndex); -} - -template <typename T> -const typename T::ValueType* GetValueByPointer(const T& root, const GenericPointer<typename T::ValueType>& pointer, size_t* unresolvedTokenIndex = 0) { - return pointer.Get(root, unresolvedTokenIndex); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType* GetValueByPointer(T& root, const CharType (&source)[N], size_t* unresolvedTokenIndex = 0) { - return GenericPointer<typename T::ValueType>(source, N - 1).Get(root, unresolvedTokenIndex); -} - -template <typename T, typename CharType, size_t N> -const typename T::ValueType* GetValueByPointer(const T& root, const CharType(&source)[N], size_t* unresolvedTokenIndex = 0) { - return GenericPointer<typename T::ValueType>(source, N - 1).Get(root, unresolvedTokenIndex); -} - -////////////////////////////////////////////////////////////////////////////// - -template <typename T> -typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { - return pointer.GetWithDefault(root, defaultValue, a); -} - -template <typename T> -typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::Ch* defaultValue, typename T::AllocatorType& a) { - return pointer.GetWithDefault(root, defaultValue, a); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename T> -typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const std::basic_string<typename T::Ch>& defaultValue, typename T::AllocatorType& a) { - return pointer.GetWithDefault(root, defaultValue, a); -} -#endif - -template <typename T, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&)) -GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, T2 defaultValue, typename T::AllocatorType& a) { - return pointer.GetWithDefault(root, defaultValue, a); -} - 
-template <typename T, typename CharType, size_t N> -typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::Ch* defaultValue, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename T, typename CharType, size_t N> -typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const std::basic_string<typename T::Ch>& defaultValue, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a); -} -#endif - -template <typename T, typename CharType, size_t N, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&)) -GetValueByPointerWithDefault(T& root, const CharType(&source)[N], T2 defaultValue, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a); -} - -// No allocator parameter - -template <typename DocumentType> -typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::ValueType& defaultValue) { - return pointer.GetWithDefault(document, defaultValue); -} - -template <typename DocumentType> -typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::Ch* defaultValue) { - return pointer.GetWithDefault(document, defaultValue); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename DocumentType> -typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const std::basic_string<typename DocumentType::Ch>& defaultValue) { - return pointer.GetWithDefault(document, defaultValue); -} -#endif - -template <typename DocumentType, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&)) -GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, T2 defaultValue) { - return pointer.GetWithDefault(document, defaultValue); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& defaultValue) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* defaultValue) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename 
DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const std::basic_string<typename DocumentType::Ch>& defaultValue) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue); -} -#endif - -template <typename DocumentType, typename CharType, size_t N, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&)) -GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], T2 defaultValue) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue); -} - -////////////////////////////////////////////////////////////////////////////// - -template <typename T> -typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { - return pointer.Set(root, value, a); -} - -template <typename T> -typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::ValueType& value, typename T::AllocatorType& a) { - return pointer.Set(root, value, a); -} - -template <typename T> -typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::Ch* value, typename T::AllocatorType& a) { - return pointer.Set(root, value, a); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename T> -typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const std::basic_string<typename T::Ch>& value, typename T::AllocatorType& a) { - return pointer.Set(root, value, a); -} -#endif - -template <typename T, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&)) -SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, T2 value, typename T::AllocatorType& a) { - return pointer.Set(root, value, a); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::ValueType& value, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::Ch* value, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename T, typename CharType, size_t N> -typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const std::basic_string<typename T::Ch>& value, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a); -} -#endif - -template <typename T, typename CharType, size_t N, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), 
(typename T::ValueType&)) -SetValueByPointer(T& root, const CharType(&source)[N], T2 value, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a); -} - -// No allocator parameter - -template <typename DocumentType> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, typename DocumentType::ValueType& value) { - return pointer.Set(document, value); -} - -template <typename DocumentType> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::ValueType& value) { - return pointer.Set(document, value); -} - -template <typename DocumentType> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::Ch* value) { - return pointer.Set(document, value); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename DocumentType> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const std::basic_string<typename DocumentType::Ch>& value) { - return pointer.Set(document, value); -} -#endif - -template <typename DocumentType, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&)) -SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, T2 value) { - return pointer.Set(document, value); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& value) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* value) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value); -} - -#if RAPIDJSON_HAS_STDSTRING -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const std::basic_string<typename DocumentType::Ch>& value) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value); -} -#endif - -template <typename DocumentType, typename CharType, size_t N, typename T2> -RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&)) -SetValueByPointer(DocumentType& document, const CharType(&source)[N], T2 value) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value); -} - -////////////////////////////////////////////////////////////////////////////// - -template <typename T> -typename T::ValueType& 
SwapValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { - return pointer.Swap(root, value, a); -} - -template <typename T, typename CharType, size_t N> -typename T::ValueType& SwapValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { - return GenericPointer<typename T::ValueType>(source, N - 1).Swap(root, value, a); -} - -template <typename DocumentType> -typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, typename DocumentType::ValueType& value) { - return pointer.Swap(document, value); -} - -template <typename DocumentType, typename CharType, size_t N> -typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { - return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Swap(document, value); -} - -////////////////////////////////////////////////////////////////////////////// - -template <typename T> -bool EraseValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer) { - return pointer.Erase(root); -} - -template <typename T, typename CharType, size_t N> -bool EraseValueByPointer(T& root, const CharType(&source)[N]) { - return GenericPointer<typename T::ValueType>(source, N - 1).Erase(root); -} - -//@} - -RAPIDJSON_NAMESPACE_END - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_POINTER_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/prettywriter.h b/ext/librethinkdbxx/src/rapidjson/prettywriter.h deleted file mode 100644 index 75dc474f..00000000 --- a/ext/librethinkdbxx/src/rapidjson/prettywriter.h +++ /dev/null @@ -1,249 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_PRETTYWRITER_H_ -#define RAPIDJSON_PRETTYWRITER_H_ - -#include "writer.h" - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Combination of PrettyWriter format flags. -/*! \see PrettyWriter::SetFormatOptions - */ -enum PrettyFormatOptions { - kFormatDefault = 0, //!< Default pretty formatting. - kFormatSingleLineArray = 1 //!< Format arrays on a single line. -}; - -//! Writer with indentation and spacing. -/*! - \tparam OutputStream Type of ouptut os. - \tparam SourceEncoding Encoding of source string. - \tparam TargetEncoding Encoding of output stream. - \tparam StackAllocator Type of allocator for allocating memory of stack. 
-*/ -template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> -class PrettyWriter : public Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> { -public: - typedef Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator> Base; - typedef typename Base::Ch Ch; - - //! Constructor - /*! \param os Output stream. - \param allocator User supplied allocator. If it is null, it will create a private one. - \param levelDepth Initial capacity of stack. - */ - explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : - Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {} - - - explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : - Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {} - - //! Set custom indentation. - /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r'). - \param indentCharCount Number of indent characters for each indentation level. - \note The default indentation is 4 spaces. - */ - PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) { - RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r'); - indentChar_ = indentChar; - indentCharCount_ = indentCharCount; - return *this; - } - - //! Set pretty writer formatting options. - /*! \param options Formatting options. - */ - PrettyWriter& SetFormatOptions(PrettyFormatOptions options) { - formatOptions_ = options; - return *this; - } - - /*! @name Implementation of Handler - \see Handler - */ - //@{ - - bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); } - bool Bool(bool b) { PrettyPrefix(b ? 
kTrueType : kFalseType); return Base::WriteBool(b); } - bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); } - bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); } - bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); } - bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); } - bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); } - - bool RawNumber(const Ch* str, SizeType length, bool copy = false) { - (void)copy; - PrettyPrefix(kNumberType); - return Base::WriteString(str, length); - } - - bool String(const Ch* str, SizeType length, bool copy = false) { - (void)copy; - PrettyPrefix(kStringType); - return Base::WriteString(str, length); - } - -#if RAPIDJSON_HAS_STDSTRING - bool String(const std::basic_string<Ch>& str) { - return String(str.data(), SizeType(str.size())); - } -#endif - - bool StartObject() { - PrettyPrefix(kObjectType); - new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false); - return Base::WriteStartObject(); - } - - bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } - - bool EndObject(SizeType memberCount = 0) { - (void)memberCount; - RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); - RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray); - bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0; - - if (!empty) { - Base::os_->Put('\n'); - WriteIndent(); - } - bool ret = Base::WriteEndObject(); - (void)ret; - RAPIDJSON_ASSERT(ret == true); - if (Base::level_stack_.Empty()) // end of json text - Base::os_->Flush(); - return true; - } - - bool StartArray() { - PrettyPrefix(kArrayType); - new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true); - return Base::WriteStartArray(); - } - - bool EndArray(SizeType memberCount = 0) { - (void)memberCount; - RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); - RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray); - bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0; - - if (!empty && !(formatOptions_ & kFormatSingleLineArray)) { - Base::os_->Put('\n'); - WriteIndent(); - } - bool ret = Base::WriteEndArray(); - (void)ret; - RAPIDJSON_ASSERT(ret == true); - if (Base::level_stack_.Empty()) // end of json text - Base::os_->Flush(); - return true; - } - - //@} - - /*! @name Convenience extensions */ - //@{ - - //! Simpler but slower overload. - bool String(const Ch* str) { return String(str, internal::StrLen(str)); } - bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } - - //@} - - //! Write a raw JSON value. - /*! - For user to write a stringified JSON as a value. - - \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. - \param length Length of the json. - \param type Type of the root of json. - \note When using PrettyWriter::RawValue(), the result json may not be indented correctly. 
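// Illustrative usage sketch (not part of the deleted header): driving the PrettyWriter
// defined above from a DOM, using the SetIndent()/SetFormatOptions() knobs it declares.
// Assumes rapidjson/document.h and rapidjson/stringbuffer.h; the JSON input is example data.
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/prettywriter.h"
#include "rapidjson/stringbuffer.h"

int main() {
    rapidjson::Document d;
    d.Parse("{\"name\":\"tap\",\"ids\":[1,2,3]}");

    rapidjson::StringBuffer sb;
    rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(sb);
    writer.SetIndent(' ', 2);                                    // two-space indent
    writer.SetFormatOptions(rapidjson::kFormatSingleLineArray);  // keep arrays on one line
    d.Accept(writer);                                            // serialize the DOM through the writer

    std::printf("%s\n", sb.GetString());
    return 0;
}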
- */ - bool RawValue(const Ch* json, size_t length, Type type) { PrettyPrefix(type); return Base::WriteRawValue(json, length); } - -protected: - void PrettyPrefix(Type type) { - (void)type; - if (Base::level_stack_.GetSize() != 0) { // this value is not at root - typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>(); - - if (level->inArray) { - if (level->valueCount > 0) { - Base::os_->Put(','); // add comma if it is not the first element in array - if (formatOptions_ & kFormatSingleLineArray) - Base::os_->Put(' '); - } - - if (!(formatOptions_ & kFormatSingleLineArray)) { - Base::os_->Put('\n'); - WriteIndent(); - } - } - else { // in object - if (level->valueCount > 0) { - if (level->valueCount % 2 == 0) { - Base::os_->Put(','); - Base::os_->Put('\n'); - } - else { - Base::os_->Put(':'); - Base::os_->Put(' '); - } - } - else - Base::os_->Put('\n'); - - if (level->valueCount % 2 == 0) - WriteIndent(); - } - if (!level->inArray && level->valueCount % 2 == 0) - RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name - level->valueCount++; - } - else { - RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root. - Base::hasRoot_ = true; - } - } - - void WriteIndent() { - size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_; - PutN(*Base::os_, static_cast<typename TargetEncoding::Ch>(indentChar_), count); - } - - Ch indentChar_; - unsigned indentCharCount_; - PrettyFormatOptions formatOptions_; - -private: - // Prohibit copy constructor & assignment operator. - PrettyWriter(const PrettyWriter&); - PrettyWriter& operator=(const PrettyWriter&); -}; - -RAPIDJSON_NAMESPACE_END - -#ifdef __GNUC__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/rapidjson.h b/ext/librethinkdbxx/src/rapidjson/rapidjson.h deleted file mode 100644 index d666f202..00000000 --- a/ext/librethinkdbxx/src/rapidjson/rapidjson.h +++ /dev/null @@ -1,615 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_RAPIDJSON_H_ -#define RAPIDJSON_RAPIDJSON_H_ - -/*!\file rapidjson.h - \brief common definitions and configuration - - \see RAPIDJSON_CONFIG - */ - -/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration - \brief Configuration macros for library features - - Some RapidJSON features are configurable to adapt the library to a wide - variety of platforms, environments and usage scenarios. Most of the - features can be configured in terms of overriden or predefined - preprocessor macros at compile-time. - - Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs. 
- - \note These macros should be given on the compiler command-line - (where applicable) to avoid inconsistent values when compiling - different translation units of a single application. - */ - -#include <cstdlib> // malloc(), realloc(), free(), size_t -#include <cstring> // memset(), memcpy(), memmove(), memcmp() - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_VERSION_STRING -// -// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt. -// - -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -// token stringification -#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x) -#define RAPIDJSON_DO_STRINGIFY(x) #x -//!@endcond - -/*! \def RAPIDJSON_MAJOR_VERSION - \ingroup RAPIDJSON_CONFIG - \brief Major version of RapidJSON in integer. -*/ -/*! \def RAPIDJSON_MINOR_VERSION - \ingroup RAPIDJSON_CONFIG - \brief Minor version of RapidJSON in integer. -*/ -/*! \def RAPIDJSON_PATCH_VERSION - \ingroup RAPIDJSON_CONFIG - \brief Patch version of RapidJSON in integer. -*/ -/*! \def RAPIDJSON_VERSION_STRING - \ingroup RAPIDJSON_CONFIG - \brief Version of RapidJSON in "<major>.<minor>.<patch>" string format. -*/ -#define RAPIDJSON_MAJOR_VERSION 1 -#define RAPIDJSON_MINOR_VERSION 0 -#define RAPIDJSON_PATCH_VERSION 2 -#define RAPIDJSON_VERSION_STRING \ - RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION) - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_NAMESPACE_(BEGIN|END) -/*! \def RAPIDJSON_NAMESPACE - \ingroup RAPIDJSON_CONFIG - \brief provide custom rapidjson namespace - - In order to avoid symbol clashes and/or "One Definition Rule" errors - between multiple inclusions of (different versions of) RapidJSON in - a single binary, users can customize the name of the main RapidJSON - namespace. - - In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE - to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple - levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref - RAPIDJSON_NAMESPACE_END need to be defined as well: - - \code - // in some .cpp file - #define RAPIDJSON_NAMESPACE my::rapidjson - #define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson { - #define RAPIDJSON_NAMESPACE_END } } - #include "rapidjson/..." - \endcode - - \see rapidjson - */ -/*! \def RAPIDJSON_NAMESPACE_BEGIN - \ingroup RAPIDJSON_CONFIG - \brief provide custom rapidjson namespace (opening expression) - \see RAPIDJSON_NAMESPACE -*/ -/*! \def RAPIDJSON_NAMESPACE_END - \ingroup RAPIDJSON_CONFIG - \brief provide custom rapidjson namespace (closing expression) - \see RAPIDJSON_NAMESPACE -*/ -#ifndef RAPIDJSON_NAMESPACE -#define RAPIDJSON_NAMESPACE rapidjson -#endif -#ifndef RAPIDJSON_NAMESPACE_BEGIN -#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE { -#endif -#ifndef RAPIDJSON_NAMESPACE_END -#define RAPIDJSON_NAMESPACE_END } -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_HAS_STDSTRING - -#ifndef RAPIDJSON_HAS_STDSTRING -#ifdef RAPIDJSON_DOXYGEN_RUNNING -#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation -#else -#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default -#endif -/*! 
\def RAPIDJSON_HAS_STDSTRING - \ingroup RAPIDJSON_CONFIG - \brief Enable RapidJSON support for \c std::string - - By defining this preprocessor symbol to \c 1, several convenience functions for using - \ref rapidjson::GenericValue with \c std::string are enabled, especially - for construction and comparison. - - \hideinitializer -*/ -#endif // !defined(RAPIDJSON_HAS_STDSTRING) - -#if RAPIDJSON_HAS_STDSTRING -#include <string> -#endif // RAPIDJSON_HAS_STDSTRING - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_NO_INT64DEFINE - -/*! \def RAPIDJSON_NO_INT64DEFINE - \ingroup RAPIDJSON_CONFIG - \brief Use external 64-bit integer types. - - RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types - to be available at global scope. - - If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to - prevent RapidJSON from defining its own types. -*/ -#ifndef RAPIDJSON_NO_INT64DEFINE -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013 -#include "msinttypes/stdint.h" -#include "msinttypes/inttypes.h" -#else -// Other compilers should have this. -#include <stdint.h> -#include <inttypes.h> -#endif -//!@endcond -#ifdef RAPIDJSON_DOXYGEN_RUNNING -#define RAPIDJSON_NO_INT64DEFINE -#endif -#endif // RAPIDJSON_NO_INT64TYPEDEF - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_FORCEINLINE - -#ifndef RAPIDJSON_FORCEINLINE -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -#if defined(_MSC_VER) && defined(NDEBUG) -#define RAPIDJSON_FORCEINLINE __forceinline -#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG) -#define RAPIDJSON_FORCEINLINE __attribute__((always_inline)) -#else -#define RAPIDJSON_FORCEINLINE -#endif -//!@endcond -#endif // RAPIDJSON_FORCEINLINE - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_ENDIAN -#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine -#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine - -//! Endianness of the machine. -/*! - \def RAPIDJSON_ENDIAN - \ingroup RAPIDJSON_CONFIG - - GCC 4.6 provided macro for detecting endianness of the target machine. But other - compilers may not have this. User can define RAPIDJSON_ENDIAN to either - \ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN. - - Default detection implemented with reference to - \li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html - \li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp -*/ -#ifndef RAPIDJSON_ENDIAN -// Detect with GCC 4.6's macro -# ifdef __BYTE_ORDER__ -# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN -# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN -# else -# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. -# endif // __BYTE_ORDER__ -// Detect with GLIBC's endian.h -# elif defined(__GLIBC__) -# include <endian.h> -# if (__BYTE_ORDER == __LITTLE_ENDIAN) -# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN -# elif (__BYTE_ORDER == __BIG_ENDIAN) -# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN -# else -# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. 
-# endif // __GLIBC__ -// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro -# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) -# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN -# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) -# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN -// Detect with architecture macros -# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__) -# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN -# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__) -# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN -# elif defined(_MSC_VER) && defined(_M_ARM) -# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN -# elif defined(RAPIDJSON_DOXYGEN_RUNNING) -# define RAPIDJSON_ENDIAN -# else -# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. -# endif -#endif // RAPIDJSON_ENDIAN - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_64BIT - -//! Whether using 64-bit architecture -#ifndef RAPIDJSON_64BIT -#if defined(__LP64__) || defined(_WIN64) || defined(__EMSCRIPTEN__) -#define RAPIDJSON_64BIT 1 -#else -#define RAPIDJSON_64BIT 0 -#endif -#endif // RAPIDJSON_64BIT - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_ALIGN - -//! Data alignment of the machine. -/*! \ingroup RAPIDJSON_CONFIG - \param x pointer to align - - Some machines require strict data alignment. Currently the default uses 4 bytes - alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms. - User can customize by defining the RAPIDJSON_ALIGN function macro. -*/ -#ifndef RAPIDJSON_ALIGN -#if RAPIDJSON_64BIT == 1 -#define RAPIDJSON_ALIGN(x) (((x) + static_cast<uint64_t>(7u)) & ~static_cast<uint64_t>(7u)) -#else -#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u) -#endif -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_UINT64_C2 - -//! Construct a 64-bit literal by a pair of 32-bit integer. -/*! - 64-bit literal with or without ULL suffix is prone to compiler warnings. - UINT64_C() is C macro which cause compilation problems. - Use this macro to define 64-bit constants by a pair of 32-bit integer. -*/ -#ifndef RAPIDJSON_UINT64_C2 -#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast<uint64_t>(high32) << 32) | static_cast<uint64_t>(low32)) -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_48BITPOINTER_OPTIMIZATION - -//! Use only lower 48-bit address for some pointers. -/*! - \ingroup RAPIDJSON_CONFIG - - This optimization uses the fact that current X86-64 architecture only implement lower 48-bit virtual address. - The higher 16-bit can be used for storing other data. - \c GenericValue uses this optimization to reduce its size form 24 bytes to 16 bytes in 64-bit architecture. 
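// Illustrative sketch (not part of the deleted header): what RAPIDJSON_ALIGN and
// RAPIDJSON_UINT64_C2, defined above, evaluate to. Assumes a 64-bit build, where
// RAPIDJSON_ALIGN rounds up to an 8-byte boundary.
#include <cassert>
#include <cstdint>
#include "rapidjson/rapidjson.h"

int main() {
    // 10 bytes rounded up to the 8-byte alignment used on 64-bit platforms.
    assert(RAPIDJSON_ALIGN(10) == 16);

    // Build a 64-bit constant from two 32-bit halves without a ULL-suffixed literal.
    const uint64_t highMask = RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000);
    assert(highMask == 0xFFFF000000000000ULL);
    return 0;
}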
-*/ -#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION -#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) -#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1 -#else -#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0 -#endif -#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION - -#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1 -#if RAPIDJSON_64BIT != 1 -#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1 -#endif -#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast<type *>((reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast<uintptr_t>(reinterpret_cast<const void*>(x)))) -#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast<type *>(reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF)))) -#else -#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x)) -#define RAPIDJSON_GETPOINTER(type, p) (p) -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD - -/*! \def RAPIDJSON_SIMD - \ingroup RAPIDJSON_CONFIG - \brief Enable SSE2/SSE4.2 optimization. - - RapidJSON supports optimized implementations for some parsing operations - based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible - processors. - - To enable these optimizations, two different symbols can be defined; - \code - // Enable SSE2 optimization. - #define RAPIDJSON_SSE2 - - // Enable SSE4.2 optimization. - #define RAPIDJSON_SSE42 - \endcode - - \c RAPIDJSON_SSE42 takes precedence, if both are defined. - - If any of these symbols is defined, RapidJSON defines the macro - \c RAPIDJSON_SIMD to indicate the availability of the optimized code. -*/ -#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \ - || defined(RAPIDJSON_DOXYGEN_RUNNING) -#define RAPIDJSON_SIMD -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_NO_SIZETYPEDEFINE - -#ifndef RAPIDJSON_NO_SIZETYPEDEFINE -/*! \def RAPIDJSON_NO_SIZETYPEDEFINE - \ingroup RAPIDJSON_CONFIG - \brief User-provided \c SizeType definition. - - In order to avoid using 32-bit size types for indexing strings and arrays, - define this preprocessor symbol and provide the type rapidjson::SizeType - before including RapidJSON: - \code - #define RAPIDJSON_NO_SIZETYPEDEFINE - namespace rapidjson { typedef ::std::size_t SizeType; } - #include "rapidjson/..." - \endcode - - \see rapidjson::SizeType -*/ -#ifdef RAPIDJSON_DOXYGEN_RUNNING -#define RAPIDJSON_NO_SIZETYPEDEFINE -#endif -RAPIDJSON_NAMESPACE_BEGIN -//! Size type (for string lengths, array sizes, etc.) -/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms, - instead of using \c size_t. Users may override the SizeType by defining - \ref RAPIDJSON_NO_SIZETYPEDEFINE. -*/ -typedef unsigned SizeType; -RAPIDJSON_NAMESPACE_END -#endif - -// always import std::size_t to rapidjson namespace -RAPIDJSON_NAMESPACE_BEGIN -using std::size_t; -RAPIDJSON_NAMESPACE_END - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_ASSERT - -//! Assertion. -/*! \ingroup RAPIDJSON_CONFIG - By default, rapidjson uses C \c assert() for internal assertions. - User can override it by defining RAPIDJSON_ASSERT(x) macro. - - \note Parsing errors are handled and can be customized by the - \ref RAPIDJSON_ERRORS APIs. 
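// Illustrative sketch (not part of the deleted header): overriding RAPIDJSON_ASSERT, as the
// comment above permits, so that misuse of asserting accessors throws instead of calling
// assert(). The std::logic_error choice is an example, not part of RapidJSON.
#include <stdexcept>

#define RAPIDJSON_ASSERT(x) \
    do { if (!(x)) throw std::logic_error("rapidjson assertion failed: " #x); } while (false)

// The override must be visible before any RapidJSON header is included.
#include "rapidjson/document.h"

int main() {
    rapidjson::Document d;
    d.Parse("[1,2,3]");
    try {
        (void)d.GetString();   // asserting accessor: the document is an array, not a string
    } catch (const std::logic_error&) {
        // reached because RAPIDJSON_ASSERT was overridden above
    }
    return 0;
}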
-*/ -#ifndef RAPIDJSON_ASSERT -#include <cassert> -#define RAPIDJSON_ASSERT(x) assert(x) -#endif // RAPIDJSON_ASSERT - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_STATIC_ASSERT - -// Adopt from boost -#ifndef RAPIDJSON_STATIC_ASSERT -#ifndef __clang__ -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -#endif -RAPIDJSON_NAMESPACE_BEGIN -template <bool x> struct STATIC_ASSERTION_FAILURE; -template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; }; -template<int x> struct StaticAssertTest {}; -RAPIDJSON_NAMESPACE_END - -#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y) -#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y) -#define RAPIDJSON_DO_JOIN2(X, Y) X##Y - -#if defined(__GNUC__) -#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused)) -#else -#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE -#endif -#ifndef __clang__ -//!@endcond -#endif - -/*! \def RAPIDJSON_STATIC_ASSERT - \brief (Internal) macro to check for conditions at compile-time - \param x compile-time condition - \hideinitializer - */ -#define RAPIDJSON_STATIC_ASSERT(x) \ - typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \ - sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \ - RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE -#endif - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY - -//! Compiler branching hint for expression with high probability to be true. -/*! - \ingroup RAPIDJSON_CONFIG - \param x Boolean expression likely to be true. -*/ -#ifndef RAPIDJSON_LIKELY -#if defined(__GNUC__) || defined(__clang__) -#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1) -#else -#define RAPIDJSON_LIKELY(x) (x) -#endif -#endif - -//! Compiler branching hint for expression with low probability to be true. -/*! - \ingroup RAPIDJSON_CONFIG - \param x Boolean expression unlikely to be true. 
-*/ -#ifndef RAPIDJSON_UNLIKELY -#if defined(__GNUC__) || defined(__clang__) -#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define RAPIDJSON_UNLIKELY(x) (x) -#endif -#endif - -/////////////////////////////////////////////////////////////////////////////// -// Helpers - -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN - -#define RAPIDJSON_MULTILINEMACRO_BEGIN do { -#define RAPIDJSON_MULTILINEMACRO_END \ -} while((void)0, 0) - -// adopted from Boost -#define RAPIDJSON_VERSION_CODE(x,y,z) \ - (((x)*100000) + ((y)*100) + (z)) - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF - -#if defined(__GNUC__) -#define RAPIDJSON_GNUC \ - RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) -#endif - -#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0)) - -#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x)) -#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x) -#define RAPIDJSON_DIAG_OFF(x) \ - RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x))) - -// push/pop support in Clang and GCC>=4.6 -#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) -#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) -#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) -#else // GCC >= 4.2, < 4.6 -#define RAPIDJSON_DIAG_PUSH /* ignored */ -#define RAPIDJSON_DIAG_POP /* ignored */ -#endif - -#elif defined(_MSC_VER) - -// pragma (MSVC specific) -#define RAPIDJSON_PRAGMA(x) __pragma(x) -#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x)) - -#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x) -#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) -#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) - -#else - -#define RAPIDJSON_DIAG_OFF(x) /* ignored */ -#define RAPIDJSON_DIAG_PUSH /* ignored */ -#define RAPIDJSON_DIAG_POP /* ignored */ - -#endif // RAPIDJSON_DIAG_* - -/////////////////////////////////////////////////////////////////////////////// -// C++11 features - -#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS -#if defined(__clang__) -#if __has_feature(cxx_rvalue_references) && \ - (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) -#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 -#else -#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 -#endif -#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ - (defined(_MSC_VER) && _MSC_VER >= 1600) - -#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 -#else -#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 -#endif -#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS - -#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT -#if defined(__clang__) -#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept) -#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) -// (defined(_MSC_VER) && _MSC_VER >= ????) 
// not yet supported -#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 -#else -#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0 -#endif -#endif -#if RAPIDJSON_HAS_CXX11_NOEXCEPT -#define RAPIDJSON_NOEXCEPT noexcept -#else -#define RAPIDJSON_NOEXCEPT /* noexcept */ -#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT - -// no automatic detection, yet -#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS -#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0 -#endif - -#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR -#if defined(__clang__) -#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for) -#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ - (defined(_MSC_VER) && _MSC_VER >= 1700) -#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 -#else -#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0 -#endif -#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR - -//!@endcond - -/////////////////////////////////////////////////////////////////////////////// -// new/delete - -#ifndef RAPIDJSON_NEW -///! customization point for global \c new -#define RAPIDJSON_NEW(x) new x -#endif -#ifndef RAPIDJSON_DELETE -///! customization point for global \c delete -#define RAPIDJSON_DELETE(x) delete x -#endif - -/////////////////////////////////////////////////////////////////////////////// -// Type - -/*! \namespace rapidjson - \brief main RapidJSON namespace - \see RAPIDJSON_NAMESPACE -*/ -RAPIDJSON_NAMESPACE_BEGIN - -//! Type of JSON value -enum Type { - kNullType = 0, //!< null - kFalseType = 1, //!< false - kTrueType = 2, //!< true - kObjectType = 3, //!< object - kArrayType = 4, //!< array - kStringType = 5, //!< string - kNumberType = 6 //!< number -}; - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/reader.h b/ext/librethinkdbxx/src/rapidjson/reader.h deleted file mode 100644 index 19f8849b..00000000 --- a/ext/librethinkdbxx/src/rapidjson/reader.h +++ /dev/null @@ -1,1879 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_READER_H_ -#define RAPIDJSON_READER_H_ - -/*! 
\file reader.h */ - -#include "allocators.h" -#include "stream.h" -#include "encodedstream.h" -#include "internal/meta.h" -#include "internal/stack.h" -#include "internal/strtod.h" -#include <limits> - -#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER) -#include <intrin.h> -#pragma intrinsic(_BitScanForward) -#endif -#ifdef RAPIDJSON_SSE42 -#include <nmmintrin.h> -#elif defined(RAPIDJSON_SSE2) -#include <emmintrin.h> -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant -RAPIDJSON_DIAG_OFF(4702) // unreachable code -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(old-style-cast) -RAPIDJSON_DIAG_OFF(padded) -RAPIDJSON_DIAG_OFF(switch-enum) -#endif - -#ifdef __GNUC__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(effc++) -#endif - -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -#define RAPIDJSON_NOTHING /* deliberately empty */ -#ifndef RAPIDJSON_PARSE_ERROR_EARLY_RETURN -#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN(value) \ - RAPIDJSON_MULTILINEMACRO_BEGIN \ - if (RAPIDJSON_UNLIKELY(HasParseError())) { return value; } \ - RAPIDJSON_MULTILINEMACRO_END -#endif -#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID \ - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(RAPIDJSON_NOTHING) -//!@endcond - -/*! \def RAPIDJSON_PARSE_ERROR_NORETURN - \ingroup RAPIDJSON_ERRORS - \brief Macro to indicate a parse error. - \param parseErrorCode \ref rapidjson::ParseErrorCode of the error - \param offset position of the error in JSON input (\c size_t) - - This macros can be used as a customization point for the internal - error handling mechanism of RapidJSON. - - A common usage model is to throw an exception instead of requiring the - caller to explicitly check the \ref rapidjson::GenericReader::Parse's - return value: - - \code - #define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode,offset) \ - throw ParseException(parseErrorCode, #parseErrorCode, offset) - - #include <stdexcept> // std::runtime_error - #include "rapidjson/error/error.h" // rapidjson::ParseResult - - struct ParseException : std::runtime_error, rapidjson::ParseResult { - ParseException(rapidjson::ParseErrorCode code, const char* msg, size_t offset) - : std::runtime_error(msg), ParseResult(code, offset) {} - }; - - #include "rapidjson/reader.h" - \endcode - - \see RAPIDJSON_PARSE_ERROR, rapidjson::GenericReader::Parse - */ -#ifndef RAPIDJSON_PARSE_ERROR_NORETURN -#define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset) \ - RAPIDJSON_MULTILINEMACRO_BEGIN \ - RAPIDJSON_ASSERT(!HasParseError()); /* Error can only be assigned once */ \ - SetParseError(parseErrorCode, offset); \ - RAPIDJSON_MULTILINEMACRO_END -#endif - -/*! \def RAPIDJSON_PARSE_ERROR - \ingroup RAPIDJSON_ERRORS - \brief (Internal) macro to indicate and handle a parse error. - \param parseErrorCode \ref rapidjson::ParseErrorCode of the error - \param offset position of the error in JSON input (\c size_t) - - Invokes RAPIDJSON_PARSE_ERROR_NORETURN and stops the parsing. - - \see RAPIDJSON_PARSE_ERROR_NORETURN - \hideinitializer - */ -#ifndef RAPIDJSON_PARSE_ERROR -#define RAPIDJSON_PARSE_ERROR(parseErrorCode, offset) \ - RAPIDJSON_MULTILINEMACRO_BEGIN \ - RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset); \ - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; \ - RAPIDJSON_MULTILINEMACRO_END -#endif - -#include "error/error.h" // ParseErrorCode, ParseResult - -RAPIDJSON_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// ParseFlag - -/*! 
\def RAPIDJSON_PARSE_DEFAULT_FLAGS - \ingroup RAPIDJSON_CONFIG - \brief User-defined kParseDefaultFlags definition. - - User can define this as any \c ParseFlag combinations. -*/ -#ifndef RAPIDJSON_PARSE_DEFAULT_FLAGS -#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseNoFlags -#endif - -//! Combination of parseFlags -/*! \see Reader::Parse, Document::Parse, Document::ParseInsitu, Document::ParseStream - */ -enum ParseFlag { - kParseNoFlags = 0, //!< No flags are set. - kParseInsituFlag = 1, //!< In-situ(destructive) parsing. - kParseValidateEncodingFlag = 2, //!< Validate encoding of JSON strings. - kParseIterativeFlag = 4, //!< Iterative(constant complexity in terms of function call stack size) parsing. - kParseStopWhenDoneFlag = 8, //!< After parsing a complete JSON root from stream, stop further processing the rest of stream. When this flag is used, parser will not generate kParseErrorDocumentRootNotSingular error. - kParseFullPrecisionFlag = 16, //!< Parse number in full precision (but slower). - kParseCommentsFlag = 32, //!< Allow one-line (//) and multi-line (/**/) comments. - kParseNumbersAsStringsFlag = 64, //!< Parse all numbers (ints/doubles) as strings. - kParseTrailingCommasFlag = 128, //!< Allow trailing commas at the end of objects and arrays. - kParseNanAndInfFlag = 256, //!< Allow parsing NaN, Inf, Infinity, -Inf and -Infinity as doubles. - kParseDefaultFlags = RAPIDJSON_PARSE_DEFAULT_FLAGS //!< Default parse flags. Can be customized by defining RAPIDJSON_PARSE_DEFAULT_FLAGS -}; - -/////////////////////////////////////////////////////////////////////////////// -// Handler - -/*! \class rapidjson::Handler - \brief Concept for receiving events from GenericReader upon parsing. - The functions return true if no error occurs. If they return false, - the event publisher should terminate the process. -\code -concept Handler { - typename Ch; - - bool Null(); - bool Bool(bool b); - bool Int(int i); - bool Uint(unsigned i); - bool Int64(int64_t i); - bool Uint64(uint64_t i); - bool Double(double d); - /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length) - bool RawNumber(const Ch* str, SizeType length, bool copy); - bool String(const Ch* str, SizeType length, bool copy); - bool StartObject(); - bool Key(const Ch* str, SizeType length, bool copy); - bool EndObject(SizeType memberCount); - bool StartArray(); - bool EndArray(SizeType elementCount); -}; -\endcode -*/ -/////////////////////////////////////////////////////////////////////////////// -// BaseReaderHandler - -//! Default implementation of Handler. -/*! This can be used as base class of any reader handler. 
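// Illustrative sketch (not part of the deleted header): combining the ParseFlag bits listed
// above at the call site instead of redefining RAPIDJSON_PARSE_DEFAULT_FLAGS. Assumes
// rapidjson/document.h; the JSON input is example data.
#include <cassert>
#include "rapidjson/document.h"

int main() {
    // Accepted only because comments and trailing commas are explicitly enabled.
    const char json[] =
        "{\n"
        "  // one-line comment\n"
        "  \"values\": [1, 2, 3,],\n"
        "}";

    rapidjson::Document d;
    d.Parse<rapidjson::kParseCommentsFlag | rapidjson::kParseTrailingCommasFlag>(json);
    assert(!d.HasParseError());
    assert(d["values"].Size() == 3);
    return 0;
}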
- \note implements Handler concept -*/ -template<typename Encoding = UTF8<>, typename Derived = void> -struct BaseReaderHandler { - typedef typename Encoding::Ch Ch; - - typedef typename internal::SelectIf<internal::IsSame<Derived, void>, BaseReaderHandler, Derived>::Type Override; - - bool Default() { return true; } - bool Null() { return static_cast<Override&>(*this).Default(); } - bool Bool(bool) { return static_cast<Override&>(*this).Default(); } - bool Int(int) { return static_cast<Override&>(*this).Default(); } - bool Uint(unsigned) { return static_cast<Override&>(*this).Default(); } - bool Int64(int64_t) { return static_cast<Override&>(*this).Default(); } - bool Uint64(uint64_t) { return static_cast<Override&>(*this).Default(); } - bool Double(double) { return static_cast<Override&>(*this).Default(); } - /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length) - bool RawNumber(const Ch* str, SizeType len, bool copy) { return static_cast<Override&>(*this).String(str, len, copy); } - bool String(const Ch*, SizeType, bool) { return static_cast<Override&>(*this).Default(); } - bool StartObject() { return static_cast<Override&>(*this).Default(); } - bool Key(const Ch* str, SizeType len, bool copy) { return static_cast<Override&>(*this).String(str, len, copy); } - bool EndObject(SizeType) { return static_cast<Override&>(*this).Default(); } - bool StartArray() { return static_cast<Override&>(*this).Default(); } - bool EndArray(SizeType) { return static_cast<Override&>(*this).Default(); } -}; - -/////////////////////////////////////////////////////////////////////////////// -// StreamLocalCopy - -namespace internal { - -template<typename Stream, int = StreamTraits<Stream>::copyOptimization> -class StreamLocalCopy; - -//! Do copy optimization. -template<typename Stream> -class StreamLocalCopy<Stream, 1> { -public: - StreamLocalCopy(Stream& original) : s(original), original_(original) {} - ~StreamLocalCopy() { original_ = s; } - - Stream s; - -private: - StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */; - - Stream& original_; -}; - -//! Keep reference. -template<typename Stream> -class StreamLocalCopy<Stream, 0> { -public: - StreamLocalCopy(Stream& original) : s(original) {} - - Stream& s; - -private: - StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */; -}; - -} // namespace internal - -/////////////////////////////////////////////////////////////////////////////// -// SkipWhitespace - -//! Skip the JSON white spaces in a stream. -/*! \param is A input stream for skipping white spaces. - \note This function has SSE2/SSE4.2 specialization. -*/ -template<typename InputStream> -void SkipWhitespace(InputStream& is) { - internal::StreamLocalCopy<InputStream> copy(is); - InputStream& s(copy.s); - - typename InputStream::Ch c; - while ((c = s.Peek()) == ' ' || c == '\n' || c == '\r' || c == '\t') - s.Take(); -} - -inline const char* SkipWhitespace(const char* p, const char* end) { - while (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) - ++p; - return p; -} - -#ifdef RAPIDJSON_SSE42 -//! Skip whitespace with SSE 4.2 pcmpistrm instruction, testing 16 8-byte characters at once. 
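// Illustrative sketch (not part of the deleted header): a minimal SAX handler built on the
// BaseReaderHandler above; every event it does not override falls through to Default().
// Assumes rapidjson/reader.h; the handler and input are example code only.
#include <cstdio>
#include "rapidjson/reader.h"

struct KeyPrinter : rapidjson::BaseReaderHandler<rapidjson::UTF8<>, KeyPrinter> {
    // Print every object key seen while parsing; all other events use the base Default().
    bool Key(const char* str, rapidjson::SizeType length, bool /*copy*/) {
        std::printf("key: %.*s\n", static_cast<int>(length), str);
        return true;
    }
};

int main() {
    const char json[] = "{\"alpha\":1,\"beta\":{\"gamma\":true}}";
    rapidjson::StringStream ss(json);
    rapidjson::Reader reader;
    KeyPrinter handler;
    rapidjson::ParseResult ok = reader.Parse(ss, handler);
    return ok ? 0 : 1;
}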
-inline const char *SkipWhitespace_SIMD(const char* p) { - // Fast return for single non-whitespace - if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') - ++p; - else - return p; - - // 16-byte align to the next boundary - const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15)); - while (p != nextAligned) - if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') - ++p; - else - return p; - - // The rest of string using SIMD - static const char whitespace[16] = " \n\r\t"; - const __m128i w = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespace[0])); - - for (;; p += 16) { - const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p)); - const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); - if (r != 0) { // some of characters is non-whitespace -#ifdef _MSC_VER // Find the index of first non-whitespace - unsigned long offset; - _BitScanForward(&offset, r); - return p + offset; -#else - return p + __builtin_ffs(r) - 1; -#endif - } - } -} - -inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { - // Fast return for single non-whitespace - if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) - ++p; - else - return p; - - // The middle of string using SIMD - static const char whitespace[16] = " \n\r\t"; - const __m128i w = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespace[0])); - - for (; p <= end - 16; p += 16) { - const __m128i s = _mm_loadu_si128(reinterpret_cast<const __m128i *>(p)); - const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); - if (r != 0) { // some of characters is non-whitespace -#ifdef _MSC_VER // Find the index of first non-whitespace - unsigned long offset; - _BitScanForward(&offset, r); - return p + offset; -#else - return p + __builtin_ffs(r) - 1; -#endif - } - } - - return SkipWhitespace(p, end); -} - -#elif defined(RAPIDJSON_SSE2) - -//! Skip whitespace with SSE2 instructions, testing 16 8-byte characters at once. 
-inline const char *SkipWhitespace_SIMD(const char* p) { - // Fast return for single non-whitespace - if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') - ++p; - else - return p; - - // 16-byte align to the next boundary - const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15)); - while (p != nextAligned) - if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') - ++p; - else - return p; - - // The rest of string - #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } - static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; - #undef C16 - - const __m128i w0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[0][0])); - const __m128i w1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[1][0])); - const __m128i w2 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[2][0])); - const __m128i w3 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[3][0])); - - for (;; p += 16) { - const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p)); - __m128i x = _mm_cmpeq_epi8(s, w0); - x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); - x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); - x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); - unsigned short r = static_cast<unsigned short>(~_mm_movemask_epi8(x)); - if (r != 0) { // some of characters may be non-whitespace -#ifdef _MSC_VER // Find the index of first non-whitespace - unsigned long offset; - _BitScanForward(&offset, r); - return p + offset; -#else - return p + __builtin_ffs(r) - 1; -#endif - } - } -} - -inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { - // Fast return for single non-whitespace - if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) - ++p; - else - return p; - - // The rest of string - #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } - static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; - #undef C16 - - const __m128i w0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[0][0])); - const __m128i w1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[1][0])); - const __m128i w2 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[2][0])); - const __m128i w3 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[3][0])); - - for (; p <= end - 16; p += 16) { - const __m128i s = _mm_loadu_si128(reinterpret_cast<const __m128i *>(p)); - __m128i x = _mm_cmpeq_epi8(s, w0); - x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); - x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); - x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); - unsigned short r = static_cast<unsigned short>(~_mm_movemask_epi8(x)); - if (r != 0) { // some of characters may be non-whitespace -#ifdef _MSC_VER // Find the index of first non-whitespace - unsigned long offset; - _BitScanForward(&offset, r); - return p + offset; -#else - return p + __builtin_ffs(r) - 1; -#endif - } - } - - return SkipWhitespace(p, end); -} - -#endif // RAPIDJSON_SSE2 - -#ifdef RAPIDJSON_SIMD -//! Template function specialization for InsituStringStream -template<> inline void SkipWhitespace(InsituStringStream& is) { - is.src_ = const_cast<char*>(SkipWhitespace_SIMD(is.src_)); -} - -//! 
Template function specialization for StringStream -template<> inline void SkipWhitespace(StringStream& is) { - is.src_ = SkipWhitespace_SIMD(is.src_); -} - -template<> inline void SkipWhitespace(EncodedInputStream<UTF8<>, MemoryStream>& is) { - is.is_.src_ = SkipWhitespace_SIMD(is.is_.src_, is.is_.end_); -} -#endif // RAPIDJSON_SIMD - -/////////////////////////////////////////////////////////////////////////////// -// GenericReader - -//! SAX-style JSON parser. Use \ref Reader for UTF8 encoding and default allocator. -/*! GenericReader parses JSON text from a stream, and send events synchronously to an - object implementing Handler concept. - - It needs to allocate a stack for storing a single decoded string during - non-destructive parsing. - - For in-situ parsing, the decoded string is directly written to the source - text string, no temporary buffer is required. - - A GenericReader object can be reused for parsing multiple JSON text. - - \tparam SourceEncoding Encoding of the input stream. - \tparam TargetEncoding Encoding of the parse output. - \tparam StackAllocator Allocator type for stack. -*/ -template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator = CrtAllocator> -class GenericReader { -public: - typedef typename SourceEncoding::Ch Ch; //!< SourceEncoding character type - - //! Constructor. - /*! \param stackAllocator Optional allocator for allocating stack memory. (Only use for non-destructive parsing) - \param stackCapacity stack capacity in bytes for storing a single decoded string. (Only use for non-destructive parsing) - */ - GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) : stack_(stackAllocator, stackCapacity), parseResult_() {} - - //! Parse JSON text. - /*! \tparam parseFlags Combination of \ref ParseFlag. - \tparam InputStream Type of input stream, implementing Stream concept. - \tparam Handler Type of handler, implementing Handler concept. - \param is Input stream to be parsed. - \param handler The handler to receive events. - \return Whether the parsing is successful. - */ - template <unsigned parseFlags, typename InputStream, typename Handler> - ParseResult Parse(InputStream& is, Handler& handler) { - if (parseFlags & kParseIterativeFlag) - return IterativeParse<parseFlags>(is, handler); - - parseResult_.Clear(); - - ClearStackOnExit scope(*this); - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - - if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentEmpty, is.Tell()); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - } - else { - ParseValue<parseFlags>(is, handler); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - - if (!(parseFlags & kParseStopWhenDoneFlag)) { - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - - if (RAPIDJSON_UNLIKELY(is.Peek() != '\0')) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentRootNotSingular, is.Tell()); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - } - } - } - - return parseResult_; - } - - //! Parse JSON text (with \ref kParseDefaultFlags) - /*! \tparam InputStream Type of input stream, implementing Stream concept - \tparam Handler Type of handler, implementing Handler concept. - \param is Input stream to be parsed. - \param handler The handler to receive events. - \return Whether the parsing is successful. 
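// Illustrative sketch (not part of the deleted header): inspecting the ParseResult returned
// by Parse() together with the error accessors declared just below. Assumes rapidjson/reader.h
// and rapidjson/error/en.h (the stock English error strings); the malformed input is example data.
#include <cstdio>
#include "rapidjson/reader.h"
#include "rapidjson/error/en.h"

int main() {
    const char bad[] = "{\"unterminated\": ";
    rapidjson::StringStream ss(bad);
    rapidjson::Reader reader;
    rapidjson::BaseReaderHandler<> handler;          // discard all events
    rapidjson::ParseResult ok = reader.Parse(ss, handler);

    if (!ok) {
        std::printf("parse error at offset %zu: %s\n",
                    reader.GetErrorOffset(),
                    rapidjson::GetParseError_En(reader.GetParseErrorCode()));
    }
    return ok ? 0 : 1;
}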
- */ - template <typename InputStream, typename Handler> - ParseResult Parse(InputStream& is, Handler& handler) { - return Parse<kParseDefaultFlags>(is, handler); - } - - //! Whether a parse error has occured in the last parsing. - bool HasParseError() const { return parseResult_.IsError(); } - - //! Get the \ref ParseErrorCode of last parsing. - ParseErrorCode GetParseErrorCode() const { return parseResult_.Code(); } - - //! Get the position of last parsing error in input, 0 otherwise. - size_t GetErrorOffset() const { return parseResult_.Offset(); } - -protected: - void SetParseError(ParseErrorCode code, size_t offset) { parseResult_.Set(code, offset); } - -private: - // Prohibit copy constructor & assignment operator. - GenericReader(const GenericReader&); - GenericReader& operator=(const GenericReader&); - - void ClearStack() { stack_.Clear(); } - - // clear stack on any exit from ParseStream, e.g. due to exception - struct ClearStackOnExit { - explicit ClearStackOnExit(GenericReader& r) : r_(r) {} - ~ClearStackOnExit() { r_.ClearStack(); } - private: - GenericReader& r_; - ClearStackOnExit(const ClearStackOnExit&); - ClearStackOnExit& operator=(const ClearStackOnExit&); - }; - - template<unsigned parseFlags, typename InputStream> - void SkipWhitespaceAndComments(InputStream& is) { - SkipWhitespace(is); - - if (parseFlags & kParseCommentsFlag) { - while (RAPIDJSON_UNLIKELY(Consume(is, '/'))) { - if (Consume(is, '*')) { - while (true) { - if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) - RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); - else if (Consume(is, '*')) { - if (Consume(is, '/')) - break; - } - else - is.Take(); - } - } - else if (RAPIDJSON_LIKELY(Consume(is, '/'))) - while (is.Peek() != '\0' && is.Take() != '\n'); - else - RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); - - SkipWhitespace(is); - } - } - } - - // Parse object: { string : value, ... 
} - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseObject(InputStream& is, Handler& handler) { - RAPIDJSON_ASSERT(is.Peek() == '{'); - is.Take(); // Skip '{' - - if (RAPIDJSON_UNLIKELY(!handler.StartObject())) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - if (Consume(is, '}')) { - if (RAPIDJSON_UNLIKELY(!handler.EndObject(0))) // empty object - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - return; - } - - for (SizeType memberCount = 0;;) { - if (RAPIDJSON_UNLIKELY(is.Peek() != '"')) - RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); - - ParseString<parseFlags>(is, handler, true); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - if (RAPIDJSON_UNLIKELY(!Consume(is, ':'))) - RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - ParseValue<parseFlags>(is, handler); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - ++memberCount; - - switch (is.Peek()) { - case ',': - is.Take(); - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - break; - case '}': - is.Take(); - if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - return; - default: - RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); break; // This useless break is only for making warning and coverage happy - } - - if (parseFlags & kParseTrailingCommasFlag) { - if (is.Peek() == '}') { - if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - is.Take(); - return; - } - } - } - } - - // Parse array: [ value, ... 
] - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseArray(InputStream& is, Handler& handler) { - RAPIDJSON_ASSERT(is.Peek() == '['); - is.Take(); // Skip '[' - - if (RAPIDJSON_UNLIKELY(!handler.StartArray())) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - if (Consume(is, ']')) { - if (RAPIDJSON_UNLIKELY(!handler.EndArray(0))) // empty array - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - return; - } - - for (SizeType elementCount = 0;;) { - ParseValue<parseFlags>(is, handler); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - ++elementCount; - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - - if (Consume(is, ',')) { - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - } - else if (Consume(is, ']')) { - if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - return; - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); - - if (parseFlags & kParseTrailingCommasFlag) { - if (is.Peek() == ']') { - if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - is.Take(); - return; - } - } - } - } - - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseNull(InputStream& is, Handler& handler) { - RAPIDJSON_ASSERT(is.Peek() == 'n'); - is.Take(); - - if (RAPIDJSON_LIKELY(Consume(is, 'u') && Consume(is, 'l') && Consume(is, 'l'))) { - if (RAPIDJSON_UNLIKELY(!handler.Null())) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); - } - - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseTrue(InputStream& is, Handler& handler) { - RAPIDJSON_ASSERT(is.Peek() == 't'); - is.Take(); - - if (RAPIDJSON_LIKELY(Consume(is, 'r') && Consume(is, 'u') && Consume(is, 'e'))) { - if (RAPIDJSON_UNLIKELY(!handler.Bool(true))) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); - } - - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseFalse(InputStream& is, Handler& handler) { - RAPIDJSON_ASSERT(is.Peek() == 'f'); - is.Take(); - - if (RAPIDJSON_LIKELY(Consume(is, 'a') && Consume(is, 'l') && Consume(is, 's') && Consume(is, 'e'))) { - if (RAPIDJSON_UNLIKELY(!handler.Bool(false))) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); - } - - template<typename InputStream> - RAPIDJSON_FORCEINLINE static bool Consume(InputStream& is, typename InputStream::Ch expect) { - if (RAPIDJSON_LIKELY(is.Peek() == expect)) { - is.Take(); - return true; - } - else - return false; - } - - // Helper function to parse four hexidecimal digits in \uXXXX in ParseString(). 
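// Illustrative sketch (not part of the deleted header): what ParseHex4() and the
// surrogate-pair handling further below produce for the caller. "\uD834\uDD1E" is the
// surrogate pair for U+1D11E, which comes back as one 4-byte UTF-8 sequence.
// Assumes rapidjson/document.h; the input is example data.
#include <cassert>
#include <cstring>
#include "rapidjson/document.h"

int main() {
    rapidjson::Document d;
    d.Parse("\"\\uD834\\uDD1E\"");             // JSON text: "\uD834\uDD1E"
    assert(!d.HasParseError() && d.IsString());

    // 0x1D11E encodes to F0 9D 84 9E in UTF-8.
    assert(d.GetStringLength() == 4);
    assert(std::memcmp(d.GetString(), "\xF0\x9D\x84\x9E", 4) == 0);
    return 0;
}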
- template<typename InputStream> - unsigned ParseHex4(InputStream& is, size_t escapeOffset) { - unsigned codepoint = 0; - for (int i = 0; i < 4; i++) { - Ch c = is.Peek(); - codepoint <<= 4; - codepoint += static_cast<unsigned>(c); - if (c >= '0' && c <= '9') - codepoint -= '0'; - else if (c >= 'A' && c <= 'F') - codepoint -= 'A' - 10; - else if (c >= 'a' && c <= 'f') - codepoint -= 'a' - 10; - else { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorStringUnicodeEscapeInvalidHex, escapeOffset); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(0); - } - is.Take(); - } - return codepoint; - } - - template <typename CharType> - class StackStream { - public: - typedef CharType Ch; - - StackStream(internal::Stack<StackAllocator>& stack) : stack_(stack), length_(0) {} - RAPIDJSON_FORCEINLINE void Put(Ch c) { - *stack_.template Push<Ch>() = c; - ++length_; - } - - RAPIDJSON_FORCEINLINE void* Push(SizeType count) { - length_ += count; - return stack_.template Push<Ch>(count); - } - - size_t Length() const { return length_; } - - Ch* Pop() { - return stack_.template Pop<Ch>(length_); - } - - private: - StackStream(const StackStream&); - StackStream& operator=(const StackStream&); - - internal::Stack<StackAllocator>& stack_; - SizeType length_; - }; - - // Parse string and generate String event. Different code paths for kParseInsituFlag. - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseString(InputStream& is, Handler& handler, bool isKey = false) { - internal::StreamLocalCopy<InputStream> copy(is); - InputStream& s(copy.s); - - RAPIDJSON_ASSERT(s.Peek() == '\"'); - s.Take(); // Skip '\"' - - bool success = false; - if (parseFlags & kParseInsituFlag) { - typename InputStream::Ch *head = s.PutBegin(); - ParseStringToStream<parseFlags, SourceEncoding, SourceEncoding>(s, s); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - size_t length = s.PutEnd(head) - 1; - RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); - const typename TargetEncoding::Ch* const str = reinterpret_cast<typename TargetEncoding::Ch*>(head); - success = (isKey ? handler.Key(str, SizeType(length), false) : handler.String(str, SizeType(length), false)); - } - else { - StackStream<typename TargetEncoding::Ch> stackStream(stack_); - ParseStringToStream<parseFlags, SourceEncoding, TargetEncoding>(s, stackStream); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - SizeType length = static_cast<SizeType>(stackStream.Length()) - 1; - const typename TargetEncoding::Ch* const str = stackStream.Pop(); - success = (isKey ? handler.Key(str, length, true) : handler.String(str, length, true)); - } - if (RAPIDJSON_UNLIKELY(!success)) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, s.Tell()); - } - - // Parse string to an output is - // This function handles the prefix/suffix double quotes, escaping, and optional encoding validation. - template<unsigned parseFlags, typename SEncoding, typename TEncoding, typename InputStream, typename OutputStream> - RAPIDJSON_FORCEINLINE void ParseStringToStream(InputStream& is, OutputStream& os) { -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 - static const char escape[256] = { - Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'/', - Z16, Z16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, - 0, 0,'\b', 0, 0, 0,'\f', 0, 0, 0, 0, 0, 0, 0,'\n', 0, - 0, 0,'\r', 0,'\t', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 - }; -#undef Z16 -//!@endcond - - for (;;) { - // Scan and copy string before "\\\"" or < 0x20. This is an optional optimzation. 
- if (!(parseFlags & kParseValidateEncodingFlag)) - ScanCopyUnescapedString(is, os); - - Ch c = is.Peek(); - if (RAPIDJSON_UNLIKELY(c == '\\')) { // Escape - size_t escapeOffset = is.Tell(); // For invalid escaping, report the inital '\\' as error offset - is.Take(); - Ch e = is.Peek(); - if ((sizeof(Ch) == 1 || unsigned(e) < 256) && RAPIDJSON_LIKELY(escape[static_cast<unsigned char>(e)])) { - is.Take(); - os.Put(static_cast<typename TEncoding::Ch>(escape[static_cast<unsigned char>(e)])); - } - else if (RAPIDJSON_LIKELY(e == 'u')) { // Unicode - is.Take(); - unsigned codepoint = ParseHex4(is, escapeOffset); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDBFF)) { - // Handle UTF-16 surrogate pair - if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u'))) - RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); - unsigned codepoint2 = ParseHex4(is, escapeOffset); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; - if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF)) - RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); - codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000; - } - TEncoding::Encode(os, codepoint); - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, escapeOffset); - } - else if (RAPIDJSON_UNLIKELY(c == '"')) { // Closing double quote - is.Take(); - os.Put('\0'); // null-terminate the string - return; - } - else if (RAPIDJSON_UNLIKELY(static_cast<unsigned>(c) < 0x20)) { // RFC 4627: unescaped = %x20-21 / %x23-5B / %x5D-10FFFF - if (c == '\0') - RAPIDJSON_PARSE_ERROR(kParseErrorStringMissQuotationMark, is.Tell()); - else - RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, is.Tell()); - } - else { - size_t offset = is.Tell(); - if (RAPIDJSON_UNLIKELY((parseFlags & kParseValidateEncodingFlag ? 
- !Transcoder<SEncoding, TEncoding>::Validate(is, os) : - !Transcoder<SEncoding, TEncoding>::Transcode(is, os)))) - RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, offset); - } - } - } - - template<typename InputStream, typename OutputStream> - static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InputStream&, OutputStream&) { - // Do nothing for generic version - } - -#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) - // StringStream -> StackStream<char> - static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream<char>& os) { - const char* p = is.src_; - - // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) - const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15)); - while (p != nextAligned) - if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) { - is.src_ = p; - return; - } - else - os.Put(*p++); - - // The rest of string using SIMD - static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; - static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; - const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0])); - const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0])); - const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0])); - - for (;; p += 16) { - const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p)); - const __m128i t1 = _mm_cmpeq_epi8(s, dq); - const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 - const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); - unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x)); - if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped - SizeType length; - #ifdef _MSC_VER // Find the index of first escaped - unsigned long offset; - _BitScanForward(&offset, r); - length = offset; - #else - length = static_cast<SizeType>(__builtin_ffs(r) - 1); - #endif - char* q = reinterpret_cast<char*>(os.Push(length)); - for (size_t i = 0; i < length; i++) - q[i] = p[i]; - - p += length; - break; - } - _mm_storeu_si128(reinterpret_cast<__m128i *>(os.Push(16)), s); - } - - is.src_ = p; - } - - // InsituStringStream -> InsituStringStream - static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) { - RAPIDJSON_ASSERT(&is == &os); - (void)os; - - if (is.src_ == is.dst_) { - SkipUnescapedString(is); - return; - } - - char* p = is.src_; - char *q = is.dst_; - - // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) - const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15)); - while (p != nextAligned) - if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) { - is.src_ = p; - is.dst_ = q; - return; - } - else - *q++ = *p++; - - // The rest of string using SIMD - static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', 
'\"', '\"', '\"', '\"', '\"' }; - static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; - const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0])); - const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0])); - const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0])); - - for (;; p += 16, q += 16) { - const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p)); - const __m128i t1 = _mm_cmpeq_epi8(s, dq); - const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 - const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); - unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x)); - if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped - size_t length; -#ifdef _MSC_VER // Find the index of first escaped - unsigned long offset; - _BitScanForward(&offset, r); - length = offset; -#else - length = static_cast<size_t>(__builtin_ffs(r) - 1); -#endif - for (const char* pend = p + length; p != pend; ) - *q++ = *p++; - break; - } - _mm_storeu_si128(reinterpret_cast<__m128i *>(q), s); - } - - is.src_ = p; - is.dst_ = q; - } - - // When read/write pointers are the same for insitu stream, just skip unescaped characters - static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) { - RAPIDJSON_ASSERT(is.src_ == is.dst_); - char* p = is.src_; - - // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) - const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15)); - for (; p != nextAligned; p++) - if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) { - is.src_ = is.dst_ = p; - return; - } - - // The rest of string using SIMD - static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; - static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; - const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0])); - const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0])); - const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0])); - - for (;; p += 16) { - const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p)); - const __m128i t1 = _mm_cmpeq_epi8(s, dq); - const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 - const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); - unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x)); - if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped - size_t length; -#ifdef _MSC_VER // Find the index of first escaped - unsigned long offset; - _BitScanForward(&offset, r); - length = offset; -#else - length = static_cast<size_t>(__builtin_ffs(r) - 1); -#endif - p += length; - break; - } - } - - is.src_ = is.dst_ = p; - } -#endif - - 
template<typename InputStream, bool backup, bool pushOnTake> - class NumberStream; - - template<typename InputStream> - class NumberStream<InputStream, false, false> { - public: - typedef typename InputStream::Ch Ch; - - NumberStream(GenericReader& reader, InputStream& s) : is(s) { (void)reader; } - ~NumberStream() {} - - RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); } - RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); } - RAPIDJSON_FORCEINLINE Ch Take() { return is.Take(); } - RAPIDJSON_FORCEINLINE void Push(char) {} - - size_t Tell() { return is.Tell(); } - size_t Length() { return 0; } - const char* Pop() { return 0; } - - protected: - NumberStream& operator=(const NumberStream&); - - InputStream& is; - }; - - template<typename InputStream> - class NumberStream<InputStream, true, false> : public NumberStream<InputStream, false, false> { - typedef NumberStream<InputStream, false, false> Base; - public: - NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {} - ~NumberStream() {} - - RAPIDJSON_FORCEINLINE Ch TakePush() { - stackStream.Put(static_cast<char>(Base::is.Peek())); - return Base::is.Take(); - } - - RAPIDJSON_FORCEINLINE void Push(char c) { - stackStream.Put(c); - } - - size_t Length() { return stackStream.Length(); } - - const char* Pop() { - stackStream.Put('\0'); - return stackStream.Pop(); - } - - private: - StackStream<char> stackStream; - }; - - template<typename InputStream> - class NumberStream<InputStream, true, true> : public NumberStream<InputStream, true, false> { - typedef NumberStream<InputStream, true, false> Base; - public: - NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {} - ~NumberStream() {} - - RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); } - }; - - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseNumber(InputStream& is, Handler& handler) { - internal::StreamLocalCopy<InputStream> copy(is); - NumberStream<InputStream, - ((parseFlags & kParseNumbersAsStringsFlag) != 0) ? 
- ((parseFlags & kParseInsituFlag) == 0) : - ((parseFlags & kParseFullPrecisionFlag) != 0), - (parseFlags & kParseNumbersAsStringsFlag) != 0 && - (parseFlags & kParseInsituFlag) == 0> s(*this, copy.s); - - size_t startOffset = s.Tell(); - double d = 0.0; - bool useNanOrInf = false; - - // Parse minus - bool minus = Consume(s, '-'); - - // Parse int: zero / ( digit1-9 *DIGIT ) - unsigned i = 0; - uint64_t i64 = 0; - bool use64bit = false; - int significandDigit = 0; - if (RAPIDJSON_UNLIKELY(s.Peek() == '0')) { - i = 0; - s.TakePush(); - } - else if (RAPIDJSON_LIKELY(s.Peek() >= '1' && s.Peek() <= '9')) { - i = static_cast<unsigned>(s.TakePush() - '0'); - - if (minus) - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (RAPIDJSON_UNLIKELY(i >= 214748364)) { // 2^31 = 2147483648 - if (RAPIDJSON_LIKELY(i != 214748364 || s.Peek() > '8')) { - i64 = i; - use64bit = true; - break; - } - } - i = i * 10 + static_cast<unsigned>(s.TakePush() - '0'); - significandDigit++; - } - else - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (RAPIDJSON_UNLIKELY(i >= 429496729)) { // 2^32 - 1 = 4294967295 - if (RAPIDJSON_LIKELY(i != 429496729 || s.Peek() > '5')) { - i64 = i; - use64bit = true; - break; - } - } - i = i * 10 + static_cast<unsigned>(s.TakePush() - '0'); - significandDigit++; - } - } - // Parse NaN or Infinity here - else if ((parseFlags & kParseNanAndInfFlag) && RAPIDJSON_LIKELY((s.Peek() == 'I' || s.Peek() == 'N'))) { - useNanOrInf = true; - if (RAPIDJSON_LIKELY(Consume(s, 'N') && Consume(s, 'a') && Consume(s, 'N'))) { - d = std::numeric_limits<double>::quiet_NaN(); - } - else if (RAPIDJSON_LIKELY(Consume(s, 'I') && Consume(s, 'n') && Consume(s, 'f'))) { - d = (minus ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity()); - if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n') - && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); - - // Parse 64bit int - bool useDouble = false; - if (use64bit) { - if (minus) - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC))) // 2^63 = 9223372036854775808 - if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC) || s.Peek() > '8')) { - d = static_cast<double>(i64); - useDouble = true; - break; - } - i64 = i64 * 10 + static_cast<unsigned>(s.TakePush() - '0'); - significandDigit++; - } - else - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x19999999, 0x99999999))) // 2^64 - 1 = 18446744073709551615 - if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || s.Peek() > '5')) { - d = static_cast<double>(i64); - useDouble = true; - break; - } - i64 = i64 * 10 + static_cast<unsigned>(s.TakePush() - '0'); - significandDigit++; - } - } - - // Force double for big integer - if (useDouble) { - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (RAPIDJSON_UNLIKELY(d >= 1.7976931348623157e307)) // DBL_MAX / 10.0 - RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); - d = d * 10 + (s.TakePush() - '0'); - } - } - - // Parse frac = decimal-point 1*DIGIT - int expFrac = 0; - size_t decimalPosition; - if (Consume(s, '.')) { - decimalPosition = s.Length(); - - if 
(RAPIDJSON_UNLIKELY(!(s.Peek() >= '0' && s.Peek() <= '9'))) - RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissFraction, s.Tell()); - - if (!useDouble) { -#if RAPIDJSON_64BIT - // Use i64 to store significand in 64-bit architecture - if (!use64bit) - i64 = i; - - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (i64 > RAPIDJSON_UINT64_C2(0x1FFFFF, 0xFFFFFFFF)) // 2^53 - 1 for fast path - break; - else { - i64 = i64 * 10 + static_cast<unsigned>(s.TakePush() - '0'); - --expFrac; - if (i64 != 0) - significandDigit++; - } - } - - d = static_cast<double>(i64); -#else - // Use double to store significand in 32-bit architecture - d = static_cast<double>(use64bit ? i64 : i); -#endif - useDouble = true; - } - - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (significandDigit < 17) { - d = d * 10.0 + (s.TakePush() - '0'); - --expFrac; - if (RAPIDJSON_LIKELY(d > 0.0)) - significandDigit++; - } - else - s.TakePush(); - } - } - else - decimalPosition = s.Length(); // decimal position at the end of integer. - - // Parse exp = e [ minus / plus ] 1*DIGIT - int exp = 0; - if (Consume(s, 'e') || Consume(s, 'E')) { - if (!useDouble) { - d = static_cast<double>(use64bit ? i64 : i); - useDouble = true; - } - - bool expMinus = false; - if (Consume(s, '+')) - ; - else if (Consume(s, '-')) - expMinus = true; - - if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - exp = static_cast<int>(s.Take() - '0'); - if (expMinus) { - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - exp = exp * 10 + static_cast<int>(s.Take() - '0'); - if (exp >= 214748364) { // Issue #313: prevent overflow exponent - while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9')) // Consume the rest of exponent - s.Take(); - } - } - } - else { // positive exp - int maxExp = 308 - expFrac; - while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - exp = exp * 10 + static_cast<int>(s.Take() - '0'); - if (RAPIDJSON_UNLIKELY(exp > maxExp)) - RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); - } - } - } - else - RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissExponent, s.Tell()); - - if (expMinus) - exp = -exp; - } - - // Finish parsing, call event according to the type of number. - bool cont = true; - - if (parseFlags & kParseNumbersAsStringsFlag) { - if (parseFlags & kParseInsituFlag) { - s.Pop(); // Pop stack no matter if it will be used or not. - typename InputStream::Ch* head = is.PutBegin(); - const size_t length = s.Tell() - startOffset; - RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); - // unable to insert the \0 character here, it will erase the comma after this number - const typename TargetEncoding::Ch* const str = reinterpret_cast<typename TargetEncoding::Ch*>(head); - cont = handler.RawNumber(str, SizeType(length), false); - } - else { - SizeType numCharsToCopy = static_cast<SizeType>(s.Length()); - StringStream srcStream(s.Pop()); - StackStream<typename TargetEncoding::Ch> dstStream(stack_); - while (numCharsToCopy--) { - Transcoder<UTF8<>, TargetEncoding>::Transcode(srcStream, dstStream); - } - dstStream.Put('\0'); - const typename TargetEncoding::Ch* str = dstStream.Pop(); - const SizeType length = static_cast<SizeType>(dstStream.Length()) - 1; - cont = handler.RawNumber(str, SizeType(length), true); - } - } - else { - size_t length = s.Length(); - const char* decimal = s.Pop(); // Pop stack no matter if it will be used or not. 
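// Worked example: for the input "3.25e2" the loops above leave a significand of 325
// (i = 3 from the integer part, then the fraction digits fold in 2 and 5) with
// expFrac = -2 and exp = 2, so below p = 2 + (-2) = 0 and the handler receives
// Double(325.0).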
- - if (useDouble) { - int p = exp + expFrac; - if (parseFlags & kParseFullPrecisionFlag) - d = internal::StrtodFullPrecision(d, p, decimal, length, decimalPosition, exp); - else - d = internal::StrtodNormalPrecision(d, p); - - cont = handler.Double(minus ? -d : d); - } - else if (useNanOrInf) { - cont = handler.Double(d); - } - else { - if (use64bit) { - if (minus) - cont = handler.Int64(static_cast<int64_t>(~i64 + 1)); - else - cont = handler.Uint64(i64); - } - else { - if (minus) - cont = handler.Int(static_cast<int32_t>(~i + 1)); - else - cont = handler.Uint(i); - } - } - } - if (RAPIDJSON_UNLIKELY(!cont)) - RAPIDJSON_PARSE_ERROR(kParseErrorTermination, startOffset); - } - - // Parse any JSON value - template<unsigned parseFlags, typename InputStream, typename Handler> - void ParseValue(InputStream& is, Handler& handler) { - switch (is.Peek()) { - case 'n': ParseNull <parseFlags>(is, handler); break; - case 't': ParseTrue <parseFlags>(is, handler); break; - case 'f': ParseFalse <parseFlags>(is, handler); break; - case '"': ParseString<parseFlags>(is, handler); break; - case '{': ParseObject<parseFlags>(is, handler); break; - case '[': ParseArray <parseFlags>(is, handler); break; - default : - ParseNumber<parseFlags>(is, handler); - break; - - } - } - - // Iterative Parsing - - // States - enum IterativeParsingState { - IterativeParsingStartState = 0, - IterativeParsingFinishState, - IterativeParsingErrorState, - - // Object states - IterativeParsingObjectInitialState, - IterativeParsingMemberKeyState, - IterativeParsingKeyValueDelimiterState, - IterativeParsingMemberValueState, - IterativeParsingMemberDelimiterState, - IterativeParsingObjectFinishState, - - // Array states - IterativeParsingArrayInitialState, - IterativeParsingElementState, - IterativeParsingElementDelimiterState, - IterativeParsingArrayFinishState, - - // Single value state - IterativeParsingValueState - }; - - enum { cIterativeParsingStateCount = IterativeParsingValueState + 1 }; - - // Tokens - enum Token { - LeftBracketToken = 0, - RightBracketToken, - - LeftCurlyBracketToken, - RightCurlyBracketToken, - - CommaToken, - ColonToken, - - StringToken, - FalseToken, - TrueToken, - NullToken, - NumberToken, - - kTokenCount - }; - - RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) { - -//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN -#define N NumberToken -#define N16 N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N - // Maps from ASCII to Token - static const unsigned char tokenMap[256] = { - N16, // 00~0F - N16, // 10~1F - N, N, StringToken, N, N, N, N, N, N, N, N, N, CommaToken, N, N, N, // 20~2F - N, N, N, N, N, N, N, N, N, N, ColonToken, N, N, N, N, N, // 30~3F - N16, // 40~4F - N, N, N, N, N, N, N, N, N, N, N, LeftBracketToken, N, RightBracketToken, N, N, // 50~5F - N, N, N, N, N, N, FalseToken, N, N, N, N, N, N, N, NullToken, N, // 60~6F - N, N, N, N, TrueToken, N, N, N, N, N, N, LeftCurlyBracketToken, N, RightCurlyBracketToken, N, N, // 70~7F - N16, N16, N16, N16, N16, N16, N16, N16 // 80~FF - }; -#undef N -#undef N16 -//!@endcond - - if (sizeof(Ch) == 1 || static_cast<unsigned>(c) < 256) - return static_cast<Token>(tokenMap[static_cast<unsigned char>(c)]); - else - return NumberToken; - } - - RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) { - // current state x one lookahead token -> new state - static const char G[cIterativeParsingStateCount][kTokenCount] = { - // Start - { - IterativeParsingArrayInitialState, // Left bracket - IterativeParsingErrorState, // Right bracket - 
IterativeParsingObjectInitialState, // Left curly bracket - IterativeParsingErrorState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingValueState, // String - IterativeParsingValueState, // False - IterativeParsingValueState, // True - IterativeParsingValueState, // Null - IterativeParsingValueState // Number - }, - // Finish(sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - }, - // Error(sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - }, - // ObjectInitial - { - IterativeParsingErrorState, // Left bracket - IterativeParsingErrorState, // Right bracket - IterativeParsingErrorState, // Left curly bracket - IterativeParsingObjectFinishState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingMemberKeyState, // String - IterativeParsingErrorState, // False - IterativeParsingErrorState, // True - IterativeParsingErrorState, // Null - IterativeParsingErrorState // Number - }, - // MemberKey - { - IterativeParsingErrorState, // Left bracket - IterativeParsingErrorState, // Right bracket - IterativeParsingErrorState, // Left curly bracket - IterativeParsingErrorState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingKeyValueDelimiterState, // Colon - IterativeParsingErrorState, // String - IterativeParsingErrorState, // False - IterativeParsingErrorState, // True - IterativeParsingErrorState, // Null - IterativeParsingErrorState // Number - }, - // KeyValueDelimiter - { - IterativeParsingArrayInitialState, // Left bracket(push MemberValue state) - IterativeParsingErrorState, // Right bracket - IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state) - IterativeParsingErrorState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingMemberValueState, // String - IterativeParsingMemberValueState, // False - IterativeParsingMemberValueState, // True - IterativeParsingMemberValueState, // Null - IterativeParsingMemberValueState // Number - }, - // MemberValue - { - IterativeParsingErrorState, // Left bracket - IterativeParsingErrorState, // Right bracket - IterativeParsingErrorState, // Left curly bracket - IterativeParsingObjectFinishState, // Right curly bracket - IterativeParsingMemberDelimiterState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingErrorState, // String - IterativeParsingErrorState, // False - IterativeParsingErrorState, // True - IterativeParsingErrorState, // Null - IterativeParsingErrorState // Number - }, - // MemberDelimiter - { - IterativeParsingErrorState, // Left bracket - IterativeParsingErrorState, // Right bracket - IterativeParsingErrorState, // Left curly bracket - IterativeParsingObjectFinishState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingMemberKeyState, // String 
- IterativeParsingErrorState, // False - IterativeParsingErrorState, // True - IterativeParsingErrorState, // Null - IterativeParsingErrorState // Number - }, - // ObjectFinish(sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - }, - // ArrayInitial - { - IterativeParsingArrayInitialState, // Left bracket(push Element state) - IterativeParsingArrayFinishState, // Right bracket - IterativeParsingObjectInitialState, // Left curly bracket(push Element state) - IterativeParsingErrorState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingElementState, // String - IterativeParsingElementState, // False - IterativeParsingElementState, // True - IterativeParsingElementState, // Null - IterativeParsingElementState // Number - }, - // Element - { - IterativeParsingErrorState, // Left bracket - IterativeParsingArrayFinishState, // Right bracket - IterativeParsingErrorState, // Left curly bracket - IterativeParsingErrorState, // Right curly bracket - IterativeParsingElementDelimiterState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingErrorState, // String - IterativeParsingErrorState, // False - IterativeParsingErrorState, // True - IterativeParsingErrorState, // Null - IterativeParsingErrorState // Number - }, - // ElementDelimiter - { - IterativeParsingArrayInitialState, // Left bracket(push Element state) - IterativeParsingArrayFinishState, // Right bracket - IterativeParsingObjectInitialState, // Left curly bracket(push Element state) - IterativeParsingErrorState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingElementState, // String - IterativeParsingElementState, // False - IterativeParsingElementState, // True - IterativeParsingElementState, // Null - IterativeParsingElementState // Number - }, - // ArrayFinish(sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - }, - // Single Value (sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - } - }; // End of G - - return static_cast<IterativeParsingState>(G[state][token]); - } - - // Make an advance in the token stream and state based on the candidate destination state which was returned by Transit(). - // May return a new state on state pop. 
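// For example, iteratively parsing {"a":[1]} walks the transition table above as:
//   Start             --'{'--> ObjectInitial     (push Start and a count of 0, StartObject)
//   ObjectInitial     --'"'--> MemberKey          (ParseString emits Key("a"))
//   MemberKey         --':'--> KeyValueDelimiter
//   KeyValueDelimiter --'['--> ArrayInitial       (push MemberValue and a count of 0, StartArray)
//   ArrayInitial      --'1'--> Element            (ParseValue emits Uint(1))
//   Element           --']'--> ArrayFinish        (pop count, EndArray(1), pop state -> MemberValue)
//   MemberValue       --'}'--> ObjectFinish       (pop count, EndObject(1), pop state Start -> Finish)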
- template <unsigned parseFlags, typename InputStream, typename Handler> - RAPIDJSON_FORCEINLINE IterativeParsingState Transit(IterativeParsingState src, Token token, IterativeParsingState dst, InputStream& is, Handler& handler) { - (void)token; - - switch (dst) { - case IterativeParsingErrorState: - return dst; - - case IterativeParsingObjectInitialState: - case IterativeParsingArrayInitialState: - { - // Push the state(Element or MemeberValue) if we are nested in another array or value of member. - // In this way we can get the correct state on ObjectFinish or ArrayFinish by frame pop. - IterativeParsingState n = src; - if (src == IterativeParsingArrayInitialState || src == IterativeParsingElementDelimiterState) - n = IterativeParsingElementState; - else if (src == IterativeParsingKeyValueDelimiterState) - n = IterativeParsingMemberValueState; - // Push current state. - *stack_.template Push<SizeType>(1) = n; - // Initialize and push the member/element count. - *stack_.template Push<SizeType>(1) = 0; - // Call handler - bool hr = (dst == IterativeParsingObjectInitialState) ? handler.StartObject() : handler.StartArray(); - // On handler short circuits the parsing. - if (!hr) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); - return IterativeParsingErrorState; - } - else { - is.Take(); - return dst; - } - } - - case IterativeParsingMemberKeyState: - ParseString<parseFlags>(is, handler, true); - if (HasParseError()) - return IterativeParsingErrorState; - else - return dst; - - case IterativeParsingKeyValueDelimiterState: - RAPIDJSON_ASSERT(token == ColonToken); - is.Take(); - return dst; - - case IterativeParsingMemberValueState: - // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. - ParseValue<parseFlags>(is, handler); - if (HasParseError()) { - return IterativeParsingErrorState; - } - return dst; - - case IterativeParsingElementState: - // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. - ParseValue<parseFlags>(is, handler); - if (HasParseError()) { - return IterativeParsingErrorState; - } - return dst; - - case IterativeParsingMemberDelimiterState: - case IterativeParsingElementDelimiterState: - is.Take(); - // Update member/element count. - *stack_.template Top<SizeType>() = *stack_.template Top<SizeType>() + 1; - return dst; - - case IterativeParsingObjectFinishState: - { - // Transit from delimiter is only allowed when trailing commas are enabled - if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingMemberDelimiterState) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorObjectMissName, is.Tell()); - return IterativeParsingErrorState; - } - // Get member count. - SizeType c = *stack_.template Pop<SizeType>(1); - // If the object is not empty, count the last member. - if (src == IterativeParsingMemberValueState) - ++c; - // Restore the state. - IterativeParsingState n = static_cast<IterativeParsingState>(*stack_.template Pop<SizeType>(1)); - // Transit to Finish state if this is the topmost scope. - if (n == IterativeParsingStartState) - n = IterativeParsingFinishState; - // Call handler - bool hr = handler.EndObject(c); - // On handler short circuits the parsing. 
- if (!hr) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); - return IterativeParsingErrorState; - } - else { - is.Take(); - return n; - } - } - - case IterativeParsingArrayFinishState: - { - // Transit from delimiter is only allowed when trailing commas are enabled - if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingElementDelimiterState) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorValueInvalid, is.Tell()); - return IterativeParsingErrorState; - } - // Get element count. - SizeType c = *stack_.template Pop<SizeType>(1); - // If the array is not empty, count the last element. - if (src == IterativeParsingElementState) - ++c; - // Restore the state. - IterativeParsingState n = static_cast<IterativeParsingState>(*stack_.template Pop<SizeType>(1)); - // Transit to Finish state if this is the topmost scope. - if (n == IterativeParsingStartState) - n = IterativeParsingFinishState; - // Call handler - bool hr = handler.EndArray(c); - // On handler short circuits the parsing. - if (!hr) { - RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); - return IterativeParsingErrorState; - } - else { - is.Take(); - return n; - } - } - - default: - // This branch is for IterativeParsingValueState actually. - // Use `default:` rather than - // `case IterativeParsingValueState:` is for code coverage. - - // The IterativeParsingStartState is not enumerated in this switch-case. - // It is impossible for that case. And it can be caught by following assertion. - - // The IterativeParsingFinishState is not enumerated in this switch-case either. - // It is a "derivative" state which cannot triggered from Predict() directly. - // Therefore it cannot happen here. And it can be caught by following assertion. - RAPIDJSON_ASSERT(dst == IterativeParsingValueState); - - // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. - ParseValue<parseFlags>(is, handler); - if (HasParseError()) { - return IterativeParsingErrorState; - } - return IterativeParsingFinishState; - } - } - - template <typename InputStream> - void HandleError(IterativeParsingState src, InputStream& is) { - if (HasParseError()) { - // Error flag has been set. 
- return; - } - - switch (src) { - case IterativeParsingStartState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentEmpty, is.Tell()); return; - case IterativeParsingFinishState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentRootNotSingular, is.Tell()); return; - case IterativeParsingObjectInitialState: - case IterativeParsingMemberDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); return; - case IterativeParsingMemberKeyState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); return; - case IterativeParsingMemberValueState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); return; - case IterativeParsingKeyValueDelimiterState: - case IterativeParsingArrayInitialState: - case IterativeParsingElementDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); return; - default: RAPIDJSON_ASSERT(src == IterativeParsingElementState); RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); return; - } - } - - template <unsigned parseFlags, typename InputStream, typename Handler> - ParseResult IterativeParse(InputStream& is, Handler& handler) { - parseResult_.Clear(); - ClearStackOnExit scope(*this); - IterativeParsingState state = IterativeParsingStartState; - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - while (is.Peek() != '\0') { - Token t = Tokenize(is.Peek()); - IterativeParsingState n = Predict(state, t); - IterativeParsingState d = Transit<parseFlags>(state, t, n, is, handler); - - if (d == IterativeParsingErrorState) { - HandleError(state, is); - break; - } - - state = d; - - // Do not further consume streams if a root JSON has been parsed. - if ((parseFlags & kParseStopWhenDoneFlag) && state == IterativeParsingFinishState) - break; - - SkipWhitespaceAndComments<parseFlags>(is); - RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); - } - - // Handle the end of file. - if (state != IterativeParsingFinishState) - HandleError(state, is); - - return parseResult_; - } - - static const size_t kDefaultStackCapacity = 256; //!< Default stack capacity in bytes for storing a single decoded string. - internal::Stack<StackAllocator> stack_; //!< A stack for storing decoded string temporarily during non-destructive parsing. - ParseResult parseResult_; -}; // class GenericReader - -//! Reader with UTF8 encoding and default allocator. 
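As a usage illustration for the iterative parser above and the Reader typedef that follows, a minimal SAX-style driver might look like this (the include path and handler name are illustrative; BaseReaderHandler, StringStream, ParseResult, and kParseIterativeFlag are the stock RapidJSON pieces):

    #include "rapidjson/reader.h"   // adjust the path to wherever this header is vendored
    #include <cstdio>

    // Counts numeric values; every other event falls through to BaseReaderHandler's
    // defaults, which return true so parsing continues.
    struct CountNumbers : rapidjson::BaseReaderHandler<rapidjson::UTF8<>, CountNumbers> {
        size_t count;
        CountNumbers() : count(0) {}
        bool Int(int)         { ++count; return true; }
        bool Uint(unsigned)   { ++count; return true; }
        bool Int64(int64_t)   { ++count; return true; }
        bool Uint64(uint64_t) { ++count; return true; }
        bool Double(double)   { ++count; return true; }
    };

    int main() {
        rapidjson::StringStream ss("[1, 2, 3.5]");
        CountNumbers handler;
        rapidjson::Reader reader;
        // kParseIterativeFlag exercises IterativeParse(); omit it for the recursive parser.
        rapidjson::ParseResult ok = reader.Parse<rapidjson::kParseIterativeFlag>(ss, handler);
        std::printf("%s, %u numbers\n", ok ? "ok" : "parse error", static_cast<unsigned>(handler.count));
        return ok ? 0 : 1;
    }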
-typedef GenericReader<UTF8<>, UTF8<> > Reader;
-
-RAPIDJSON_NAMESPACE_END
-
-#ifdef __clang__
-RAPIDJSON_DIAG_POP
-#endif
-
-
-#ifdef __GNUC__
-RAPIDJSON_DIAG_POP
-#endif
-
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_POP
-#endif
-
-#endif // RAPIDJSON_READER_H_
diff --git a/ext/librethinkdbxx/src/rapidjson/schema.h b/ext/librethinkdbxx/src/rapidjson/schema.h
deleted file mode 100644
index b182aa27..00000000
--- a/ext/librethinkdbxx/src/rapidjson/schema.h
+++ /dev/null
@@ -1,2006 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_SCHEMA_H_
-#define RAPIDJSON_SCHEMA_H_
-
-#include "document.h"
-#include "pointer.h"
-#include <cmath> // abs, floor
-
-#if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX)
-#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 1
-#else
-#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0
-#endif
-
-#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && !defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800))
-#define RAPIDJSON_SCHEMA_USE_STDREGEX 1
-#else
-#define RAPIDJSON_SCHEMA_USE_STDREGEX 0
-#endif
-
-#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
-#include "internal/regex.h"
-#elif RAPIDJSON_SCHEMA_USE_STDREGEX
-#include <regex>
-#endif
-
-#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX || RAPIDJSON_SCHEMA_USE_STDREGEX
-#define RAPIDJSON_SCHEMA_HAS_REGEX 1
-#else
-#define RAPIDJSON_SCHEMA_HAS_REGEX 0
-#endif
-
-#ifndef RAPIDJSON_SCHEMA_VERBOSE
-#define RAPIDJSON_SCHEMA_VERBOSE 0
-#endif
-
-#if RAPIDJSON_SCHEMA_VERBOSE
-#include "stringbuffer.h"
-#endif
-
-RAPIDJSON_DIAG_PUSH
-
-#if defined(__GNUC__)
-RAPIDJSON_DIAG_OFF(effc++)
-#endif
-
-#ifdef __clang__
-RAPIDJSON_DIAG_OFF(weak-vtables)
-RAPIDJSON_DIAG_OFF(exit-time-destructors)
-RAPIDJSON_DIAG_OFF(c++98-compat-pedantic)
-RAPIDJSON_DIAG_OFF(variadic-macros)
-#endif
-
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-///////////////////////////////////////////////////////////////////////////////
-// Verbose Utilities
-
-#if RAPIDJSON_SCHEMA_VERBOSE
-
-namespace internal {
-
-inline void PrintInvalidKeyword(const char* keyword) {
-    printf("Fail keyword: %s\n", keyword);
-}
-
-inline void PrintInvalidKeyword(const wchar_t* keyword) {
-    wprintf(L"Fail keyword: %ls\n", keyword);
-}
-
-inline void PrintInvalidDocument(const char* document) {
-    printf("Fail document: %s\n\n", document);
-}
-
-inline void PrintInvalidDocument(const wchar_t* document) {
-    wprintf(L"Fail document: %ls\n\n", document);
-}
-
-inline void PrintValidatorPointers(unsigned depth, const char* s, const char* d) {
-    printf("S: %*s%s\nD: %*s%s\n\n", depth * 4, " ", s, depth * 4, " ", d);
-}
-
-inline void PrintValidatorPointers(unsigned depth, const wchar_t* s, const wchar_t* d) {
-    wprintf(L"S: %*ls%ls\nD: %*ls%ls\n\n", depth * 4, L" ", s, depth * 4, L" ", d);
-}
-
-} //
namespace internal - -#endif // RAPIDJSON_SCHEMA_VERBOSE - -/////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_INVALID_KEYWORD_RETURN - -#if RAPIDJSON_SCHEMA_VERBOSE -#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) internal::PrintInvalidKeyword(keyword) -#else -#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) -#endif - -#define RAPIDJSON_INVALID_KEYWORD_RETURN(keyword)\ -RAPIDJSON_MULTILINEMACRO_BEGIN\ - context.invalidKeyword = keyword.GetString();\ - RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword.GetString());\ - return false;\ -RAPIDJSON_MULTILINEMACRO_END - -/////////////////////////////////////////////////////////////////////////////// -// Forward declarations - -template <typename ValueType, typename Allocator> -class GenericSchemaDocument; - -namespace internal { - -template <typename SchemaDocumentType> -class Schema; - -/////////////////////////////////////////////////////////////////////////////// -// ISchemaValidator - -class ISchemaValidator { -public: - virtual ~ISchemaValidator() {} - virtual bool IsValid() const = 0; -}; - -/////////////////////////////////////////////////////////////////////////////// -// ISchemaStateFactory - -template <typename SchemaType> -class ISchemaStateFactory { -public: - virtual ~ISchemaStateFactory() {} - virtual ISchemaValidator* CreateSchemaValidator(const SchemaType&) = 0; - virtual void DestroySchemaValidator(ISchemaValidator* validator) = 0; - virtual void* CreateHasher() = 0; - virtual uint64_t GetHashCode(void* hasher) = 0; - virtual void DestroryHasher(void* hasher) = 0; - virtual void* MallocState(size_t size) = 0; - virtual void FreeState(void* p) = 0; -}; - -/////////////////////////////////////////////////////////////////////////////// -// Hasher - -// For comparison of compound value -template<typename Encoding, typename Allocator> -class Hasher { -public: - typedef typename Encoding::Ch Ch; - - Hasher(Allocator* allocator = 0, size_t stackCapacity = kDefaultSize) : stack_(allocator, stackCapacity) {} - - bool Null() { return WriteType(kNullType); } - bool Bool(bool b) { return WriteType(b ? 
kTrueType : kFalseType); } - bool Int(int i) { Number n; n.u.i = i; n.d = static_cast<double>(i); return WriteNumber(n); } - bool Uint(unsigned u) { Number n; n.u.u = u; n.d = static_cast<double>(u); return WriteNumber(n); } - bool Int64(int64_t i) { Number n; n.u.i = i; n.d = static_cast<double>(i); return WriteNumber(n); } - bool Uint64(uint64_t u) { Number n; n.u.u = u; n.d = static_cast<double>(u); return WriteNumber(n); } - bool Double(double d) { - Number n; - if (d < 0) n.u.i = static_cast<int64_t>(d); - else n.u.u = static_cast<uint64_t>(d); - n.d = d; - return WriteNumber(n); - } - - bool RawNumber(const Ch* str, SizeType len, bool) { - WriteBuffer(kNumberType, str, len * sizeof(Ch)); - return true; - } - - bool String(const Ch* str, SizeType len, bool) { - WriteBuffer(kStringType, str, len * sizeof(Ch)); - return true; - } - - bool StartObject() { return true; } - bool Key(const Ch* str, SizeType len, bool copy) { return String(str, len, copy); } - bool EndObject(SizeType memberCount) { - uint64_t h = Hash(0, kObjectType); - uint64_t* kv = stack_.template Pop<uint64_t>(memberCount * 2); - for (SizeType i = 0; i < memberCount; i++) - h ^= Hash(kv[i * 2], kv[i * 2 + 1]); // Use xor to achieve member order insensitive - *stack_.template Push<uint64_t>() = h; - return true; - } - - bool StartArray() { return true; } - bool EndArray(SizeType elementCount) { - uint64_t h = Hash(0, kArrayType); - uint64_t* e = stack_.template Pop<uint64_t>(elementCount); - for (SizeType i = 0; i < elementCount; i++) - h = Hash(h, e[i]); // Use hash to achieve element order sensitive - *stack_.template Push<uint64_t>() = h; - return true; - } - - bool IsValid() const { return stack_.GetSize() == sizeof(uint64_t); } - - uint64_t GetHashCode() const { - RAPIDJSON_ASSERT(IsValid()); - return *stack_.template Top<uint64_t>(); - } - -private: - static const size_t kDefaultSize = 256; - struct Number { - union U { - uint64_t u; - int64_t i; - }u; - double d; - }; - - bool WriteType(Type type) { return WriteBuffer(type, 0, 0); } - - bool WriteNumber(const Number& n) { return WriteBuffer(kNumberType, &n, sizeof(n)); } - - bool WriteBuffer(Type type, const void* data, size_t len) { - // FNV-1a from http://isthe.com/chongo/tech/comp/fnv/ - uint64_t h = Hash(RAPIDJSON_UINT64_C2(0x84222325, 0xcbf29ce4), type); - const unsigned char* d = static_cast<const unsigned char*>(data); - for (size_t i = 0; i < len; i++) - h = Hash(h, d[i]); - *stack_.template Push<uint64_t>() = h; - return true; - } - - static uint64_t Hash(uint64_t h, uint64_t d) { - static const uint64_t kPrime = RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3); - h ^= d; - h *= kPrime; - return h; - } - - Stack<Allocator> stack_; -}; - -/////////////////////////////////////////////////////////////////////////////// -// SchemaValidationContext - -template <typename SchemaDocumentType> -struct SchemaValidationContext { - typedef Schema<SchemaDocumentType> SchemaType; - typedef ISchemaStateFactory<SchemaType> SchemaValidatorFactoryType; - typedef typename SchemaType::ValueType ValueType; - typedef typename ValueType::Ch Ch; - - enum PatternValidatorType { - kPatternValidatorOnly, - kPatternValidatorWithProperty, - kPatternValidatorWithAdditionalProperty - }; - - SchemaValidationContext(SchemaValidatorFactoryType& f, const SchemaType* s) : - factory(f), - schema(s), - valueSchema(), - invalidKeyword(), - hasher(), - arrayElementHashCodes(), - validators(), - validatorCount(), - patternPropertiesValidators(), - patternPropertiesValidatorCount(), - 
patternPropertiesSchemas(), - patternPropertiesSchemaCount(), - valuePatternValidatorType(kPatternValidatorOnly), - propertyExist(), - inArray(false), - valueUniqueness(false), - arrayUniqueness(false) - { - } - - ~SchemaValidationContext() { - if (hasher) - factory.DestroryHasher(hasher); - if (validators) { - for (SizeType i = 0; i < validatorCount; i++) - factory.DestroySchemaValidator(validators[i]); - factory.FreeState(validators); - } - if (patternPropertiesValidators) { - for (SizeType i = 0; i < patternPropertiesValidatorCount; i++) - factory.DestroySchemaValidator(patternPropertiesValidators[i]); - factory.FreeState(patternPropertiesValidators); - } - if (patternPropertiesSchemas) - factory.FreeState(patternPropertiesSchemas); - if (propertyExist) - factory.FreeState(propertyExist); - } - - SchemaValidatorFactoryType& factory; - const SchemaType* schema; - const SchemaType* valueSchema; - const Ch* invalidKeyword; - void* hasher; // Only validator access - void* arrayElementHashCodes; // Only validator access this - ISchemaValidator** validators; - SizeType validatorCount; - ISchemaValidator** patternPropertiesValidators; - SizeType patternPropertiesValidatorCount; - const SchemaType** patternPropertiesSchemas; - SizeType patternPropertiesSchemaCount; - PatternValidatorType valuePatternValidatorType; - PatternValidatorType objectPatternValidatorType; - SizeType arrayElementIndex; - bool* propertyExist; - bool inArray; - bool valueUniqueness; - bool arrayUniqueness; -}; - -/////////////////////////////////////////////////////////////////////////////// -// Schema - -template <typename SchemaDocumentType> -class Schema { -public: - typedef typename SchemaDocumentType::ValueType ValueType; - typedef typename SchemaDocumentType::AllocatorType AllocatorType; - typedef typename SchemaDocumentType::PointerType PointerType; - typedef typename ValueType::EncodingType EncodingType; - typedef typename EncodingType::Ch Ch; - typedef SchemaValidationContext<SchemaDocumentType> Context; - typedef Schema<SchemaDocumentType> SchemaType; - typedef GenericValue<EncodingType, AllocatorType> SValue; - friend class GenericSchemaDocument<ValueType, AllocatorType>; - - Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) : - allocator_(allocator), - enum_(), - enumCount_(), - not_(), - type_((1 << kTotalSchemaType) - 1), // typeless - validatorCount_(), - properties_(), - additionalPropertiesSchema_(), - patternProperties_(), - patternPropertyCount_(), - propertyCount_(), - minProperties_(), - maxProperties_(SizeType(~0)), - additionalProperties_(true), - hasDependencies_(), - hasRequired_(), - hasSchemaDependencies_(), - additionalItemsSchema_(), - itemsList_(), - itemsTuple_(), - itemsTupleCount_(), - minItems_(), - maxItems_(SizeType(~0)), - additionalItems_(true), - uniqueItems_(false), - pattern_(), - minLength_(0), - maxLength_(~SizeType(0)), - exclusiveMinimum_(false), - exclusiveMaximum_(false) - { - typedef typename SchemaDocumentType::ValueType ValueType; - typedef typename ValueType::ConstValueIterator ConstValueIterator; - typedef typename ValueType::ConstMemberIterator ConstMemberIterator; - - if (!value.IsObject()) - return; - - if (const ValueType* v = GetMember(value, GetTypeString())) { - type_ = 0; - if (v->IsString()) - AddType(*v); - else if (v->IsArray()) - for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) - AddType(*itr); - } - - if (const ValueType* v = GetMember(value, 
GetEnumString())) - if (v->IsArray() && v->Size() > 0) { - enum_ = static_cast<uint64_t*>(allocator_->Malloc(sizeof(uint64_t) * v->Size())); - for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) { - typedef Hasher<EncodingType, MemoryPoolAllocator<> > EnumHasherType; - char buffer[256 + 24]; - MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer)); - EnumHasherType h(&hasherAllocator, 256); - itr->Accept(h); - enum_[enumCount_++] = h.GetHashCode(); - } - } - - if (schemaDocument) { - AssignIfExist(allOf_, *schemaDocument, p, value, GetAllOfString(), document); - AssignIfExist(anyOf_, *schemaDocument, p, value, GetAnyOfString(), document); - AssignIfExist(oneOf_, *schemaDocument, p, value, GetOneOfString(), document); - } - - if (const ValueType* v = GetMember(value, GetNotString())) { - schemaDocument->CreateSchema(¬_, p.Append(GetNotString(), allocator_), *v, document); - notValidatorIndex_ = validatorCount_; - validatorCount_++; - } - - // Object - - const ValueType* properties = GetMember(value, GetPropertiesString()); - const ValueType* required = GetMember(value, GetRequiredString()); - const ValueType* dependencies = GetMember(value, GetDependenciesString()); - { - // Gather properties from properties/required/dependencies - SValue allProperties(kArrayType); - - if (properties && properties->IsObject()) - for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) - AddUniqueElement(allProperties, itr->name); - - if (required && required->IsArray()) - for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) - if (itr->IsString()) - AddUniqueElement(allProperties, *itr); - - if (dependencies && dependencies->IsObject()) - for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { - AddUniqueElement(allProperties, itr->name); - if (itr->value.IsArray()) - for (ConstValueIterator i = itr->value.Begin(); i != itr->value.End(); ++i) - if (i->IsString()) - AddUniqueElement(allProperties, *i); - } - - if (allProperties.Size() > 0) { - propertyCount_ = allProperties.Size(); - properties_ = static_cast<Property*>(allocator_->Malloc(sizeof(Property) * propertyCount_)); - for (SizeType i = 0; i < propertyCount_; i++) { - new (&properties_[i]) Property(); - properties_[i].name = allProperties[i]; - properties_[i].schema = GetTypeless(); - } - } - } - - if (properties && properties->IsObject()) { - PointerType q = p.Append(GetPropertiesString(), allocator_); - for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) { - SizeType index; - if (FindPropertyIndex(itr->name, &index)) - schemaDocument->CreateSchema(&properties_[index].schema, q.Append(itr->name, allocator_), itr->value, document); - } - } - - if (const ValueType* v = GetMember(value, GetPatternPropertiesString())) { - PointerType q = p.Append(GetPatternPropertiesString(), allocator_); - patternProperties_ = static_cast<PatternProperty*>(allocator_->Malloc(sizeof(PatternProperty) * v->MemberCount())); - patternPropertyCount_ = 0; - - for (ConstMemberIterator itr = v->MemberBegin(); itr != v->MemberEnd(); ++itr) { - new (&patternProperties_[patternPropertyCount_]) PatternProperty(); - patternProperties_[patternPropertyCount_].pattern = CreatePattern(itr->name); - schemaDocument->CreateSchema(&patternProperties_[patternPropertyCount_].schema, q.Append(itr->name, allocator_), itr->value, document); - patternPropertyCount_++; - } - } - - if (required && required->IsArray()) - 
for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) - if (itr->IsString()) { - SizeType index; - if (FindPropertyIndex(*itr, &index)) { - properties_[index].required = true; - hasRequired_ = true; - } - } - - if (dependencies && dependencies->IsObject()) { - PointerType q = p.Append(GetDependenciesString(), allocator_); - hasDependencies_ = true; - for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { - SizeType sourceIndex; - if (FindPropertyIndex(itr->name, &sourceIndex)) { - if (itr->value.IsArray()) { - properties_[sourceIndex].dependencies = static_cast<bool*>(allocator_->Malloc(sizeof(bool) * propertyCount_)); - std::memset(properties_[sourceIndex].dependencies, 0, sizeof(bool)* propertyCount_); - for (ConstValueIterator targetItr = itr->value.Begin(); targetItr != itr->value.End(); ++targetItr) { - SizeType targetIndex; - if (FindPropertyIndex(*targetItr, &targetIndex)) - properties_[sourceIndex].dependencies[targetIndex] = true; - } - } - else if (itr->value.IsObject()) { - hasSchemaDependencies_ = true; - schemaDocument->CreateSchema(&properties_[sourceIndex].dependenciesSchema, q.Append(itr->name, allocator_), itr->value, document); - properties_[sourceIndex].dependenciesValidatorIndex = validatorCount_; - validatorCount_++; - } - } - } - } - - if (const ValueType* v = GetMember(value, GetAdditionalPropertiesString())) { - if (v->IsBool()) - additionalProperties_ = v->GetBool(); - else if (v->IsObject()) - schemaDocument->CreateSchema(&additionalPropertiesSchema_, p.Append(GetAdditionalPropertiesString(), allocator_), *v, document); - } - - AssignIfExist(minProperties_, value, GetMinPropertiesString()); - AssignIfExist(maxProperties_, value, GetMaxPropertiesString()); - - // Array - if (const ValueType* v = GetMember(value, GetItemsString())) { - PointerType q = p.Append(GetItemsString(), allocator_); - if (v->IsObject()) // List validation - schemaDocument->CreateSchema(&itemsList_, q, *v, document); - else if (v->IsArray()) { // Tuple validation - itemsTuple_ = static_cast<const Schema**>(allocator_->Malloc(sizeof(const Schema*) * v->Size())); - SizeType index = 0; - for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr, index++) - schemaDocument->CreateSchema(&itemsTuple_[itemsTupleCount_++], q.Append(index, allocator_), *itr, document); - } - } - - AssignIfExist(minItems_, value, GetMinItemsString()); - AssignIfExist(maxItems_, value, GetMaxItemsString()); - - if (const ValueType* v = GetMember(value, GetAdditionalItemsString())) { - if (v->IsBool()) - additionalItems_ = v->GetBool(); - else if (v->IsObject()) - schemaDocument->CreateSchema(&additionalItemsSchema_, p.Append(GetAdditionalItemsString(), allocator_), *v, document); - } - - AssignIfExist(uniqueItems_, value, GetUniqueItemsString()); - - // String - AssignIfExist(minLength_, value, GetMinLengthString()); - AssignIfExist(maxLength_, value, GetMaxLengthString()); - - if (const ValueType* v = GetMember(value, GetPatternString())) - pattern_ = CreatePattern(*v); - - // Number - if (const ValueType* v = GetMember(value, GetMinimumString())) - if (v->IsNumber()) - minimum_.CopyFrom(*v, *allocator_); - - if (const ValueType* v = GetMember(value, GetMaximumString())) - if (v->IsNumber()) - maximum_.CopyFrom(*v, *allocator_); - - AssignIfExist(exclusiveMinimum_, value, GetExclusiveMinimumString()); - AssignIfExist(exclusiveMaximum_, value, GetExclusiveMaximumString()); - - if (const ValueType* v = GetMember(value, 
GetMultipleOfString())) - if (v->IsNumber() && v->GetDouble() > 0.0) - multipleOf_.CopyFrom(*v, *allocator_); - } - - ~Schema() { - if (allocator_) { - allocator_->Free(enum_); - } - if (properties_) { - for (SizeType i = 0; i < propertyCount_; i++) - properties_[i].~Property(); - AllocatorType::Free(properties_); - } - if (patternProperties_) { - for (SizeType i = 0; i < patternPropertyCount_; i++) - patternProperties_[i].~PatternProperty(); - AllocatorType::Free(patternProperties_); - } - AllocatorType::Free(itemsTuple_); -#if RAPIDJSON_SCHEMA_HAS_REGEX - if (pattern_) { - pattern_->~RegexType(); - allocator_->Free(pattern_); - } -#endif - } - - bool BeginValue(Context& context) const { - if (context.inArray) { - if (uniqueItems_) - context.valueUniqueness = true; - - if (itemsList_) - context.valueSchema = itemsList_; - else if (itemsTuple_) { - if (context.arrayElementIndex < itemsTupleCount_) - context.valueSchema = itemsTuple_[context.arrayElementIndex]; - else if (additionalItemsSchema_) - context.valueSchema = additionalItemsSchema_; - else if (additionalItems_) - context.valueSchema = GetTypeless(); - else - RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString()); - } - else - context.valueSchema = GetTypeless(); - - context.arrayElementIndex++; - } - return true; - } - - RAPIDJSON_FORCEINLINE bool EndValue(Context& context) const { - if (context.patternPropertiesValidatorCount > 0) { - bool otherValid = false; - SizeType count = context.patternPropertiesValidatorCount; - if (context.objectPatternValidatorType != Context::kPatternValidatorOnly) - otherValid = context.patternPropertiesValidators[--count]->IsValid(); - - bool patternValid = true; - for (SizeType i = 0; i < count; i++) - if (!context.patternPropertiesValidators[i]->IsValid()) { - patternValid = false; - break; - } - - if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) { - if (!patternValid) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); - } - else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) { - if (!patternValid || !otherValid) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); - } - else if (!patternValid && !otherValid) // kPatternValidatorWithAdditionalProperty) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); - } - - if (enum_) { - const uint64_t h = context.factory.GetHashCode(context.hasher); - for (SizeType i = 0; i < enumCount_; i++) - if (enum_[i] == h) - goto foundEnum; - RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString()); - foundEnum:; - } - - if (allOf_.schemas) - for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++) - if (!context.validators[i]->IsValid()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString()); - - if (anyOf_.schemas) { - for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++) - if (context.validators[i]->IsValid()) - goto foundAny; - RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString()); - foundAny:; - } - - if (oneOf_.schemas) { - bool oneValid = false; - for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++) - if (context.validators[i]->IsValid()) { - if (oneValid) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); - else - oneValid = true; - } - if (!oneValid) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); - } - - if (not_ && context.validators[notValidatorIndex_]->IsValid()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString()); - - return true; - } - - bool Null(Context& context) const { - if (!(type_ & (1 << kNullSchemaType))) - 
RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - return CreateParallelValidator(context); - } - - bool Bool(Context& context, bool) const { - if (!(type_ & (1 << kBooleanSchemaType))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - return CreateParallelValidator(context); - } - - bool Int(Context& context, int i) const { - if (!CheckInt(context, i)) - return false; - return CreateParallelValidator(context); - } - - bool Uint(Context& context, unsigned u) const { - if (!CheckUint(context, u)) - return false; - return CreateParallelValidator(context); - } - - bool Int64(Context& context, int64_t i) const { - if (!CheckInt(context, i)) - return false; - return CreateParallelValidator(context); - } - - bool Uint64(Context& context, uint64_t u) const { - if (!CheckUint(context, u)) - return false; - return CreateParallelValidator(context); - } - - bool Double(Context& context, double d) const { - if (!(type_ & (1 << kNumberSchemaType))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - - if (!minimum_.IsNull() && !CheckDoubleMinimum(context, d)) - return false; - - if (!maximum_.IsNull() && !CheckDoubleMaximum(context, d)) - return false; - - if (!multipleOf_.IsNull() && !CheckDoubleMultipleOf(context, d)) - return false; - - return CreateParallelValidator(context); - } - - bool String(Context& context, const Ch* str, SizeType length, bool) const { - if (!(type_ & (1 << kStringSchemaType))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - - if (minLength_ != 0 || maxLength_ != SizeType(~0)) { - SizeType count; - if (internal::CountStringCodePoint<EncodingType>(str, length, &count)) { - if (count < minLength_) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString()); - if (count > maxLength_) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString()); - } - } - - if (pattern_ && !IsPatternMatch(pattern_, str, length)) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString()); - - return CreateParallelValidator(context); - } - - bool StartObject(Context& context) const { - if (!(type_ & (1 << kObjectSchemaType))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - - if (hasDependencies_ || hasRequired_) { - context.propertyExist = static_cast<bool*>(context.factory.MallocState(sizeof(bool) * propertyCount_)); - std::memset(context.propertyExist, 0, sizeof(bool) * propertyCount_); - } - - if (patternProperties_) { // pre-allocate schema array - SizeType count = patternPropertyCount_ + 1; // extra for valuePatternValidatorType - context.patternPropertiesSchemas = static_cast<const SchemaType**>(context.factory.MallocState(sizeof(const SchemaType*) * count)); - context.patternPropertiesSchemaCount = 0; - std::memset(context.patternPropertiesSchemas, 0, sizeof(SchemaType*) * count); - } - - return CreateParallelValidator(context); - } - - bool Key(Context& context, const Ch* str, SizeType len, bool) const { - if (patternProperties_) { - context.patternPropertiesSchemaCount = 0; - for (SizeType i = 0; i < patternPropertyCount_; i++) - if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) - context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema; - } - - SizeType index; - if (FindPropertyIndex(ValueType(str, len).Move(), &index)) { - if (context.patternPropertiesSchemaCount > 0) { - context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema; - context.valueSchema = GetTypeless(); - context.valuePatternValidatorType = 
Context::kPatternValidatorWithProperty; - } - else - context.valueSchema = properties_[index].schema; - - if (context.propertyExist) - context.propertyExist[index] = true; - - return true; - } - - if (additionalPropertiesSchema_) { - if (additionalPropertiesSchema_ && context.patternPropertiesSchemaCount > 0) { - context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_; - context.valueSchema = GetTypeless(); - context.valuePatternValidatorType = Context::kPatternValidatorWithAdditionalProperty; - } - else - context.valueSchema = additionalPropertiesSchema_; - return true; - } - else if (additionalProperties_) { - context.valueSchema = GetTypeless(); - return true; - } - - if (context.patternPropertiesSchemaCount == 0) // patternProperties are not additional properties - RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString()); - - return true; - } - - bool EndObject(Context& context, SizeType memberCount) const { - if (hasRequired_) - for (SizeType index = 0; index < propertyCount_; index++) - if (properties_[index].required) - if (!context.propertyExist[index]) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString()); - - if (memberCount < minProperties_) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString()); - - if (memberCount > maxProperties_) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString()); - - if (hasDependencies_) { - for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) - if (context.propertyExist[sourceIndex]) { - if (properties_[sourceIndex].dependencies) { - for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++) - if (properties_[sourceIndex].dependencies[targetIndex] && !context.propertyExist[targetIndex]) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); - } - else if (properties_[sourceIndex].dependenciesSchema) - if (!context.validators[properties_[sourceIndex].dependenciesValidatorIndex]->IsValid()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); - } - } - - return true; - } - - bool StartArray(Context& context) const { - if (!(type_ & (1 << kArraySchemaType))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - - context.arrayElementIndex = 0; - context.inArray = true; - - return CreateParallelValidator(context); - } - - bool EndArray(Context& context, SizeType elementCount) const { - context.inArray = false; - - if (elementCount < minItems_) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString()); - - if (elementCount > maxItems_) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString()); - - return true; - } - - // Generate functions for string literal according to Ch -#define RAPIDJSON_STRING_(name, ...) 
\ - static const ValueType& Get##name##String() {\ - static const Ch s[] = { __VA_ARGS__, '\0' };\ - static const ValueType v(s, sizeof(s) / sizeof(Ch) - 1);\ - return v;\ - } - - RAPIDJSON_STRING_(Null, 'n', 'u', 'l', 'l') - RAPIDJSON_STRING_(Boolean, 'b', 'o', 'o', 'l', 'e', 'a', 'n') - RAPIDJSON_STRING_(Object, 'o', 'b', 'j', 'e', 'c', 't') - RAPIDJSON_STRING_(Array, 'a', 'r', 'r', 'a', 'y') - RAPIDJSON_STRING_(String, 's', 't', 'r', 'i', 'n', 'g') - RAPIDJSON_STRING_(Number, 'n', 'u', 'm', 'b', 'e', 'r') - RAPIDJSON_STRING_(Integer, 'i', 'n', 't', 'e', 'g', 'e', 'r') - RAPIDJSON_STRING_(Type, 't', 'y', 'p', 'e') - RAPIDJSON_STRING_(Enum, 'e', 'n', 'u', 'm') - RAPIDJSON_STRING_(AllOf, 'a', 'l', 'l', 'O', 'f') - RAPIDJSON_STRING_(AnyOf, 'a', 'n', 'y', 'O', 'f') - RAPIDJSON_STRING_(OneOf, 'o', 'n', 'e', 'O', 'f') - RAPIDJSON_STRING_(Not, 'n', 'o', 't') - RAPIDJSON_STRING_(Properties, 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') - RAPIDJSON_STRING_(Required, 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd') - RAPIDJSON_STRING_(Dependencies, 'd', 'e', 'p', 'e', 'n', 'd', 'e', 'n', 'c', 'i', 'e', 's') - RAPIDJSON_STRING_(PatternProperties, 'p', 'a', 't', 't', 'e', 'r', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') - RAPIDJSON_STRING_(AdditionalProperties, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') - RAPIDJSON_STRING_(MinProperties, 'm', 'i', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') - RAPIDJSON_STRING_(MaxProperties, 'm', 'a', 'x', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') - RAPIDJSON_STRING_(Items, 'i', 't', 'e', 'm', 's') - RAPIDJSON_STRING_(MinItems, 'm', 'i', 'n', 'I', 't', 'e', 'm', 's') - RAPIDJSON_STRING_(MaxItems, 'm', 'a', 'x', 'I', 't', 'e', 'm', 's') - RAPIDJSON_STRING_(AdditionalItems, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'I', 't', 'e', 'm', 's') - RAPIDJSON_STRING_(UniqueItems, 'u', 'n', 'i', 'q', 'u', 'e', 'I', 't', 'e', 'm', 's') - RAPIDJSON_STRING_(MinLength, 'm', 'i', 'n', 'L', 'e', 'n', 'g', 't', 'h') - RAPIDJSON_STRING_(MaxLength, 'm', 'a', 'x', 'L', 'e', 'n', 'g', 't', 'h') - RAPIDJSON_STRING_(Pattern, 'p', 'a', 't', 't', 'e', 'r', 'n') - RAPIDJSON_STRING_(Minimum, 'm', 'i', 'n', 'i', 'm', 'u', 'm') - RAPIDJSON_STRING_(Maximum, 'm', 'a', 'x', 'i', 'm', 'u', 'm') - RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm') - RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm') - RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f') - -#undef RAPIDJSON_STRING_ - -private: - enum SchemaValueType { - kNullSchemaType, - kBooleanSchemaType, - kObjectSchemaType, - kArraySchemaType, - kStringSchemaType, - kNumberSchemaType, - kIntegerSchemaType, - kTotalSchemaType - }; - -#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX - typedef internal::GenericRegex<EncodingType> RegexType; -#elif RAPIDJSON_SCHEMA_USE_STDREGEX - typedef std::basic_regex<Ch> RegexType; -#else - typedef char RegexType; -#endif - - struct SchemaArray { - SchemaArray() : schemas(), count() {} - ~SchemaArray() { AllocatorType::Free(schemas); } - const SchemaType** schemas; - SizeType begin; // begin index of context.validators - SizeType count; - }; - - static const SchemaType* GetTypeless() { - static SchemaType typeless(0, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), 0); - return &typeless; - } - - template <typename V1, typename V2> - void 
AddUniqueElement(V1& a, const V2& v) { - for (typename V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr) - if (*itr == v) - return; - V1 c(v, *allocator_); - a.PushBack(c, *allocator_); - } - - static const ValueType* GetMember(const ValueType& value, const ValueType& name) { - typename ValueType::ConstMemberIterator itr = value.FindMember(name); - return itr != value.MemberEnd() ? &(itr->value) : 0; - } - - static void AssignIfExist(bool& out, const ValueType& value, const ValueType& name) { - if (const ValueType* v = GetMember(value, name)) - if (v->IsBool()) - out = v->GetBool(); - } - - static void AssignIfExist(SizeType& out, const ValueType& value, const ValueType& name) { - if (const ValueType* v = GetMember(value, name)) - if (v->IsUint64() && v->GetUint64() <= SizeType(~0)) - out = static_cast<SizeType>(v->GetUint64()); - } - - void AssignIfExist(SchemaArray& out, SchemaDocumentType& schemaDocument, const PointerType& p, const ValueType& value, const ValueType& name, const ValueType& document) { - if (const ValueType* v = GetMember(value, name)) { - if (v->IsArray() && v->Size() > 0) { - PointerType q = p.Append(name, allocator_); - out.count = v->Size(); - out.schemas = static_cast<const Schema**>(allocator_->Malloc(out.count * sizeof(const Schema*))); - memset(out.schemas, 0, sizeof(Schema*)* out.count); - for (SizeType i = 0; i < out.count; i++) - schemaDocument.CreateSchema(&out.schemas[i], q.Append(i, allocator_), (*v)[i], document); - out.begin = validatorCount_; - validatorCount_ += out.count; - } - } - } - -#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX - template <typename ValueType> - RegexType* CreatePattern(const ValueType& value) { - if (value.IsString()) { - RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString()); - if (!r->IsValid()) { - r->~RegexType(); - AllocatorType::Free(r); - r = 0; - } - return r; - } - return 0; - } - - static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) { - return pattern->Search(str); - } -#elif RAPIDJSON_SCHEMA_USE_STDREGEX - template <typename ValueType> - RegexType* CreatePattern(const ValueType& value) { - if (value.IsString()) - try { - return new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript); - } - catch (const std::regex_error&) { - } - return 0; - } - - static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType length) { - std::match_results<const Ch*> r; - return std::regex_search(str, str + length, r, *pattern); - } -#else - template <typename ValueType> - RegexType* CreatePattern(const ValueType&) { return 0; } - - static bool IsPatternMatch(const RegexType*, const Ch *, SizeType) { return true; } -#endif // RAPIDJSON_SCHEMA_USE_STDREGEX - - void AddType(const ValueType& type) { - if (type == GetNullString() ) type_ |= 1 << kNullSchemaType; - else if (type == GetBooleanString()) type_ |= 1 << kBooleanSchemaType; - else if (type == GetObjectString() ) type_ |= 1 << kObjectSchemaType; - else if (type == GetArrayString() ) type_ |= 1 << kArraySchemaType; - else if (type == GetStringString() ) type_ |= 1 << kStringSchemaType; - else if (type == GetIntegerString()) type_ |= 1 << kIntegerSchemaType; - else if (type == GetNumberString() ) type_ |= (1 << kNumberSchemaType) | (1 << kIntegerSchemaType); - } - - bool CreateParallelValidator(Context& context) const { - if (enum_ || context.arrayUniqueness) - context.hasher = context.factory.CreateHasher(); - - if 
(validatorCount_) { - RAPIDJSON_ASSERT(context.validators == 0); - context.validators = static_cast<ISchemaValidator**>(context.factory.MallocState(sizeof(ISchemaValidator*) * validatorCount_)); - context.validatorCount = validatorCount_; - - if (allOf_.schemas) - CreateSchemaValidators(context, allOf_); - - if (anyOf_.schemas) - CreateSchemaValidators(context, anyOf_); - - if (oneOf_.schemas) - CreateSchemaValidators(context, oneOf_); - - if (not_) - context.validators[notValidatorIndex_] = context.factory.CreateSchemaValidator(*not_); - - if (hasSchemaDependencies_) { - for (SizeType i = 0; i < propertyCount_; i++) - if (properties_[i].dependenciesSchema) - context.validators[properties_[i].dependenciesValidatorIndex] = context.factory.CreateSchemaValidator(*properties_[i].dependenciesSchema); - } - } - - return true; - } - - void CreateSchemaValidators(Context& context, const SchemaArray& schemas) const { - for (SizeType i = 0; i < schemas.count; i++) - context.validators[schemas.begin + i] = context.factory.CreateSchemaValidator(*schemas.schemas[i]); - } - - // O(n) - bool FindPropertyIndex(const ValueType& name, SizeType* outIndex) const { - SizeType len = name.GetStringLength(); - const Ch* str = name.GetString(); - for (SizeType index = 0; index < propertyCount_; index++) - if (properties_[index].name.GetStringLength() == len && - (std::memcmp(properties_[index].name.GetString(), str, sizeof(Ch) * len) == 0)) - { - *outIndex = index; - return true; - } - return false; - } - - bool CheckInt(Context& context, int64_t i) const { - if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - - if (!minimum_.IsNull()) { - if (minimum_.IsInt64()) { - if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); - } - else if (minimum_.IsUint64()) { - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64() - } - else if (!CheckDoubleMinimum(context, static_cast<double>(i))) - return false; - } - - if (!maximum_.IsNull()) { - if (maximum_.IsInt64()) { - if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); - } - else if (maximum_.IsUint64()) - /* do nothing */; // i <= max(int64_t) < maximum_.GetUint64() - else if (!CheckDoubleMaximum(context, static_cast<double>(i))) - return false; - } - - if (!multipleOf_.IsNull()) { - if (multipleOf_.IsUint64()) { - if (static_cast<uint64_t>(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); - } - else if (!CheckDoubleMultipleOf(context, static_cast<double>(i))) - return false; - } - - return true; - } - - bool CheckUint(Context& context, uint64_t i) const { - if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); - - if (!minimum_.IsNull()) { - if (minimum_.IsUint64()) { - if (exclusiveMinimum_ ? i <= minimum_.GetUint64() : i < minimum_.GetUint64()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); - } - else if (minimum_.IsInt64()) - /* do nothing */; // i >= 0 > minimum.Getint64() - else if (!CheckDoubleMinimum(context, static_cast<double>(i))) - return false; - } - - if (!maximum_.IsNull()) { - if (maximum_.IsUint64()) { - if (exclusiveMaximum_ ? 
i >= maximum_.GetUint64() : i > maximum_.GetUint64()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); - } - else if (maximum_.IsInt64()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_ - else if (!CheckDoubleMaximum(context, static_cast<double>(i))) - return false; - } - - if (!multipleOf_.IsNull()) { - if (multipleOf_.IsUint64()) { - if (i % multipleOf_.GetUint64() != 0) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); - } - else if (!CheckDoubleMultipleOf(context, static_cast<double>(i))) - return false; - } - - return true; - } - - bool CheckDoubleMinimum(Context& context, double d) const { - if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); - return true; - } - - bool CheckDoubleMaximum(Context& context, double d) const { - if (exclusiveMaximum_ ? d >= maximum_.GetDouble() : d > maximum_.GetDouble()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); - return true; - } - - bool CheckDoubleMultipleOf(Context& context, double d) const { - double a = std::abs(d), b = std::abs(multipleOf_.GetDouble()); - double q = std::floor(a / b); - double r = a - q * b; - if (r > 0.0) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); - return true; - } - - struct Property { - Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {} - ~Property() { AllocatorType::Free(dependencies); } - SValue name; - const SchemaType* schema; - const SchemaType* dependenciesSchema; - SizeType dependenciesValidatorIndex; - bool* dependencies; - bool required; - }; - - struct PatternProperty { - PatternProperty() : schema(), pattern() {} - ~PatternProperty() { - if (pattern) { - pattern->~RegexType(); - AllocatorType::Free(pattern); - } - } - const SchemaType* schema; - RegexType* pattern; - }; - - AllocatorType* allocator_; - uint64_t* enum_; - SizeType enumCount_; - SchemaArray allOf_; - SchemaArray anyOf_; - SchemaArray oneOf_; - const SchemaType* not_; - unsigned type_; // bitmask of kSchemaType - SizeType validatorCount_; - SizeType notValidatorIndex_; - - Property* properties_; - const SchemaType* additionalPropertiesSchema_; - PatternProperty* patternProperties_; - SizeType patternPropertyCount_; - SizeType propertyCount_; - SizeType minProperties_; - SizeType maxProperties_; - bool additionalProperties_; - bool hasDependencies_; - bool hasRequired_; - bool hasSchemaDependencies_; - - const SchemaType* additionalItemsSchema_; - const SchemaType* itemsList_; - const SchemaType** itemsTuple_; - SizeType itemsTupleCount_; - SizeType minItems_; - SizeType maxItems_; - bool additionalItems_; - bool uniqueItems_; - - RegexType* pattern_; - SizeType minLength_; - SizeType maxLength_; - - SValue minimum_; - SValue maximum_; - SValue multipleOf_; - bool exclusiveMinimum_; - bool exclusiveMaximum_; -}; - -template<typename Stack, typename Ch> -struct TokenHelper { - RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { - *documentStack.template Push<Ch>() = '/'; - char buffer[21]; - size_t length = static_cast<size_t>((sizeof(SizeType) == 4 ? u32toa(index, buffer) : u64toa(index, buffer)) - buffer); - for (size_t i = 0; i < length; i++) - *documentStack.template Push<Ch>() = buffer[i]; - } -}; - -// Partial specialized version for char to prevent buffer copying. 
-template <typename Stack> -struct TokenHelper<Stack, char> { - RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { - if (sizeof(SizeType) == 4) { - char *buffer = documentStack.template Push<char>(1 + 10); // '/' + uint - *buffer++ = '/'; - const char* end = internal::u32toa(index, buffer); - documentStack.template Pop<char>(static_cast<size_t>(10 - (end - buffer))); - } - else { - char *buffer = documentStack.template Push<char>(1 + 20); // '/' + uint64 - *buffer++ = '/'; - const char* end = internal::u64toa(index, buffer); - documentStack.template Pop<char>(static_cast<size_t>(20 - (end - buffer))); - } - } -}; - -} // namespace internal - -/////////////////////////////////////////////////////////////////////////////// -// IGenericRemoteSchemaDocumentProvider - -template <typename SchemaDocumentType> -class IGenericRemoteSchemaDocumentProvider { -public: - typedef typename SchemaDocumentType::Ch Ch; - - virtual ~IGenericRemoteSchemaDocumentProvider() {} - virtual const SchemaDocumentType* GetRemoteDocument(const Ch* uri, SizeType length) = 0; -}; - -/////////////////////////////////////////////////////////////////////////////// -// GenericSchemaDocument - -//! JSON schema document. -/*! - A JSON schema document is a compiled version of a JSON schema. - It is basically a tree of internal::Schema. - - \note This is an immutable class (i.e. its instance cannot be modified after construction). - \tparam ValueT Type of JSON value (e.g. \c Value ), which also determine the encoding. - \tparam Allocator Allocator type for allocating memory of this document. -*/ -template <typename ValueT, typename Allocator = CrtAllocator> -class GenericSchemaDocument { -public: - typedef ValueT ValueType; - typedef IGenericRemoteSchemaDocumentProvider<GenericSchemaDocument> IRemoteSchemaDocumentProviderType; - typedef Allocator AllocatorType; - typedef typename ValueType::EncodingType EncodingType; - typedef typename EncodingType::Ch Ch; - typedef internal::Schema<GenericSchemaDocument> SchemaType; - typedef GenericPointer<ValueType, Allocator> PointerType; - friend class internal::Schema<GenericSchemaDocument>; - template <typename, typename, typename> - friend class GenericSchemaValidator; - - //! Constructor. - /*! - Compile a JSON document into schema document. - - \param document A JSON document as source. - \param remoteProvider An optional remote schema document provider for resolving remote reference. Can be null. - \param allocator An optional allocator instance for allocating memory. Can be null. - */ - explicit GenericSchemaDocument(const ValueType& document, IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) : - remoteProvider_(remoteProvider), - allocator_(allocator), - ownAllocator_(), - root_(), - schemaMap_(allocator, kInitialSchemaMapSize), - schemaRef_(allocator, kInitialSchemaRefSize) - { - if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); - - // Generate root schema, it will call CreateSchema() to create sub-schemas, - // And call AddRefSchema() if there are $ref. 
- CreateSchemaRecursive(&root_, PointerType(), document, document); - - // Resolve $ref - while (!schemaRef_.Empty()) { - SchemaRefEntry* refEntry = schemaRef_.template Pop<SchemaRefEntry>(1); - if (const SchemaType* s = GetSchema(refEntry->target)) { - if (refEntry->schema) - *refEntry->schema = s; - - // Create entry in map if not exist - if (!GetSchema(refEntry->source)) { - new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(refEntry->source, const_cast<SchemaType*>(s), false, allocator_); - } - } - refEntry->~SchemaRefEntry(); - } - - RAPIDJSON_ASSERT(root_ != 0); - - schemaRef_.ShrinkToFit(); // Deallocate all memory for ref - } - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - //! Move constructor in C++11 - GenericSchemaDocument(GenericSchemaDocument&& rhs) RAPIDJSON_NOEXCEPT : - remoteProvider_(rhs.remoteProvider_), - allocator_(rhs.allocator_), - ownAllocator_(rhs.ownAllocator_), - root_(rhs.root_), - schemaMap_(std::move(rhs.schemaMap_)), - schemaRef_(std::move(rhs.schemaRef_)) - { - rhs.remoteProvider_ = 0; - rhs.allocator_ = 0; - rhs.ownAllocator_ = 0; - } -#endif - - //! Destructor - ~GenericSchemaDocument() { - while (!schemaMap_.Empty()) - schemaMap_.template Pop<SchemaEntry>(1)->~SchemaEntry(); - - RAPIDJSON_DELETE(ownAllocator_); - } - - //! Get the root schema. - const SchemaType& GetRoot() const { return *root_; } - -private: - //! Prohibit copying - GenericSchemaDocument(const GenericSchemaDocument&); - //! Prohibit assignment - GenericSchemaDocument& operator=(const GenericSchemaDocument&); - - struct SchemaRefEntry { - SchemaRefEntry(const PointerType& s, const PointerType& t, const SchemaType** outSchema, Allocator *allocator) : source(s, allocator), target(t, allocator), schema(outSchema) {} - PointerType source; - PointerType target; - const SchemaType** schema; - }; - - struct SchemaEntry { - SchemaEntry(const PointerType& p, SchemaType* s, bool o, Allocator* allocator) : pointer(p, allocator), schema(s), owned(o) {} - ~SchemaEntry() { - if (owned) { - schema->~SchemaType(); - Allocator::Free(schema); - } - } - PointerType pointer; - SchemaType* schema; - bool owned; - }; - - void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { - if (schema) - *schema = SchemaType::GetTypeless(); - - if (v.GetType() == kObjectType) { - const SchemaType* s = GetSchema(pointer); - if (!s) - CreateSchema(schema, pointer, v, document); - - for (typename ValueType::ConstMemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr) - CreateSchemaRecursive(0, pointer.Append(itr->name, allocator_), itr->value, document); - } - else if (v.GetType() == kArrayType) - for (SizeType i = 0; i < v.Size(); i++) - CreateSchemaRecursive(0, pointer.Append(i, allocator_), v[i], document); - } - - void CreateSchema(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { - RAPIDJSON_ASSERT(pointer.IsValid()); - if (v.IsObject()) { - if (!HandleRefSchema(pointer, schema, v, document)) { - SchemaType* s = new (allocator_->Malloc(sizeof(SchemaType))) SchemaType(this, pointer, v, document, allocator_); - new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(pointer, s, true, allocator_); - if (schema) - *schema = s; - } - } - } - - bool HandleRefSchema(const PointerType& source, const SchemaType** schema, const ValueType& v, const ValueType& document) { - static const Ch kRefString[] = { '$', 'r', 'e', 'f', '\0' }; - static const ValueType kRefValue(kRefString, 4); - - 
typename ValueType::ConstMemberIterator itr = v.FindMember(kRefValue); - if (itr == v.MemberEnd()) - return false; - - if (itr->value.IsString()) { - SizeType len = itr->value.GetStringLength(); - if (len > 0) { - const Ch* s = itr->value.GetString(); - SizeType i = 0; - while (i < len && s[i] != '#') // Find the first # - i++; - - if (i > 0) { // Remote reference, resolve immediately - if (remoteProvider_) { - if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i - 1)) { - PointerType pointer(&s[i], len - i, allocator_); - if (pointer.IsValid()) { - if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) { - if (schema) - *schema = sc; - return true; - } - } - } - } - } - else if (s[i] == '#') { // Local reference, defer resolution - PointerType pointer(&s[i], len - i, allocator_); - if (pointer.IsValid()) { - if (const ValueType* nv = pointer.Get(document)) - if (HandleRefSchema(source, schema, *nv, document)) - return true; - - new (schemaRef_.template Push<SchemaRefEntry>()) SchemaRefEntry(source, pointer, schema, allocator_); - return true; - } - } - } - } - return false; - } - - const SchemaType* GetSchema(const PointerType& pointer) const { - for (const SchemaEntry* target = schemaMap_.template Bottom<SchemaEntry>(); target != schemaMap_.template End<SchemaEntry>(); ++target) - if (pointer == target->pointer) - return target->schema; - return 0; - } - - PointerType GetPointer(const SchemaType* schema) const { - for (const SchemaEntry* target = schemaMap_.template Bottom<SchemaEntry>(); target != schemaMap_.template End<SchemaEntry>(); ++target) - if (schema == target->schema) - return target->pointer; - return PointerType(); - } - - static const size_t kInitialSchemaMapSize = 64; - static const size_t kInitialSchemaRefSize = 64; - - IRemoteSchemaDocumentProviderType* remoteProvider_; - Allocator *allocator_; - Allocator *ownAllocator_; - const SchemaType* root_; //!< Root schema. - internal::Stack<Allocator> schemaMap_; // Stores created Pointer -> Schemas - internal::Stack<Allocator> schemaRef_; // Stores Pointer from $ref and schema which holds the $ref -}; - -//! GenericSchemaDocument using Value type. -typedef GenericSchemaDocument<Value> SchemaDocument; -//! IGenericRemoteSchemaDocumentProvider using SchemaDocument. -typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider; - -/////////////////////////////////////////////////////////////////////////////// -// GenericSchemaValidator - -//! JSON Schema Validator. -/*! - A SAX style JSON schema validator. - It uses a \c GenericSchemaDocument to validate SAX events. - It delegates the incoming SAX events to an output handler. - The default output handler does nothing. - It can be reused multiple times by calling \c Reset(). - - \tparam SchemaDocumentType Type of schema document. - \tparam OutputHandler Type of output handler. Default handler does nothing. - \tparam StateAllocator Allocator for storing the internal validation states. 
-*/ -template < - typename SchemaDocumentType, - typename OutputHandler = BaseReaderHandler<typename SchemaDocumentType::SchemaType::EncodingType>, - typename StateAllocator = CrtAllocator> -class GenericSchemaValidator : - public internal::ISchemaStateFactory<typename SchemaDocumentType::SchemaType>, - public internal::ISchemaValidator -{ -public: - typedef typename SchemaDocumentType::SchemaType SchemaType; - typedef typename SchemaDocumentType::PointerType PointerType; - typedef typename SchemaType::EncodingType EncodingType; - typedef typename EncodingType::Ch Ch; - - //! Constructor without output handler. - /*! - \param schemaDocument The schema document to conform to. - \param allocator Optional allocator for storing internal validation states. - \param schemaStackCapacity Optional initial capacity of schema path stack. - \param documentStackCapacity Optional initial capacity of document path stack. - */ - GenericSchemaValidator( - const SchemaDocumentType& schemaDocument, - StateAllocator* allocator = 0, - size_t schemaStackCapacity = kDefaultSchemaStackCapacity, - size_t documentStackCapacity = kDefaultDocumentStackCapacity) - : - schemaDocument_(&schemaDocument), - root_(schemaDocument.GetRoot()), - outputHandler_(GetNullHandler()), - stateAllocator_(allocator), - ownStateAllocator_(0), - schemaStack_(allocator, schemaStackCapacity), - documentStack_(allocator, documentStackCapacity), - valid_(true) -#if RAPIDJSON_SCHEMA_VERBOSE - , depth_(0) -#endif - { - } - - //! Constructor with output handler. - /*! - \param schemaDocument The schema document to conform to. - \param allocator Optional allocator for storing internal validation states. - \param schemaStackCapacity Optional initial capacity of schema path stack. - \param documentStackCapacity Optional initial capacity of document path stack. - */ - GenericSchemaValidator( - const SchemaDocumentType& schemaDocument, - OutputHandler& outputHandler, - StateAllocator* allocator = 0, - size_t schemaStackCapacity = kDefaultSchemaStackCapacity, - size_t documentStackCapacity = kDefaultDocumentStackCapacity) - : - schemaDocument_(&schemaDocument), - root_(schemaDocument.GetRoot()), - outputHandler_(outputHandler), - stateAllocator_(allocator), - ownStateAllocator_(0), - schemaStack_(allocator, schemaStackCapacity), - documentStack_(allocator, documentStackCapacity), - valid_(true) -#if RAPIDJSON_SCHEMA_VERBOSE - , depth_(0) -#endif - { - } - - //! Destructor. - ~GenericSchemaValidator() { - Reset(); - RAPIDJSON_DELETE(ownStateAllocator_); - } - - //! Reset the internal states. - void Reset() { - while (!schemaStack_.Empty()) - PopSchema(); - documentStack_.Clear(); - valid_ = true; - } - - //! Checks whether the current state is valid. - // Implementation of ISchemaValidator - virtual bool IsValid() const { return valid_; } - - //! Gets the JSON pointer pointed to the invalid schema. - PointerType GetInvalidSchemaPointer() const { - return schemaStack_.Empty() ? PointerType() : schemaDocument_->GetPointer(&CurrentSchema()); - } - - //! Gets the keyword of invalid schema. - const Ch* GetInvalidSchemaKeyword() const { - return schemaStack_.Empty() ? 0 : CurrentContext().invalidKeyword; - } - - //! Gets the JSON pointer pointed to the invalid value. - PointerType GetInvalidDocumentPointer() const { - return documentStack_.Empty() ? 
PointerType() : PointerType(documentStack_.template Bottom<Ch>(), documentStack_.GetSize() / sizeof(Ch)); - } - -#if RAPIDJSON_SCHEMA_VERBOSE -#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \ -RAPIDJSON_MULTILINEMACRO_BEGIN\ - *documentStack_.template Push<Ch>() = '\0';\ - documentStack_.template Pop<Ch>(1);\ - internal::PrintInvalidDocument(documentStack_.template Bottom<Ch>());\ -RAPIDJSON_MULTILINEMACRO_END -#else -#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() -#endif - -#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_(method, arg1)\ - if (!valid_) return false; \ - if (!BeginValue() || !CurrentSchema().method arg1) {\ - RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_();\ - return valid_ = false;\ - } - -#define RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2)\ - for (Context* context = schemaStack_.template Bottom<Context>(); context != schemaStack_.template End<Context>(); context++) {\ - if (context->hasher)\ - static_cast<HasherType*>(context->hasher)->method arg2;\ - if (context->validators)\ - for (SizeType i_ = 0; i_ < context->validatorCount; i_++)\ - static_cast<GenericSchemaValidator*>(context->validators[i_])->method arg2;\ - if (context->patternPropertiesValidators)\ - for (SizeType i_ = 0; i_ < context->patternPropertiesValidatorCount; i_++)\ - static_cast<GenericSchemaValidator*>(context->patternPropertiesValidators[i_])->method arg2;\ - } - -#define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\ - return valid_ = EndValue() && outputHandler_.method arg2 - -#define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \ - RAPIDJSON_SCHEMA_HANDLE_BEGIN_ (method, arg1);\ - RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2);\ - RAPIDJSON_SCHEMA_HANDLE_END_ (method, arg2) - - bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext() ), ( )); } - bool Bool(bool b) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool, (CurrentContext(), b), (b)); } - bool Int(int i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int, (CurrentContext(), i), (i)); } - bool Uint(unsigned u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint, (CurrentContext(), u), (u)); } - bool Int64(int64_t i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int64, (CurrentContext(), i), (i)); } - bool Uint64(uint64_t u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint64, (CurrentContext(), u), (u)); } - bool Double(double d) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Double, (CurrentContext(), d), (d)); } - bool RawNumber(const Ch* str, SizeType length, bool copy) - { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } - bool String(const Ch* str, SizeType length, bool copy) - { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } - - bool StartObject() { - RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext())); - RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ()); - return valid_ = outputHandler_.StartObject(); - } - - bool Key(const Ch* str, SizeType len, bool copy) { - if (!valid_) return false; - AppendToken(str, len); - if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false; - RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy)); - return valid_ = outputHandler_.Key(str, len, copy); - } - - bool EndObject(SizeType memberCount) { - if (!valid_) return false; - RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndObject, (memberCount)); - if (!CurrentSchema().EndObject(CurrentContext(), memberCount)) return valid_ = false; - RAPIDJSON_SCHEMA_HANDLE_END_(EndObject, (memberCount)); - } - - bool StartArray() { - RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext())); - 
RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ()); - return valid_ = outputHandler_.StartArray(); - } - - bool EndArray(SizeType elementCount) { - if (!valid_) return false; - RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndArray, (elementCount)); - if (!CurrentSchema().EndArray(CurrentContext(), elementCount)) return valid_ = false; - RAPIDJSON_SCHEMA_HANDLE_END_(EndArray, (elementCount)); - } - -#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_ -#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_ -#undef RAPIDJSON_SCHEMA_HANDLE_PARALLEL_ -#undef RAPIDJSON_SCHEMA_HANDLE_VALUE_ - - // Implementation of ISchemaStateFactory<SchemaType> - virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) { - return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, -#if RAPIDJSON_SCHEMA_VERBOSE - depth_ + 1, -#endif - &GetStateAllocator()); - } - - virtual void DestroySchemaValidator(ISchemaValidator* validator) { - GenericSchemaValidator* v = static_cast<GenericSchemaValidator*>(validator); - v->~GenericSchemaValidator(); - StateAllocator::Free(v); - } - - virtual void* CreateHasher() { - return new (GetStateAllocator().Malloc(sizeof(HasherType))) HasherType(&GetStateAllocator()); - } - - virtual uint64_t GetHashCode(void* hasher) { - return static_cast<HasherType*>(hasher)->GetHashCode(); - } - - virtual void DestroryHasher(void* hasher) { - HasherType* h = static_cast<HasherType*>(hasher); - h->~HasherType(); - StateAllocator::Free(h); - } - - virtual void* MallocState(size_t size) { - return GetStateAllocator().Malloc(size); - } - - virtual void FreeState(void* p) { - return StateAllocator::Free(p); - } - -private: - typedef typename SchemaType::Context Context; - typedef GenericValue<UTF8<>, StateAllocator> HashCodeArray; - typedef internal::Hasher<EncodingType, StateAllocator> HasherType; - - GenericSchemaValidator( - const SchemaDocumentType& schemaDocument, - const SchemaType& root, -#if RAPIDJSON_SCHEMA_VERBOSE - unsigned depth, -#endif - StateAllocator* allocator = 0, - size_t schemaStackCapacity = kDefaultSchemaStackCapacity, - size_t documentStackCapacity = kDefaultDocumentStackCapacity) - : - schemaDocument_(&schemaDocument), - root_(root), - outputHandler_(GetNullHandler()), - stateAllocator_(allocator), - ownStateAllocator_(0), - schemaStack_(allocator, schemaStackCapacity), - documentStack_(allocator, documentStackCapacity), - valid_(true) -#if RAPIDJSON_SCHEMA_VERBOSE - , depth_(depth) -#endif - { - } - - StateAllocator& GetStateAllocator() { - if (!stateAllocator_) - stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator()); - return *stateAllocator_; - } - - bool BeginValue() { - if (schemaStack_.Empty()) - PushSchema(root_); - else { - if (CurrentContext().inArray) - internal::TokenHelper<internal::Stack<StateAllocator>, Ch>::AppendIndexToken(documentStack_, CurrentContext().arrayElementIndex); - - if (!CurrentSchema().BeginValue(CurrentContext())) - return false; - - SizeType count = CurrentContext().patternPropertiesSchemaCount; - const SchemaType** sa = CurrentContext().patternPropertiesSchemas; - typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType; - bool valueUniqueness = CurrentContext().valueUniqueness; - if (CurrentContext().valueSchema) - PushSchema(*CurrentContext().valueSchema); - - if (count > 0) { - CurrentContext().objectPatternValidatorType = patternValidatorType; - ISchemaValidator**& va = CurrentContext().patternPropertiesValidators; - SizeType& 
validatorCount = CurrentContext().patternPropertiesValidatorCount; - va = static_cast<ISchemaValidator**>(MallocState(sizeof(ISchemaValidator*) * count)); - for (SizeType i = 0; i < count; i++) - va[validatorCount++] = CreateSchemaValidator(*sa[i]); - } - - CurrentContext().arrayUniqueness = valueUniqueness; - } - return true; - } - - bool EndValue() { - if (!CurrentSchema().EndValue(CurrentContext())) - return false; - -#if RAPIDJSON_SCHEMA_VERBOSE - GenericStringBuffer<EncodingType> sb; - schemaDocument_->GetPointer(&CurrentSchema()).Stringify(sb); - - *documentStack_.template Push<Ch>() = '\0'; - documentStack_.template Pop<Ch>(1); - internal::PrintValidatorPointers(depth_, sb.GetString(), documentStack_.template Bottom<Ch>()); -#endif - - uint64_t h = CurrentContext().arrayUniqueness ? static_cast<HasherType*>(CurrentContext().hasher)->GetHashCode() : 0; - - PopSchema(); - - if (!schemaStack_.Empty()) { - Context& context = CurrentContext(); - if (context.valueUniqueness) { - HashCodeArray* a = static_cast<HashCodeArray*>(context.arrayElementHashCodes); - if (!a) - CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType); - for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr) - if (itr->GetUint64() == h) - RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString()); - a->PushBack(h, GetStateAllocator()); - } - } - - // Remove the last token of document pointer - while (!documentStack_.Empty() && *documentStack_.template Pop<Ch>(1) != '/') - ; - - return true; - } - - void AppendToken(const Ch* str, SizeType len) { - documentStack_.template Reserve<Ch>(1 + len * 2); // worst case all characters are escaped as two characters - *documentStack_.template PushUnsafe<Ch>() = '/'; - for (SizeType i = 0; i < len; i++) { - if (str[i] == '~') { - *documentStack_.template PushUnsafe<Ch>() = '~'; - *documentStack_.template PushUnsafe<Ch>() = '0'; - } - else if (str[i] == '/') { - *documentStack_.template PushUnsafe<Ch>() = '~'; - *documentStack_.template PushUnsafe<Ch>() = '1'; - } - else - *documentStack_.template PushUnsafe<Ch>() = str[i]; - } - } - - RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push<Context>()) Context(*this, &schema); } - - RAPIDJSON_FORCEINLINE void PopSchema() { - Context* c = schemaStack_.template Pop<Context>(1); - if (HashCodeArray* a = static_cast<HashCodeArray*>(c->arrayElementHashCodes)) { - a->~HashCodeArray(); - StateAllocator::Free(a); - } - c->~Context(); - } - - const SchemaType& CurrentSchema() const { return *schemaStack_.template Top<Context>()->schema; } - Context& CurrentContext() { return *schemaStack_.template Top<Context>(); } - const Context& CurrentContext() const { return *schemaStack_.template Top<Context>(); } - - static OutputHandler& GetNullHandler() { - static OutputHandler nullHandler; - return nullHandler; - } - - static const size_t kDefaultSchemaStackCapacity = 1024; - static const size_t kDefaultDocumentStackCapacity = 256; - const SchemaDocumentType* schemaDocument_; - const SchemaType& root_; - OutputHandler& outputHandler_; - StateAllocator* stateAllocator_; - StateAllocator* ownStateAllocator_; - internal::Stack<StateAllocator> schemaStack_; //!< stack to store the current path of schema (BaseSchemaType *) - internal::Stack<StateAllocator> documentStack_; //!< stack to store the current path of validating document (Ch) - bool valid_; -#if RAPIDJSON_SCHEMA_VERBOSE - unsigned 
depth_; -#endif -}; - -typedef GenericSchemaValidator<SchemaDocument> SchemaValidator; - -/////////////////////////////////////////////////////////////////////////////// -// SchemaValidatingReader - -//! A helper class for parsing with validation. -/*! - This helper class is a functor, designed as a parameter of \ref GenericDocument::Populate(). - - \tparam parseFlags Combination of \ref ParseFlag. - \tparam InputStream Type of input stream, implementing Stream concept. - \tparam SourceEncoding Encoding of the input stream. - \tparam SchemaDocumentType Type of schema document. - \tparam StackAllocator Allocator type for stack. -*/ -template < - unsigned parseFlags, - typename InputStream, - typename SourceEncoding, - typename SchemaDocumentType = SchemaDocument, - typename StackAllocator = CrtAllocator> -class SchemaValidatingReader { -public: - typedef typename SchemaDocumentType::PointerType PointerType; - typedef typename InputStream::Ch Ch; - - //! Constructor - /*! - \param is Input stream. - \param sd Schema document. - */ - SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), isValid_(true) {} - - template <typename Handler> - bool operator()(Handler& handler) { - GenericReader<SourceEncoding, typename SchemaDocumentType::EncodingType, StackAllocator> reader; - GenericSchemaValidator<SchemaDocumentType, Handler> validator(sd_, handler); - parseResult_ = reader.template Parse<parseFlags>(is_, validator); - - isValid_ = validator.IsValid(); - if (isValid_) { - invalidSchemaPointer_ = PointerType(); - invalidSchemaKeyword_ = 0; - invalidDocumentPointer_ = PointerType(); - } - else { - invalidSchemaPointer_ = validator.GetInvalidSchemaPointer(); - invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword(); - invalidDocumentPointer_ = validator.GetInvalidDocumentPointer(); - } - - return parseResult_; - } - - const ParseResult& GetParseResult() const { return parseResult_; } - bool IsValid() const { return isValid_; } - const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; } - const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; } - const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; } - -private: - InputStream& is_; - const SchemaDocumentType& sd_; - - ParseResult parseResult_; - PointerType invalidSchemaPointer_; - const Ch* invalidSchemaKeyword_; - PointerType invalidDocumentPointer_; - bool isValid_; -}; - -RAPIDJSON_NAMESPACE_END -RAPIDJSON_DIAG_POP - -#endif // RAPIDJSON_SCHEMA_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/stream.h b/ext/librethinkdbxx/src/rapidjson/stream.h deleted file mode 100644 index fef82c25..00000000 --- a/ext/librethinkdbxx/src/rapidjson/stream.h +++ /dev/null @@ -1,179 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
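schema.h, removed in full above, was the compiled-schema API (SchemaDocument, SchemaValidator, SchemaValidatingReader and the invalid-pointer/keyword accessors). As a minimal sketch of how that API is typically driven; the schema and instance JSON literals here are purely illustrative:

    #include "rapidjson/document.h"
    #include "rapidjson/schema.h"
    #include "rapidjson/stringbuffer.h"
    #include <cstdio>

    int main() {
        // Illustrative schema: an object requiring an integer "port" in [1, 65535].
        const char* schemaJson =
            "{\"type\":\"object\","
            "\"properties\":{\"port\":{\"type\":\"integer\",\"minimum\":1,\"maximum\":65535}},"
            "\"required\":[\"port\"]}";

        rapidjson::Document sd;
        sd.Parse(schemaJson);
        rapidjson::SchemaDocument schema(sd);          // compile the parsed schema

        rapidjson::Document d;
        d.Parse("{\"port\":9993}");                    // illustrative instance document

        rapidjson::SchemaValidator validator(schema);
        if (!d.Accept(validator)) {                    // replay the document's SAX events through the validator
            rapidjson::StringBuffer sb;
            validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
            std::printf("violates '%s' at %s\n",
                        validator.GetInvalidSchemaKeyword(), sb.GetString());
            return 1;
        }
        return 0;
    }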
- -#include "rapidjson.h" - -#ifndef RAPIDJSON_STREAM_H_ -#define RAPIDJSON_STREAM_H_ - -#include "encodings.h" - -RAPIDJSON_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// Stream - -/*! \class rapidjson::Stream - \brief Concept for reading and writing characters. - - For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd(). - - For write-only stream, only need to implement Put() and Flush(). - -\code -concept Stream { - typename Ch; //!< Character type of the stream. - - //! Read the current character from stream without moving the read cursor. - Ch Peek() const; - - //! Read the current character from stream and moving the read cursor to next character. - Ch Take(); - - //! Get the current read cursor. - //! \return Number of characters read from start. - size_t Tell(); - - //! Begin writing operation at the current read pointer. - //! \return The begin writer pointer. - Ch* PutBegin(); - - //! Write a character. - void Put(Ch c); - - //! Flush the buffer. - void Flush(); - - //! End the writing operation. - //! \param begin The begin write pointer returned by PutBegin(). - //! \return Number of characters written. - size_t PutEnd(Ch* begin); -} -\endcode -*/ - -//! Provides additional information for stream. -/*! - By using traits pattern, this type provides a default configuration for stream. - For custom stream, this type can be specialized for other configuration. - See TEST(Reader, CustomStringStream) in readertest.cpp for example. -*/ -template<typename Stream> -struct StreamTraits { - //! Whether to make local copy of stream for optimization during parsing. - /*! - By default, for safety, streams do not use local copy optimization. - Stream that can be copied fast should specialize this, like StreamTraits<StringStream>. - */ - enum { copyOptimization = 0 }; -}; - -//! Reserve n characters for writing to a stream. -template<typename Stream> -inline void PutReserve(Stream& stream, size_t count) { - (void)stream; - (void)count; -} - -//! Write character to a stream, presuming buffer is reserved. -template<typename Stream> -inline void PutUnsafe(Stream& stream, typename Stream::Ch c) { - stream.Put(c); -} - -//! Put N copies of a character to a stream. -template<typename Stream, typename Ch> -inline void PutN(Stream& stream, Ch c, size_t n) { - PutReserve(stream, n); - for (size_t i = 0; i < n; i++) - PutUnsafe(stream, c); -} - -/////////////////////////////////////////////////////////////////////////////// -// StringStream - -//! Read-only string stream. -/*! \note implements Stream concept -*/ -template <typename Encoding> -struct GenericStringStream { - typedef typename Encoding::Ch Ch; - - GenericStringStream(const Ch *src) : src_(src), head_(src) {} - - Ch Peek() const { return *src_; } - Ch Take() { return *src_++; } - size_t Tell() const { return static_cast<size_t>(src_ - head_); } - - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } - void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } - size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } - - const Ch* src_; //!< Current read position. - const Ch* head_; //!< Original head of the string. -}; - -template <typename Encoding> -struct StreamTraits<GenericStringStream<Encoding> > { - enum { copyOptimization = 1 }; -}; - -//! String stream with UTF8 encoding. 
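GenericStringStream above is the reference model for the read-only half of the Stream concept documented at the top of this file: a parser only ever calls Peek(), Take() and Tell(), so a custom input stream needs little more. A sketch using a hypothetical CStrStream type (not part of the removed code):

    #include "rapidjson/reader.h"
    #include <cstddef>

    // Hypothetical read-only stream over a C string, shaped like GenericStringStream.
    struct CStrStream {
        typedef char Ch;
        explicit CStrStream(const char* s) : src_(s), head_(s) {}

        Ch Peek() const { return *src_; }                                   // current character
        Ch Take() { return *src_++; }                                       // consume it
        size_t Tell() const { return static_cast<size_t>(src_ - head_); }   // characters read so far

        // Write-side members must exist but are never called for read-only parsing.
        Ch* PutBegin() { return 0; }
        void Put(Ch) {}
        void Flush() {}
        size_t PutEnd(Ch*) { return 0; }

        const Ch* src_;
        const Ch* head_;
    };

    int main() {
        CStrStream ss("{\"ok\":true}");            // illustrative input
        rapidjson::Reader reader;
        rapidjson::BaseReaderHandler<> handler;    // accepts and discards all SAX events
        return reader.Parse(ss, handler) ? 0 : 1;
    }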
-typedef GenericStringStream<UTF8<> > StringStream; - -/////////////////////////////////////////////////////////////////////////////// -// InsituStringStream - -//! A read-write string stream. -/*! This string stream is particularly designed for in-situ parsing. - \note implements Stream concept -*/ -template <typename Encoding> -struct GenericInsituStringStream { - typedef typename Encoding::Ch Ch; - - GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {} - - // Read - Ch Peek() { return *src_; } - Ch Take() { return *src_++; } - size_t Tell() { return static_cast<size_t>(src_ - head_); } - - // Write - void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; } - - Ch* PutBegin() { return dst_ = src_; } - size_t PutEnd(Ch* begin) { return static_cast<size_t>(dst_ - begin); } - void Flush() {} - - Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; } - void Pop(size_t count) { dst_ -= count; } - - Ch* src_; - Ch* dst_; - Ch* head_; -}; - -template <typename Encoding> -struct StreamTraits<GenericInsituStringStream<Encoding> > { - enum { copyOptimization = 1 }; -}; - -//! Insitu string stream with UTF8 encoding. -typedef GenericInsituStringStream<UTF8<> > InsituStringStream; - -RAPIDJSON_NAMESPACE_END - -#endif // RAPIDJSON_STREAM_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/stringbuffer.h b/ext/librethinkdbxx/src/rapidjson/stringbuffer.h deleted file mode 100644 index 78f34d20..00000000 --- a/ext/librethinkdbxx/src/rapidjson/stringbuffer.h +++ /dev/null @@ -1,117 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef RAPIDJSON_STRINGBUFFER_H_ -#define RAPIDJSON_STRINGBUFFER_H_ - -#include "stream.h" -#include "internal/stack.h" - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS -#include <utility> // std::move -#endif - -#include "internal/stack.h" - -#if defined(__clang__) -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(c++98-compat) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -//! Represents an in-memory output stream. -/*! - \tparam Encoding Encoding of the stream. - \tparam Allocator type for allocating memory buffer. 
- \note implements Stream concept -*/ -template <typename Encoding, typename Allocator = CrtAllocator> -class GenericStringBuffer { -public: - typedef typename Encoding::Ch Ch; - - GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} - -#if RAPIDJSON_HAS_CXX11_RVALUE_REFS - GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {} - GenericStringBuffer& operator=(GenericStringBuffer&& rhs) { - if (&rhs != this) - stack_ = std::move(rhs.stack_); - return *this; - } -#endif - - void Put(Ch c) { *stack_.template Push<Ch>() = c; } - void PutUnsafe(Ch c) { *stack_.template PushUnsafe<Ch>() = c; } - void Flush() {} - - void Clear() { stack_.Clear(); } - void ShrinkToFit() { - // Push and pop a null terminator. This is safe. - *stack_.template Push<Ch>() = '\0'; - stack_.ShrinkToFit(); - stack_.template Pop<Ch>(1); - } - - void Reserve(size_t count) { stack_.template Reserve<Ch>(count); } - Ch* Push(size_t count) { return stack_.template Push<Ch>(count); } - Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe<Ch>(count); } - void Pop(size_t count) { stack_.template Pop<Ch>(count); } - - const Ch* GetString() const { - // Push and pop a null terminator. This is safe. - *stack_.template Push<Ch>() = '\0'; - stack_.template Pop<Ch>(1); - - return stack_.template Bottom<Ch>(); - } - - size_t GetSize() const { return stack_.GetSize(); } - - static const size_t kDefaultCapacity = 256; - mutable internal::Stack<Allocator> stack_; - -private: - // Prohibit copy constructor & assignment operator. - GenericStringBuffer(const GenericStringBuffer&); - GenericStringBuffer& operator=(const GenericStringBuffer&); -}; - -//! String buffer with UTF8 encoding -typedef GenericStringBuffer<UTF8<> > StringBuffer; - -template<typename Encoding, typename Allocator> -inline void PutReserve(GenericStringBuffer<Encoding, Allocator>& stream, size_t count) { - stream.Reserve(count); -} - -template<typename Encoding, typename Allocator> -inline void PutUnsafe(GenericStringBuffer<Encoding, Allocator>& stream, typename Encoding::Ch c) { - stream.PutUnsafe(c); -} - -//! Implement specialized version of PutN() with memset() for better performance. -template<> -inline void PutN(GenericStringBuffer<UTF8<> >& stream, char c, size_t n) { - std::memset(stream.stack_.Push<char>(n), c, n * sizeof(c)); -} - -RAPIDJSON_NAMESPACE_END - -#if defined(__clang__) -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_STRINGBUFFER_H_ diff --git a/ext/librethinkdbxx/src/rapidjson/writer.h b/ext/librethinkdbxx/src/rapidjson/writer.h deleted file mode 100644 index 7d0610eb..00000000 --- a/ext/librethinkdbxx/src/rapidjson/writer.h +++ /dev/null @@ -1,609 +0,0 @@ -// Tencent is pleased to support the open source community by making RapidJSON available. -// -// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. -// -// Licensed under the MIT License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://opensource.org/licenses/MIT -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
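stringbuffer.h above and writer.h below were normally used together: the Writer emits into a GenericStringBuffer, and GetString()/GetSize() read the result back. A minimal sketch (the key/value strings are illustrative, and the explicit lengths match the handler-style overloads shown below):

    #include "rapidjson/stringbuffer.h"
    #include "rapidjson/writer.h"
    #include <cstdio>

    int main() {
        rapidjson::StringBuffer sb;                              // in-memory UTF-8 output stream
        rapidjson::Writer<rapidjson::StringBuffer> writer(sb);

        writer.StartObject();
        writer.Key("nwid", 4);
        writer.String("8056c2e21c000001", 16);                   // illustrative values
        writer.Key("online", 6);
        writer.Bool(true);
        writer.EndObject();                                      // output is flushed once the root closes

        // GetString() is null-terminated; GetSize() is the length in Ch units.
        std::printf("%s (%zu chars)\n", sb.GetString(), sb.GetSize());
        return 0;
    }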
- -#ifndef RAPIDJSON_WRITER_H_ -#define RAPIDJSON_WRITER_H_ - -#include "stream.h" -#include "internal/stack.h" -#include "internal/strfunc.h" -#include "internal/dtoa.h" -#include "internal/itoa.h" -#include "stringbuffer.h" -#include <new> // placement new - -#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER) -#include <intrin.h> -#pragma intrinsic(_BitScanForward) -#endif -#ifdef RAPIDJSON_SSE42 -#include <nmmintrin.h> -#elif defined(RAPIDJSON_SSE2) -#include <emmintrin.h> -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(padded) -RAPIDJSON_DIAG_OFF(unreachable-code) -#endif - -RAPIDJSON_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// WriteFlag - -/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS - \ingroup RAPIDJSON_CONFIG - \brief User-defined kWriteDefaultFlags definition. - - User can define this as any \c WriteFlag combinations. -*/ -#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS -#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags -#endif - -//! Combination of writeFlags -enum WriteFlag { - kWriteNoFlags = 0, //!< No flags are set. - kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings. - kWriteNanAndInfFlag = 2, //!< Allow writing of Inf, -Inf and NaN. - kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS -}; - -//! JSON writer -/*! Writer implements the concept Handler. - It generates JSON text by events to an output os. - - User may programmatically calls the functions of a writer to generate JSON text. - - On the other side, a writer can also be passed to objects that generates events, - - for example Reader::Parse() and Document::Accept(). - - \tparam OutputStream Type of output stream. - \tparam SourceEncoding Encoding of source string. - \tparam TargetEncoding Encoding of output stream. - \tparam StackAllocator Type of allocator for allocating memory of stack. - \note implements Handler concept -*/ -template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> -class Writer { -public: - typedef typename SourceEncoding::Ch Ch; - - static const int kDefaultMaxDecimalPlaces = 324; - - //! Constructor - /*! \param os Output stream. - \param stackAllocator User supplied allocator. If it is null, it will create a private one. - \param levelDepth Initial capacity of stack. - */ - explicit - Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) : - os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} - - explicit - Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) : - os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} - - //! Reset the writer with a new stream. - /*! - This function reset the writer with a new stream and default settings, - in order to make a Writer object reusable for output multiple JSONs. - - \param os New output stream. - \code - Writer<OutputStream> writer(os1); - writer.StartObject(); - // ... - writer.EndObject(); - - writer.Reset(os2); - writer.StartObject(); - // ... 
- writer.EndObject(); - \endcode - */ - void Reset(OutputStream& os) { - os_ = &os; - hasRoot_ = false; - level_stack_.Clear(); - } - - //! Checks whether the output is a complete JSON. - /*! - A complete JSON has a complete root object or array. - */ - bool IsComplete() const { - return hasRoot_ && level_stack_.Empty(); - } - - int GetMaxDecimalPlaces() const { - return maxDecimalPlaces_; - } - - //! Sets the maximum number of decimal places for double output. - /*! - This setting truncates the output with specified number of decimal places. - - For example, - - \code - writer.SetMaxDecimalPlaces(3); - writer.StartArray(); - writer.Double(0.12345); // "0.123" - writer.Double(0.0001); // "0.0" - writer.Double(1.234567890123456e30); // "1.234567890123456e30" (do not truncate significand for positive exponent) - writer.Double(1.23e-4); // "0.0" (do truncate significand for negative exponent) - writer.EndArray(); - \endcode - - The default setting does not truncate any decimal places. You can restore to this setting by calling - \code - writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces); - \endcode - */ - void SetMaxDecimalPlaces(int maxDecimalPlaces) { - maxDecimalPlaces_ = maxDecimalPlaces; - } - - /*!@name Implementation of Handler - \see Handler - */ - //@{ - - bool Null() { Prefix(kNullType); return WriteNull(); } - bool Bool(bool b) { Prefix(b ? kTrueType : kFalseType); return WriteBool(b); } - bool Int(int i) { Prefix(kNumberType); return WriteInt(i); } - bool Uint(unsigned u) { Prefix(kNumberType); return WriteUint(u); } - bool Int64(int64_t i64) { Prefix(kNumberType); return WriteInt64(i64); } - bool Uint64(uint64_t u64) { Prefix(kNumberType); return WriteUint64(u64); } - - //! Writes the given \c double value to the stream - /*! - \param d The value to be written. - \return Whether it is succeed. - */ - bool Double(double d) { Prefix(kNumberType); return WriteDouble(d); } - - bool RawNumber(const Ch* str, SizeType length, bool copy = false) { - (void)copy; - Prefix(kNumberType); - return WriteString(str, length); - } - - bool String(const Ch* str, SizeType length, bool copy = false) { - (void)copy; - Prefix(kStringType); - return WriteString(str, length); - } - -#if RAPIDJSON_HAS_STDSTRING - bool String(const std::basic_string<Ch>& str) { - return String(str.data(), SizeType(str.size())); - } -#endif - - bool StartObject() { - Prefix(kObjectType); - new (level_stack_.template Push<Level>()) Level(false); - return WriteStartObject(); - } - - bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } - - bool EndObject(SizeType memberCount = 0) { - (void)memberCount; - RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); - RAPIDJSON_ASSERT(!level_stack_.template Top<Level>()->inArray); - level_stack_.template Pop<Level>(1); - bool ret = WriteEndObject(); - if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text - os_->Flush(); - return ret; - } - - bool StartArray() { - Prefix(kArrayType); - new (level_stack_.template Push<Level>()) Level(true); - return WriteStartArray(); - } - - bool EndArray(SizeType elementCount = 0) { - (void)elementCount; - RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); - RAPIDJSON_ASSERT(level_stack_.template Top<Level>()->inArray); - level_stack_.template Pop<Level>(1); - bool ret = WriteEndArray(); - if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text - os_->Flush(); - return ret; - } - //@} - - /*! @name Convenience extensions */ - //@{ - - //! 
Simpler but slower overload. - bool String(const Ch* str) { return String(str, internal::StrLen(str)); } - bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } - - //@} - - //! Write a raw JSON value. - /*! - For user to write a stringified JSON as a value. - - \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. - \param length Length of the json. - \param type Type of the root of json. - */ - bool RawValue(const Ch* json, size_t length, Type type) { Prefix(type); return WriteRawValue(json, length); } - -protected: - //! Information for each nested level - struct Level { - Level(bool inArray_) : valueCount(0), inArray(inArray_) {} - size_t valueCount; //!< number of values in this level - bool inArray; //!< true if in array, otherwise in object - }; - - static const size_t kDefaultLevelDepth = 32; - - bool WriteNull() { - PutReserve(*os_, 4); - PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true; - } - - bool WriteBool(bool b) { - if (b) { - PutReserve(*os_, 4); - PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e'); - } - else { - PutReserve(*os_, 5); - PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e'); - } - return true; - } - - bool WriteInt(int i) { - char buffer[11]; - const char* end = internal::i32toa(i, buffer); - PutReserve(*os_, static_cast<size_t>(end - buffer)); - for (const char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p)); - return true; - } - - bool WriteUint(unsigned u) { - char buffer[10]; - const char* end = internal::u32toa(u, buffer); - PutReserve(*os_, static_cast<size_t>(end - buffer)); - for (const char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p)); - return true; - } - - bool WriteInt64(int64_t i64) { - char buffer[21]; - const char* end = internal::i64toa(i64, buffer); - PutReserve(*os_, static_cast<size_t>(end - buffer)); - for (const char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p)); - return true; - } - - bool WriteUint64(uint64_t u64) { - char buffer[20]; - char* end = internal::u64toa(u64, buffer); - PutReserve(*os_, static_cast<size_t>(end - buffer)); - for (char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p)); - return true; - } - - bool WriteDouble(double d) { - if (internal::Double(d).IsNanOrInf()) { - if (!(writeFlags & kWriteNanAndInfFlag)) - return false; - if (internal::Double(d).IsNan()) { - PutReserve(*os_, 3); - PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); - return true; - } - if (internal::Double(d).Sign()) { - PutReserve(*os_, 9); - PutUnsafe(*os_, '-'); - } - else - PutReserve(*os_, 8); - PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); - PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); - return true; - } - - char buffer[25]; - char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); - PutReserve(*os_, static_cast<size_t>(end - buffer)); - for (char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p)); - return true; - } - - bool WriteString(const Ch* str, SizeType length) { - static const typename TargetEncoding::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; - static 
const char escape[256] = { -#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 - //0 1 2 3 4 5 6 7 8 9 A B C D E F - 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00 - 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10 - 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20 - Z16, Z16, // 30~4F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50 - Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF -#undef Z16 - }; - - if (TargetEncoding::supportUnicode) - PutReserve(*os_, 2 + length * 6); // "\uxxxx..." - else - PutReserve(*os_, 2 + length * 12); // "\uxxxx\uyyyy..." - - PutUnsafe(*os_, '\"'); - GenericStringStream<SourceEncoding> is(str); - while (ScanWriteUnescapedString(is, length)) { - const Ch c = is.Peek(); - if (!TargetEncoding::supportUnicode && static_cast<unsigned>(c) >= 0x80) { - // Unicode escaping - unsigned codepoint; - if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint))) - return false; - PutUnsafe(*os_, '\\'); - PutUnsafe(*os_, 'u'); - if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) { - PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]); - PutUnsafe(*os_, hexDigits[(codepoint >> 8) & 15]); - PutUnsafe(*os_, hexDigits[(codepoint >> 4) & 15]); - PutUnsafe(*os_, hexDigits[(codepoint ) & 15]); - } - else { - RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF); - // Surrogate pair - unsigned s = codepoint - 0x010000; - unsigned lead = (s >> 10) + 0xD800; - unsigned trail = (s & 0x3FF) + 0xDC00; - PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]); - PutUnsafe(*os_, hexDigits[(lead >> 8) & 15]); - PutUnsafe(*os_, hexDigits[(lead >> 4) & 15]); - PutUnsafe(*os_, hexDigits[(lead ) & 15]); - PutUnsafe(*os_, '\\'); - PutUnsafe(*os_, 'u'); - PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]); - PutUnsafe(*os_, hexDigits[(trail >> 8) & 15]); - PutUnsafe(*os_, hexDigits[(trail >> 4) & 15]); - PutUnsafe(*os_, hexDigits[(trail ) & 15]); - } - } - else if ((sizeof(Ch) == 1 || static_cast<unsigned>(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast<unsigned char>(c)])) { - is.Take(); - PutUnsafe(*os_, '\\'); - PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(escape[static_cast<unsigned char>(c)])); - if (escape[static_cast<unsigned char>(c)] == 'u') { - PutUnsafe(*os_, '0'); - PutUnsafe(*os_, '0'); - PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) >> 4]); - PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) & 0xF]); - } - } - else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? 
- Transcoder<SourceEncoding, TargetEncoding>::Validate(is, *os_) : - Transcoder<SourceEncoding, TargetEncoding>::TranscodeUnsafe(is, *os_)))) - return false; - } - PutUnsafe(*os_, '\"'); - return true; - } - - bool ScanWriteUnescapedString(GenericStringStream<SourceEncoding>& is, size_t length) { - return RAPIDJSON_LIKELY(is.Tell() < length); - } - - bool WriteStartObject() { os_->Put('{'); return true; } - bool WriteEndObject() { os_->Put('}'); return true; } - bool WriteStartArray() { os_->Put('['); return true; } - bool WriteEndArray() { os_->Put(']'); return true; } - - bool WriteRawValue(const Ch* json, size_t length) { - PutReserve(*os_, length); - for (size_t i = 0; i < length; i++) { - RAPIDJSON_ASSERT(json[i] != '\0'); - PutUnsafe(*os_, json[i]); - } - return true; - } - - void Prefix(Type type) { - (void)type; - if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root - Level* level = level_stack_.template Top<Level>(); - if (level->valueCount > 0) { - if (level->inArray) - os_->Put(','); // add comma if it is not the first element in array - else // in object - os_->Put((level->valueCount % 2 == 0) ? ',' : ':'); - } - if (!level->inArray && level->valueCount % 2 == 0) - RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name - level->valueCount++; - } - else { - RAPIDJSON_ASSERT(!hasRoot_); // Should only has one and only one root. - hasRoot_ = true; - } - } - - OutputStream* os_; - internal::Stack<StackAllocator> level_stack_; - int maxDecimalPlaces_; - bool hasRoot_; - -private: - // Prohibit copy constructor & assignment operator. - Writer(const Writer&); - Writer& operator=(const Writer&); -}; - -// Full specialization for StringStream to prevent memory copying - -template<> -inline bool Writer<StringBuffer>::WriteInt(int i) { - char *buffer = os_->Push(11); - const char* end = internal::i32toa(i, buffer); - os_->Pop(static_cast<size_t>(11 - (end - buffer))); - return true; -} - -template<> -inline bool Writer<StringBuffer>::WriteUint(unsigned u) { - char *buffer = os_->Push(10); - const char* end = internal::u32toa(u, buffer); - os_->Pop(static_cast<size_t>(10 - (end - buffer))); - return true; -} - -template<> -inline bool Writer<StringBuffer>::WriteInt64(int64_t i64) { - char *buffer = os_->Push(21); - const char* end = internal::i64toa(i64, buffer); - os_->Pop(static_cast<size_t>(21 - (end - buffer))); - return true; -} - -template<> -inline bool Writer<StringBuffer>::WriteUint64(uint64_t u) { - char *buffer = os_->Push(20); - const char* end = internal::u64toa(u, buffer); - os_->Pop(static_cast<size_t>(20 - (end - buffer))); - return true; -} - -template<> -inline bool Writer<StringBuffer>::WriteDouble(double d) { - if (internal::Double(d).IsNanOrInf()) { - // Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag). 
- if (!(kWriteDefaultFlags & kWriteNanAndInfFlag)) - return false; - if (internal::Double(d).IsNan()) { - PutReserve(*os_, 3); - PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); - return true; - } - if (internal::Double(d).Sign()) { - PutReserve(*os_, 9); - PutUnsafe(*os_, '-'); - } - else - PutReserve(*os_, 8); - PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); - PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); - return true; - } - - char *buffer = os_->Push(25); - char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); - os_->Pop(static_cast<size_t>(25 - (end - buffer))); - return true; -} - -#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) -template<> -inline bool Writer<StringBuffer>::ScanWriteUnescapedString(StringStream& is, size_t length) { - if (length < 16) - return RAPIDJSON_LIKELY(is.Tell() < length); - - if (!RAPIDJSON_LIKELY(is.Tell() < length)) - return false; - - const char* p = is.src_; - const char* end = is.head_ + length; - const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15)); - const char* endAligned = reinterpret_cast<const char*>(reinterpret_cast<size_t>(end) & static_cast<size_t>(~15)); - if (nextAligned > end) - return true; - - while (p != nextAligned) - if (*p < 0x20 || *p == '\"' || *p == '\\') { - is.src_ = p; - return RAPIDJSON_LIKELY(is.Tell() < length); - } - else - os_->PutUnsafe(*p++); - - // The rest of string using SIMD - static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; - static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; - const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0])); - const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0])); - const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0])); - - for (; p != endAligned; p += 16) { - const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p)); - const __m128i t1 = _mm_cmpeq_epi8(s, dq); - const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 - const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); - unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x)); - if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped - SizeType len; -#ifdef _MSC_VER // Find the index of first escaped - unsigned long offset; - _BitScanForward(&offset, r); - len = offset; -#else - len = static_cast<SizeType>(__builtin_ffs(r) - 1); -#endif - char* q = reinterpret_cast<char*>(os_->PushUnsafe(len)); - for (size_t i = 0; i < len; i++) - q[i] = p[i]; - - p += len; - break; - } - _mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s); - } - - is.src_ = p; - return RAPIDJSON_LIKELY(is.Tell() < length); -} -#endif // defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) - -RAPIDJSON_NAMESPACE_END - -#ifdef _MSC_VER -RAPIDJSON_DIAG_POP -#endif - -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/ext/librethinkdbxx/src/term.cc b/ext/librethinkdbxx/src/term.cc deleted file mode 100644 index 711ef27d..00000000 --- 
a/ext/librethinkdbxx/src/term.cc +++ /dev/null @@ -1,285 +0,0 @@ -#include <cstdlib> -#include <set> - -#include "term.h" -#include "json_p.h" - -namespace RethinkDB { - -using TT = Protocol::Term::TermType; - -struct { - Datum operator() (const Array& array) { - Array copy; - copy.reserve(array.size()); - for (const auto& it : array) { - copy.emplace_back(it.apply<Datum>(*this)); - } - return Datum(Array{TT::MAKE_ARRAY, std::move(copy)}); - } - Datum operator() (const Object& object) { - Object copy; - for (const auto& it : object) { - copy.emplace(it.first, it.second.apply<Datum>(*this)); - } - return std::move(copy); - } - template<class T> - Datum operator() (T&& atomic) { - return Datum(std::forward<T>(atomic)); - } -} datum_to_term; - -Term::Term(Datum&& datum_) : datum(datum_.apply<Datum>(datum_to_term)) { } -Term::Term(const Datum& datum_) : datum(datum_.apply<Datum>(datum_to_term)) { } - -Term::Term(Term&& orig, OptArgs&& new_optargs) : datum(Nil()) { - Datum* cur = orig.datum.get_nth(2); - Object optargs; - free_vars = std::move(orig.free_vars); - if (cur) { - optargs = std::move(cur->extract_object()); - } - for (auto& it : new_optargs) { - optargs.emplace(std::move(it.first), alpha_rename(std::move(it.second))); - } - datum = Array{ std::move(orig.datum.extract_nth(0)), std::move(orig.datum.extract_nth(1)), std::move(optargs) }; -} - -Term nil() { - return Term(Nil()); -} - -Cursor Term::run(Connection& conn, OptArgs&& opts) { - if (!free_vars.empty()) { - throw Error("run: term has free variables"); - } - - return conn.start_query(this, std::move(opts)); -} - -struct { - Datum operator() (Object&& object, const std::map<int, int>& subst, bool) { - Object ret; - for (auto& it : object) { - ret.emplace(std::move(it.first), std::move(it.second).apply<Datum>(*this, subst, false)); - } - return ret; - } - Datum operator() (Array&& array, const std::map<int, int>& subst, bool args) { - if (!args) { - double cmd = array[0].extract_number(); - if (cmd == static_cast<int>(TT::VAR)) { - double var = array[1].extract_nth(0).extract_number(); - auto it = subst.find(static_cast<int>(var)); - if (it != subst.end()) { - return Array{ TT::VAR, { it->second }}; - } - } - if (array.size() == 2) { - return Array{ std::move(array[0]), std::move(array[1]).apply<Datum>(*this, subst, true) }; - } else { - return Array{ - std::move(array[0]), - std::move(array[1]).apply<Datum>(*this, subst, true), - std::move(array[2]).apply<Datum>(*this, subst, false) }; - } - } else { - Array ret; - for (auto& it : array) { - ret.emplace_back(std::move(it).apply<Datum>(*this, subst, false)); - } - return ret; - } - } - template <class T> - Datum operator() (T&& a, const std::map<int, int>&, bool) { - return std::move(a); - } -} alpha_renamer; - -static int new_var_id(const std::map<int, int*>& vars) { - while (true) { - int id = gen_var_id(); - if (vars.find(id) == vars.end()) { - return id; - } - } -} - -Datum Term::alpha_rename(Term&& term) { - if (free_vars.empty()) { - free_vars = std::move(term.free_vars); - return std::move(term.datum); - } - - std::map<int, int> subst; - for (auto it = term.free_vars.begin(); it != term.free_vars.end(); ++it) { - auto var = free_vars.find(it->first); - if (var == free_vars.end()) { - free_vars.emplace(it->first, it->second); - } else if (var->second != it->second) { - int id = new_var_id(free_vars); - subst.emplace(it->first, id); - free_vars.emplace(id, it->second); - } - } - if (subst.empty()) { - return std::move(term.datum); - } else { - return 
term.datum.apply<Datum>(alpha_renamer, subst, false); - } -} - -int gen_var_id() { - return ::random() % (1<<30); -} - -C0_IMPL(db_list, DB_LIST) -C0_IMPL(table_list, TABLE_LIST) -C0_IMPL(random, RANDOM) -C0_IMPL(now, NOW) -C0_IMPL(range, RANGE) -C0_IMPL(error, ERROR) -C0_IMPL(uuid, UUID) -C0_IMPL(literal, LITERAL) -CO0_IMPL(wait, WAIT) -C0_IMPL(rebalance, REBALANCE) -CO0_IMPL(random, RANDOM) - -Term row(TT::IMPLICIT_VAR, {}); -Term minval(TT::MINVAL, {}); -Term maxval(TT::MAXVAL, {}); - -Term binary(const std::string& data) { - return expr(Binary(data)); -} - -Term binary(std::string&& data) { - return expr(Binary(data)); -} - -Term binary(const char* data) { - return expr(Binary(data)); -} - -struct { - bool operator() (const Object& object) { - for (const auto& it : object) { - if (it.second.apply<bool>(*this)) { - return true; - } - } - return false; - } - bool operator() (const Array& array) { - int type = *array[0].get_number(); - if (type == static_cast<int>(TT::IMPLICIT_VAR)) { - return true; - } - if (type == static_cast<int>(TT::FUNC)) { - return false; - } - for (const auto& it : *array[1].get_array()) { - if (it.apply<bool>(*this)) { - return true; - } - } - if (array.size() == 3) { - return array[2].apply<bool>(*this); - } else { - return false; - } - } - template <class T> - bool operator() (T) { - return false; - } -} needs_func_wrap; - -Term Term::func_wrap(Term&& term) { - if (term.datum.apply<bool>(needs_func_wrap)) { - return Term(TT::FUNC, {expr(Array{new_var_id(term.free_vars)}), std::move(term)}); - } - return term; -} - -Term Term::func_wrap(const Term& term) { - if (term.datum.apply<bool>(needs_func_wrap)) { - // TODO return Term(TT::FUNC, {expr(Array{new_var_id(Term.free_vars)}), Term.copy()}); - return Term(Nil()); - } - return term; -} - -Term Term::make_object(std::vector<Term>&& args) { - if (args.size() % 2 != 0) { - return Term(TT::OBJECT, std::move(args)); - } - std::set<std::string> keys; - for (auto it = args.begin(); it != args.end() && it + 1 != args.end(); it += 2) { - std::string* key = it->datum.get_string(); - if (!key || keys.count(*key)) { - return Term(TT::OBJECT, std::move(args)); - } - keys.insert(*key); - } - Term ret{Nil()}; - Object object; - for (auto it = args.begin(); it != args.end(); it += 2) { - std::string* key = it->datum.get_string(); - object.emplace(std::move(*key), ret.alpha_rename(std::move(*(it + 1)))); - } - ret.datum = std::move(object); - return ret; -} - -Term Term::make_binary(Term&& term) { - std::string* string = term.datum.get_string(); - if (string) { - return expr(Binary(std::move(*string))); - } - return Term(TT::BINARY, std::vector<Term>{term}); -} - -Term::Term(OptArgs&& optargs) : datum(Nil()) { - Object oargs; - for (auto& it : optargs) { - oargs.emplace(it.first, alpha_rename(std::move(it.second))); - } - datum = std::move(oargs); -} - -OptArgs optargs() { - return OptArgs{}; -} - -Term january(TT::JANUARY, {}); -Term february(TT::FEBRUARY, {}); -Term march(TT::MARCH, {}); -Term april(TT::APRIL, {}); -Term may(TT::MAY, {}); -Term june(TT::JUNE, {}); -Term july(TT::JULY, {}); -Term august(TT::AUGUST, {}); -Term september(TT::SEPTEMBER, {}); -Term october(TT::OCTOBER, {}); -Term november(TT::NOVEMBER, {}); -Term december(TT::DECEMBER, {}); -Term monday(TT::MONDAY, {}); -Term tuesday(TT::TUESDAY, {}); -Term wednesday(TT::WEDNESDAY, {}); -Term thursday(TT::THURSDAY, {}); -Term friday(TT::FRIDAY, {}); -Term saturday(TT::SATURDAY, {}); -Term sunday(TT::SUNDAY, {}); - -Term Term::copy() const { - return *this; -} - 
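The implicit-variable handling above (needs_func_wrap detecting IMPLICIT_VAR, func_wrap wrapping the term in a one-argument FUNC) is easiest to see from the caller's side. A minimal sketch of a query that exercises that path, assuming the driver's usual umbrella header; the table name and connection are illustrative, not taken from this diff:

#include "rethinkdb.h"               // assumed umbrella header for librethinkdbxx
namespace R = RethinkDB;

void list_adults(R::Connection& conn) {
    // table("users").filter(row["age"] >= 21): the predicate contains the
    // IMPLICIT_VAR term `row`, so func_wrap() above wraps it in a one-argument
    // FUNC term before run() serializes the query and sends it to the server.
    R::Cursor cursor = R::table("users")
                           .filter(R::row["age"] >= 21)
                           .run(conn);
    (void)cursor;                    // cursor consumption lives elsewhere in the driver
}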
-Datum Term::get_datum() const { - return datum; -} - -} diff --git a/ext/librethinkdbxx/src/term.h b/ext/librethinkdbxx/src/term.h deleted file mode 100644 index cdee5ec4..00000000 --- a/ext/librethinkdbxx/src/term.h +++ /dev/null @@ -1,592 +0,0 @@ -#pragma once - -#include "datum.h" -#include "connection.h" -#include "protocol_defs.h" -#include "cursor.h" - -namespace RethinkDB { - -using TT = Protocol::Term::TermType; - -class Term; -class Var; - -// An alias for the Term constructor -template <class T> -Term expr(T&&); - -int gen_var_id(); - -// Can be used as the last argument to some ReQL commands that expect named arguments -using OptArgs = std::map<std::string, Term>; - -// Represents a ReQL Term (RethinkDB Query Language) -// Designed to be used with r-value *this -class Term { -public: - Term(const Term& other) = default; - Term(Term&& other) = default; - Term& operator= (const Term& other) = default; - Term& operator= (Term&& other) = default; - - explicit Term(Datum&&); - explicit Term(const Datum&); - explicit Term(OptArgs&&); - - // Create a copy of the Term - Term copy() const; - - Term(std::function<Term()> f) : datum(Nil()) { set_function<std::function<Term()>>(f); } - Term(std::function<Term(Var)> f) : datum(Nil()) { set_function<std::function<Term(Var)>, 0>(f); } - Term(std::function<Term(Var, Var)> f) : datum(Nil()) { set_function<std::function<Term(Var, Var)>, 0, 1>(f); } - Term(std::function<Term(Var, Var, Var)> f) : datum(Nil()) { set_function<std::function<Term(Var, Var, Var)>, 0, 1, 2>(f); } - Term(Protocol::Term::TermType type, std::vector<Term>&& args) : datum(Array()) { - Array dargs; - for (auto& it : args) { - dargs.emplace_back(alpha_rename(std::move(it))); - } - datum = Datum(Array{ type, std::move(dargs) }); - } - - Term(Protocol::Term::TermType type, std::vector<Term>&& args, OptArgs&& optargs) : datum(Array()) { - Array dargs; - for (auto& it : args) { - dargs.emplace_back(alpha_rename(std::move(it))); - } - Object oargs; - for (auto& it : optargs) { - oargs.emplace(it.first, alpha_rename(std::move(it.second))); - } - datum = Array{ type, std::move(dargs), std::move(oargs) }; - } - - // Used internally to support row - static Term func_wrap(Term&&); - static Term func_wrap(const Term&); - - - // These macros are used to define most ReQL commands - // * Cn represents a method with n arguments - // * COn represents a method with n arguments and optional named arguments - // * C_ represents a method with any number of arguments - // Each method is implemented twice, once with r-value *this, and once with const *this - // The third argument, wrap, allows converting arguments into functions if they contain row - -#define C0(name, type) \ - Term name() && { return Term(TT::type, std::vector<Term>{ std::move(*this) }); } \ - Term name() const & { return Term(TT::type, std::vector<Term>{ *this }); } -#define C1(name, type, wrap) \ - template <class T> \ - Term name(T&& a) && { return Term(TT::type, std::vector<Term>{ std::move(*this), wrap(expr(std::forward<T>(a))) }); } \ - template <class T> \ - Term name(T&& a) const & { return Term(TT::type, std::vector<Term>{ *this, wrap(expr(std::forward<T>(a))) }); } -#define C2(name, type) \ - template <class T, class U> Term name(T&& a, U&& b) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this), \ - expr(std::forward<T>(a)), expr(std::forward<U>(b)) }); } \ - template <class T, class U> Term name(T&& a, U&& b) const & { \ - return Term(TT::type, std::vector<Term>{ *this, \ - expr(std::forward<T>(a)), 
expr(std::forward<U>(b)) }); } -#define C_(name, type, wrap) \ - template <class ...T> Term name(T&& ...a) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this), \ - wrap(expr(std::forward<T>(a)))... }); } \ - template <class ...T> Term name(T&& ...a) const & { \ - return Term(TT::type, std::vector<Term>{ *this, \ - wrap(expr(std::forward<T>(a)))... }); } -#define CO0(name, type) \ - Term name(OptArgs&& optarg = {}) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this) }, std::move(optarg)); } \ - Term name(OptArgs&& optarg = {}) const & { \ - return Term(TT::type, std::vector<Term>{ *this }, std::move(optarg)); } -#define CO1(name, type, wrap) \ - template <class T> Term name(T&& a, OptArgs&& optarg = {}) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this), \ - wrap(expr(std::forward<T>(a))) }, std::move(optarg)); } \ - template <class T> Term name(T&& a, OptArgs&& optarg = {}) const & { \ - return Term(TT::type, std::vector<Term>{ *this, \ - wrap(expr(std::forward<T>(a))) }, std::move(optarg)); } -#define CO2(name, type, wrap) \ - template <class T, class U> Term name(T&& a, U&& b, OptArgs&& optarg = {}) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this), \ - wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))) }, std::move(optarg)); } \ - template <class T, class U> Term name(T&& a, U&& b, OptArgs&& optarg = {}) const & { \ - return Term(TT::type, std::vector<Term>{ *this, \ - wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))) }, std::move(optarg)); } -#define CO3(name, type, wrap) \ - template <class T, class U, class V> Term name(T&& a, U&& b, V&& c, OptArgs&& optarg = {}) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this), \ - wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \ - wrap(expr(std::forward<V>(c))) }, std::move(optarg)); } \ - template <class T, class U, class V> Term name(T&& a, U&& b, V&& c, OptArgs&& optarg = {}) const & { \ - return Term(TT::type, std::vector<Term>{ *this, \ - wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \ - wrap(expr(std::forward<V>(c)))}, std::move(optarg)); } -#define CO4(name, type, wrap) \ - template <class T, class U, class V, class W> Term name(T&& a, U&& b, V&& c, W&& d, OptArgs&& optarg = {}) && { \ - return Term(TT::type, std::vector<Term>{ std::move(*this), \ - wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \ - wrap(expr(std::forward<V>(c))), wrap(expr(std::forward<W>(d))) }, std::move(optarg)); } \ - template <class T, class U, class V, class W> Term name(T&& a, U&& b, V&& c, W&& d, OptArgs&& optarg = {}) const & { \ - return Term(TT::type, std::vector<Term>{ *this, \ - wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \ - wrap(expr(std::forward<V>(c))), wrap(expr(std::forward<W>(d))) }, std::move(optarg)); } -#define CO_(name, type, wrap) \ - C_(name, type, wrap) \ - CO0(name, type) \ - CO1(name, type, wrap) \ - CO2(name, type, wrap) \ - CO3(name, type, wrap) \ - CO4(name, type, wrap) -#define no_wrap(x) x - - CO1(table_create, TABLE_CREATE, no_wrap) - C1(table_drop, TABLE_DROP, no_wrap) - C0(table_list, TABLE_LIST) - CO1(index_create, INDEX_CREATE, no_wrap) - CO2(index_create, INDEX_CREATE, func_wrap) - C1(index_drop, INDEX_DROP, no_wrap) - C0(index_list, INDEX_LIST) - CO2(index_rename, INDEX_RENAME, no_wrap) - C_(index_status, INDEX_STATUS, no_wrap) - C_(index_wait, INDEX_WAIT, no_wrap) - CO0(changes, CHANGES) - CO1(insert, INSERT, no_wrap) - CO1(update, UPDATE, func_wrap) - 
CO1(replace, REPLACE, func_wrap) - CO0(delete_, DELETE) - C0(sync, SYNC) - CO1(table, TABLE, no_wrap) - C1(get, GET, no_wrap) - CO_(get_all, GET_ALL, no_wrap) - CO2(between, BETWEEN, no_wrap) - CO1(filter, FILTER, func_wrap) - C2(inner_join, INNER_JOIN) - C2(outer_join, OUTER_JOIN) - CO2(eq_join, EQ_JOIN, func_wrap) - C0(zip, ZIP) - C_(map, MAP, func_wrap) - C_(with_fields, WITH_FIELDS, no_wrap) - C1(concat_map, CONCAT_MAP, func_wrap) - CO_(order_by, ORDER_BY, func_wrap) - C1(skip, SKIP, no_wrap) - C1(limit, LIMIT, no_wrap) - CO1(slice, SLICE, no_wrap) - CO2(slice, SLICE, no_wrap) - C1(nth, NTH, no_wrap) - C1(offsets_of, OFFSETS_OF, func_wrap) - C0(is_empty, IS_EMPTY) - CO_(union_, UNION, no_wrap) - C1(sample, SAMPLE, no_wrap) - CO_(group, GROUP, func_wrap) - C0(ungroup, UNGROUP) - C1(reduce, REDUCE, no_wrap) - CO2(fold, FOLD, no_wrap) - C0(count, COUNT) - C1(count, COUNT, func_wrap) - C0(sum, SUM) - C1(sum, SUM, func_wrap) - C0(avg, AVG) - C1(avg, AVG, func_wrap) - C1(min, MIN, func_wrap) - CO0(min, MIN) - C1(max, MAX, func_wrap) - CO0(max, MAX) - CO0(distinct, DISTINCT) - C_(contains, CONTAINS, func_wrap) - C_(pluck, PLUCK, no_wrap) - C_(without, WITHOUT, no_wrap) - C_(merge, MERGE, func_wrap) - C1(append, APPEND, no_wrap) - C1(prepend, PREPEND, no_wrap) - C1(difference, DIFFERENCE, no_wrap) - C1(set_insert, SET_INSERT, no_wrap) - C1(set_union, SET_UNION, no_wrap) - C1(set_intersection, SET_INTERSECTION, no_wrap) - C1(set_difference, SET_DIFFERENCE, no_wrap) - C1(operator[], BRACKET, no_wrap) - C1(get_field, GET_FIELD, no_wrap) - C_(has_fields, HAS_FIELDS, no_wrap) - C2(insert_at, INSERT_AT) - C2(splice_at, SPLICE_AT) - C1(delete_at, DELETE_AT, no_wrap) - C2(delete_at, DELETE_AT) - C2(change_at, CHANGE_AT) - C0(keys, KEYS) - C1(match, MATCH, no_wrap) - C0(split, SPLIT) - C1(split, SPLIT, no_wrap) - C2(split, SPLIT) - C0(upcase, UPCASE) - C0(downcase, DOWNCASE) - C_(add, ADD, no_wrap) - C1(operator+, ADD, no_wrap) - C_(sub, SUB, no_wrap) - C1(operator-, SUB, no_wrap) - C_(mul, MUL, no_wrap) - C1(operator*, MUL, no_wrap) - C_(div, DIV, no_wrap) - C1(operator/, DIV, no_wrap) - C1(mod, MOD, no_wrap) - C1(operator%, MOD, no_wrap) - C_(and_, AND, no_wrap) - C1(operator&&, AND, no_wrap) - C_(or_, OR, no_wrap) - C1(operator||, OR, no_wrap) - C1(eq, EQ, no_wrap) - C1(operator==, EQ, no_wrap) - C1(ne, NE, no_wrap) - C1(operator!=, NE, no_wrap) - C1(gt, GT, no_wrap) - C1(operator>, GT, no_wrap) - C1(ge, GE, no_wrap) - C1(operator>=, GE, no_wrap) - C1(lt, LT, no_wrap) - C1(operator<, LT, no_wrap) - C1(le, LE, no_wrap) - C1(operator<=, LE, no_wrap) - C0(not_, NOT) - C0(operator!, NOT) - C1(in_timezone, IN_TIMEZONE, no_wrap) - C0(timezone, TIMEZONE) - CO2(during, DURING, no_wrap) - C0(date, DATE) - C0(time_of_day, TIME_OF_DAY) - C0(year, YEAR) - C0(month, MONTH) - C0(day, DAY) - C0(day_of_week, DAY_OF_WEEK) - C0(day_of_year, DAY_OF_YEAR) - C0(hours, HOURS) - C0(minutes, MINUTES) - C0(seconds, SECONDS) - C0(to_iso8601, TO_ISO8601) - C0(to_epoch_time, TO_EPOCH_TIME) - C1(for_each, FOR_EACH, func_wrap) - C1(default_, DEFAULT, no_wrap) - CO1(js, JAVASCRIPT, no_wrap) - C1(coerce_to, COERCE_TO, no_wrap) - C0(type_of, TYPE_OF) - C0(info, INFO) - C0(to_json, TO_JSON_STRING) - C0(to_json_string, TO_JSON_STRING) - C1(distance, DISTANCE, no_wrap) - C0(fill, FILL) - C0(to_geojson, TO_GEOJSON) - CO1(get_intersecting, GET_INTERSECTING, no_wrap) - CO1(get_nearest, GET_NEAREST, no_wrap) - C1(includes, INCLUDES, no_wrap) - C1(intersects, INTERSECTS, no_wrap) - C1(polygon_sub, POLYGON_SUB, no_wrap) - C0(config, CONFIG) 
- C0(rebalance, REBALANCE) - CO0(reconfigure, RECONFIGURE) - C0(status, STATUS) - CO0(wait, WAIT) - C0(floor, FLOOR) - C0(ceil, CEIL) - C0(round, ROUND) - C0(values, VALUES) - - // The expansion of this macro fails to compile on some versions of GCC and Clang: - // C_(operator(), FUNCALL, no_wrap) - // The std::enable_if makes the error go away - - // $doc(do) - - template <class T, class ...U> - typename std::enable_if<!std::is_same<T, Var>::value, Term>::type - operator() (T&& a, U&& ...b) && { - return Term(TT::FUNCALL, std::vector<Term>{ - std::move(*this), - expr(std::forward<T>(a)), - expr(std::forward<U>(b))... }); - } - template <class T, class ...U> - typename std::enable_if<!std::is_same<T, Var>::value, Term>::type - operator() (T&& a, U&& ...b) const & { - return Term(TT::FUNCALL, std::vector<Term>{ - *this, - expr(std::forward<T>(a)), - expr(std::forward<U>(b))... }); - } - -#undef C0 -#undef C1 -#undef C2 -#undef C_ -#undef CO0 -#undef CO1 -#undef CO2 - - // Send the term to the server and return the results. - // Errors returned by the server are thrown. - Cursor run(Connection&, OptArgs&& args = {}); - - // $doc(do) - template <class ...T> - Term do_(T&& ...a) && { - auto list = { std::move(*this), Term::func_wrap(expr(std::forward<T>(a)))... }; - std::vector<Term> args; - args.reserve(list.size() + 1); - args.emplace_back(func_wrap(std::move(*(list.end()-1)))); - for (auto it = list.begin(); it + 1 != list.end(); ++it) { - args.emplace_back(std::move(*it)); - } - return Term(TT::FUNCALL, std::move(args)); - } - - // Adds optargs to an already built term - Term opt(OptArgs&& optargs) && { - return Term(std::move(*this), std::move(optargs)); - } - - // Used internally to implement object() - static Term make_object(std::vector<Term>&&); - - // Used internally to implement array() - static Term make_binary(Term&&); - - Datum get_datum() const; - -private: - friend class Var; - friend class Connection; - friend struct Query; - - template <int _> - Var mkvar(std::vector<int>& vars); - - template <class F, int ...N> - void set_function(F); - - Datum alpha_rename(Term&&); - - Term(Term&& orig, OptArgs&& optargs); - - std::map<int, int*> free_vars; - Datum datum; -}; - -// A term representing null -Term nil(); - -template <class T> -Term expr(T&& a) { - return Term(std::forward<T>(a)); -} - -// Represents a ReQL variable. -// This type is passed to functions used in ReQL queries. -class Var { -public: - // Convert to a term - Term operator*() const { - Term term(TT::VAR, std::vector<Term>{expr(*id)}); - term.free_vars = {{*id, id}}; - return term; - } - - Var(int* id_) : id(id_) { } -private: - int* id; -}; - -template <int N> -Var Term::mkvar(std::vector<int>& vars) { - int id = gen_var_id(); - vars.push_back(id); - return Var(&*vars.rbegin()); -} - -template <class F, int ...N> -void Term::set_function(F f) { - std::vector<int> vars; - vars.reserve(sizeof...(N)); - std::vector<Var> args = { mkvar<N>(vars)... 
}; - Term body = f(args[N] ...); - - int* low = &*vars.begin(); - int* high = &*(vars.end() - 1); - for (auto it = body.free_vars.begin(); it != body.free_vars.end(); ) { - if (it->second >= low && it->second <= high) { - if (it->first != *it->second) { - throw Error("Internal error: variable index mis-match"); - } - ++it; - } else { - free_vars.emplace(*it); - ++it; - } - } - datum = Array{TT::FUNC, Array{Array{TT::MAKE_ARRAY, vars}, body.datum}}; -} - -// These macros are similar to those defined above, but for top-level ReQL operations - -#define C0(name) Term name(); -#define C0_IMPL(name, type) Term name() { return Term(TT::type, std::vector<Term>{}); } -#define CO0(name) Term name(OptArgs&& optargs = {}); -#define CO0_IMPL(name, type) Term name(OptArgs&& optargs) { return Term(TT::type, std::vector<Term>{}, std::move(optargs)); } -#define C1(name, type, wrap) template <class T> Term name(T&& a) { \ - return Term(TT::type, std::vector<Term>{ wrap(expr(std::forward<T>(a))) }); } -#define C2(name, type) template <class T, class U> Term name(T&& a, U&& b) { \ - return Term(TT::type, std::vector<Term>{ expr(std::forward<T>(a)), expr(std::forward<U>(b)) }); } -#define C3(name, type) template <class A, class B, class C> \ - Term name(A&& a, B&& b, C&& c) { return Term(TT::type, std::vector<Term>{ \ - expr(std::forward<A>(a)), expr(std::forward<B>(b)), expr(std::forward<C>(c)) }); } -#define C4(name, type) template <class A, class B, class C, class D> \ - Term name(A&& a, B&& b, C&& c, D&& d) { return Term(TT::type, std::vector<Term>{ \ - expr(std::forward<A>(a)), expr(std::forward<B>(b)), \ - expr(std::forward<C>(c)), expr(std::forward<D>(d))}); } -#define C7(name, type) template <class A, class B, class C, class D, class E, class F, class G> \ - Term name(A&& a, B&& b, C&& c, D&& d, E&& e, F&& f, G&& g) { return Term(TT::type, std::vector<Term>{ \ - expr(std::forward<A>(a)), expr(std::forward<B>(b)), expr(std::forward<C>(c)), \ - expr(std::forward<D>(d)), expr(std::forward<E>(e)), expr(std::forward<F>(f)), \ - expr(std::forward<G>(g))}); } -#define C_(name, type, wrap) template <class ...T> Term name(T&& ...a) { \ - return Term(TT::type, std::vector<Term>{ wrap(expr(std::forward<T>(a)))... 
}); } -#define CO1(name, type, wrap) template <class T> Term name(T&& a, OptArgs&& optarg = {}) { \ - return Term(TT::type, std::vector<Term>{ wrap(expr(std::forward<T>(a)))}, std::move(optarg)); } -#define CO2(name, type) template <class T, class U> Term name(T&& a, U&& b, OptArgs&& optarg = {}) { \ - return Term(TT::type, std::vector<Term>{ expr(std::forward<T>(a)), expr(std::forward<U>(b))}, std::move(optarg)); } -#define func_wrap Term::func_wrap - -C1(db_create, DB_CREATE, no_wrap) -C1(db_drop, DB_DROP, no_wrap) -C0(db_list) -CO1(table_create, TABLE_CREATE, no_wrap) -C1(table_drop, TABLE_DROP, no_wrap) -C0(table_list) -C1(db, DB, no_wrap) -CO1(table, TABLE, no_wrap) -C_(add, ADD, no_wrap) -C2(sub, SUB) -C_(mul, MUL, no_wrap) -C_(div, DIV, no_wrap) -C2(mod, MOD) -C_(and_, AND, no_wrap) -C_(or_, OR, no_wrap) -C2(eq, EQ) -C2(ne, NE) -C2(gt, GT) -C2(ge, GE) -C2(lt, LT) -C2(le, LE) -C1(not_, NOT, no_wrap) -CO0(random) -CO1(random, RANDOM, no_wrap) -CO2(random, RANDOM) -C0(now) -C4(time, TIME) -C7(time, TIME) -C1(epoch_time, EPOCH_TIME, no_wrap) -CO1(iso8601, ISO8601, no_wrap) -CO1(js, JAVASCRIPT, no_wrap) -C1(args, ARGS, no_wrap) -C_(branch, BRANCH, no_wrap) -C0(range) -C1(range, RANGE, no_wrap) -C2(range, RANGE) -C0(error) -C1(error, ERROR, no_wrap) -C1(json, JSON, no_wrap) -CO1(http, HTTP, func_wrap) -C0(uuid) -C1(uuid, UUID, no_wrap) -CO2(circle, CIRCLE) -C1(geojson, GEOJSON, no_wrap) -C_(line, LINE, no_wrap) -C2(point, POINT) -C_(polygon, POLYGON, no_wrap) -C_(array, MAKE_ARRAY, no_wrap) -C1(desc, DESC, func_wrap) -C1(asc, ASC, func_wrap) -C0(literal) -C1(literal, LITERAL, no_wrap) -C1(type_of, TYPE_OF, no_wrap) -C_(map, MAP, func_wrap) -C1(floor, FLOOR, no_wrap) -C1(ceil, CEIL, no_wrap) -C1(round, ROUND, no_wrap) -C_(union_, UNION, no_wrap) -C_(group, GROUP, func_wrap) -C1(count, COUNT, no_wrap) -C_(count, COUNT, func_wrap) -C1(sum, SUM, no_wrap) -C_(sum, SUM, func_wrap) -C1(avg, AVG, no_wrap) -C_(avg, AVG, func_wrap) -C1(min, MIN, no_wrap) -C_(min, MIN, func_wrap) -C1(max, MAX, no_wrap) -C_(max, MAX, func_wrap) -C1(distinct, DISTINCT, no_wrap) -C1(contains, CONTAINS, no_wrap) -C_(contains, CONTAINS, func_wrap) - -#undef C0 -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C7 -#undef C_ -#undef CO1 -#undef CO2 -#undef func_wrap - -// $doc(do) -template <class R, class ...T> -Term do_(R&& a, T&& ...b) { - return expr(std::forward<R>(a)).do_(std::forward<T>(b)...); -} - -// $doc(object) -template <class ...T> -Term object(T&& ...a) { - return Term::make_object(std::vector<Term>{ expr(std::forward<T>(a))... 
}); -} - -// $doc(binary) -template <class T> -Term binary(T&& a) { - return Term::make_binary(expr(std::forward<T>(a))); -} - -// Construct an empty optarg -OptArgs optargs(); - -// Construct an optarg made out of pairs of arguments -// For example: optargs("k1", v1, "k2", v2) -template <class V, class ...T> -OptArgs optargs(const char* key, V&& val, T&& ...rest) { - OptArgs opts = optargs(rest...); - opts.emplace(key, expr(std::forward<V>(val))); - return opts; -} - -extern Term row; -extern Term maxval; -extern Term minval; -extern Term january; -extern Term february; -extern Term march; -extern Term april; -extern Term may; -extern Term june; -extern Term july; -extern Term august; -extern Term september; -extern Term october; -extern Term november; -extern Term december; -extern Term monday; -extern Term tuesday; -extern Term wednesday; -extern Term thursday; -extern Term friday; -extern Term saturday; -extern Term sunday; -} diff --git a/ext/librethinkdbxx/src/types.cc b/ext/librethinkdbxx/src/types.cc deleted file mode 100644 index ea9becaf..00000000 --- a/ext/librethinkdbxx/src/types.cc +++ /dev/null @@ -1,47 +0,0 @@ -#include <cstdlib> - -#include "types.h" -#include "error.h" - -namespace RethinkDB { - -bool Time::parse_utc_offset(const std::string& string, double* offset) { - const char *s = string.c_str(); - double sign = 1; - switch (s[0]) { - case '-': - sign = -1; - case '+': - ++s; - break; - case 0: - return false; - } - for (int i = 0; i < 5; ++i) { - if (s[i] == 0) return false; - if (i == 2) continue; - if (s[i] < '0' || s[i] > '9') return false; - } - if (s[2] != ':') return false; - *offset = sign * ((s[0] - '0') * 36000 + (s[1] - '0') * 3600 + (s[3] - '0') * 600 + (s[4] - '0') * 60); - return true; -} - -double Time::parse_utc_offset(const std::string& string) { - double out; - if (!parse_utc_offset(string, &out)) { - throw Error("invalid utc offset `%s'", string.c_str()); - } - return out; -} - -std::string Time::utc_offset_string(double offset) { - char buf[8]; - int hour = offset / 3600; - int minutes = std::abs(static_cast<int>(offset / 60)) % 60; - int n = snprintf(buf, 7, "%+03d:%02d", hour, minutes); - buf[n] = 0; - return std::string(buf); -} - -} diff --git a/ext/librethinkdbxx/src/types.h b/ext/librethinkdbxx/src/types.h deleted file mode 100644 index ac35a871..00000000 --- a/ext/librethinkdbxx/src/types.h +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once - -#include <vector> -#include <map> -#include <ctime> -#include <string> - -namespace RethinkDB { - -class Datum; - -// Represents a null datum -struct Nil { }; - -using Array = std::vector<Datum>; -using Object = std::map<std::string, Datum>; - -// Represents a string of bytes. 
Plain std::strings are passed on to the server as utf-8 strings -struct Binary { - bool operator== (const Binary& other) const { - return data == other.data; - } - - Binary(const std::string& data_) : data(data_) { } - Binary(std::string&& data_) : data(std::move(data_)) { } - std::string data; -}; - -// Represents a point in time as -// * A floating amount of seconds since the UNIX epoch -// * And a timezone offset represented as seconds relative to UTC -struct Time { - Time(double epoch_time_, double utc_offset_ = 0) : - epoch_time(epoch_time_), utc_offset(utc_offset_) { } - - static Time now() { - return Time(time(NULL)); - } - - static bool parse_utc_offset(const std::string&, double*); - static double parse_utc_offset(const std::string&); - static std::string utc_offset_string(double); - - double epoch_time; - double utc_offset; -}; - -// Not implemented -class Point; -class Line; -class Polygon; - -} diff --git a/ext/librethinkdbxx/src/utils.cc b/ext/librethinkdbxx/src/utils.cc deleted file mode 100644 index 5a2c244d..00000000 --- a/ext/librethinkdbxx/src/utils.cc +++ /dev/null @@ -1,153 +0,0 @@ -#include "utils.h" -#include "error.h" - -namespace RethinkDB { - -size_t utf8_encode(unsigned int code, char* buf) { - if (!(code & ~0x7F)) { - buf[0] = code; - return 1; - } else if (!(code & ~0x7FF)) { - buf[0] = 0xC0 | (code >> 6); - buf[1] = 0x80 | (code & 0x3F); - return 2; - } else if (!(code & ~0xFFFF)) { - buf[0] = 0xE0 | (code >> 12); - buf[1] = 0x80 | ((code >> 6) & 0x3F); - buf[2] = 0x80 | (code & 0x3F); - return 3; - } else if (!(code & ~0x1FFFFF)) { - buf[0] = 0xF0 | (code >> 18); - buf[1] = 0x80 | ((code >> 12) & 0x3F); - buf[2] = 0x80 | ((code >> 6) & 0x3F); - buf[3] = 0x80 | (code & 0x3F); - return 4; - } else if (!(code & ~0x3FFFFFF)) { - buf[0] = 0xF8 | (code >> 24); - buf[1] = 0x80 | ((code >> 18) & 0x3F); - buf[2] = 0x80 | ((code >> 12) & 0x3F); - buf[3] = 0x80 | ((code >> 6) & 0x3F); - buf[4] = 0x80 | (code & 0x3F); - return 5; - } else if (!(code & ~0x7FFFFFFF)) { - buf[0] = 0xFC | (code >> 30); - buf[1] = 0x80 | ((code >> 24) & 0x3F); - buf[2] = 0x80 | ((code >> 18) & 0x3F); - buf[3] = 0x80 | ((code >> 12) & 0x3F); - buf[4] = 0x80 | ((code >> 6) & 0x3F); - buf[5] = 0x80 | (code & 0x3F); - return 6; - } else { - throw Error("Invalid unicode codepoint %ud", code); - } -} - -bool base64_decode(char c, int* out) { - if (c >= 'A' && c <= 'Z') { - *out = c - 'A'; - } else if (c >= 'a' && c <= 'z') { - *out = c - ('a' - 26); - } else if (c >= '0' && c <= '9') { - *out = c - ('0' - 52); - } else if (c == '+') { - *out = 62; - } else if (c == '/') { - *out = 63; - } else { - return false; - } - return true; -} - -bool base64_decode(const std::string& in, std::string& out) { - out.clear(); - out.reserve(in.size() * 3 / 4); - auto read = in.begin(); - while (true) { - int c[4]; - int end = 4; - for (int i = 0; i < 4; i++) { - while (true) { - if (read == in.end()) { - c[i] = 0; - end = i; - i = 3; - break; - } else if (base64_decode(*read, &c[i])) { - ++read; - break; - } else { - ++read; - } - } - } - if (end == 1) return false; - int val = c[0] << 18 | c[1] << 12 | c[2] << 6 | c[3]; - if (end > 1) out.append(1, val >> 16); - if (end > 2) out.append(1, val >> 8 & 0xFF); - if (end > 3) out.append(1, val & 0xFF); - if (end != 4) break; - } - return true; -} - -char base64_encode(unsigned int c) { - if (c < 26) { - return 'A' + c; - } else if (c < 52) { - return 'a' + c - 26; - } else if (c < 62) { - return '0' + c - 52; - } else if (c == 62) { - return '+'; - } else if (c == 63) 
{ - return '/'; - } else { - throw Error("unreachable: base64 encoding %d", c); - } -} - -void base64_encode(unsigned int* c, int n, std::string& out) { - if (n == 0) { - return; - } - out.append(1, base64_encode(c[0] >> 2)); - out.append(1, base64_encode((c[0] & 0x3) << 4 | c[1] >> 4)); - if (n == 1) { - out.append("=="); - return; - } - out.append(1, base64_encode((c[1] & 0xF) << 2 | c[2] >> 6)); - if (n == 2) { - out.append("="); - return; - } - out.append(1, base64_encode(c[2] & 0x3F)); -} - -std::string base64_encode(const std::string& in) { - std::string out; - out.reserve(in.size() * 4 / 3 + in.size() / 48 + 3); - auto read = in.begin(); - while (true) { - for (int group = 0; group < 16; ++group) { - unsigned int c[3]; - int i = 0; - for (; i < 3; ++i) { - if (read == in.end()) { - c[i] = 0; - break; - } else { - c[i] = static_cast<unsigned char>(*read++); - } - } - base64_encode(c, i, out); - if (i != 3) { - return out; - } - } - out.append("\n"); - } -} - -} diff --git a/ext/librethinkdbxx/src/utils.h b/ext/librethinkdbxx/src/utils.h deleted file mode 100644 index 04496e2c..00000000 --- a/ext/librethinkdbxx/src/utils.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include <cstddef> -#include <string> - -namespace RethinkDB { - -// The size of the longest UTF-8 encoded unicode codepoint -const size_t max_utf8_encoded_size = 6; - -// Decode a base64 string. Returns false on failure. -bool base64_decode(const std::string& in, std::string& out); -std::string base64_encode(const std::string&); - -// Encodes a single unicode codepoint into UTF-8. Returns the number of bytes written. -// Does not add a trailing null byte -size_t utf8_encode(unsigned int, char*); - -} diff --git a/ext/miniupnpc/mingw32make.bat b/ext/miniupnpc/mingw32make.bat deleted file mode 100644 index c5d3cc4f..00000000 --- a/ext/miniupnpc/mingw32make.bat +++ /dev/null @@ -1,8 +0,0 @@ -@mingw32-make -f Makefile.mingw %1 -@if errorlevel 1 goto end -@if not exist upnpc-static.exe goto end -@strip upnpc-static.exe -@upx --best upnpc-static.exe -@strip upnpc-shared.exe -@upx --best upnpc-shared.exe -:end diff --git a/ext/miniupnpc/setup.py b/ext/miniupnpc/setup.py deleted file mode 100644 index 97e42bf1..00000000 --- a/ext/miniupnpc/setup.py +++ /dev/null @@ -1,28 +0,0 @@ -#! /usr/bin/python -# vim: tabstop=8 shiftwidth=8 expandtab -# $Id: setup.py,v 1.12 2015/10/26 17:03:17 nanard Exp $ -# the MiniUPnP Project (c) 2007-2014 Thomas Bernard -# http://miniupnp.tuxfamily.org/ or http://miniupnp.free.fr/ -# -# python script to build the miniupnpc module under unix -# -# replace libminiupnpc.a by libminiupnpc.so for shared library usage -try: - from setuptools import setup, Extension -except ImportError: - from distutils.core import setup, Extension -from distutils import sysconfig -sysconfig.get_config_vars()["OPT"] = '' -sysconfig.get_config_vars()["CFLAGS"] = '' -setup(name="miniupnpc", - version=open('VERSION').read().strip(), - author='Thomas BERNARD', - author_email='miniupnp@free.fr', - license=open('LICENSE').read(), - url='http://miniupnp.free.fr/', - description='miniUPnP client', - ext_modules=[ - Extension(name="miniupnpc", sources=["miniupnpcmodule.c"], - extra_objects=["libminiupnpc.a"]) - ]) - diff --git a/ext/miniupnpc/setupmingw32.py b/ext/miniupnpc/setupmingw32.py deleted file mode 100644 index 43dfb465..00000000 --- a/ext/miniupnpc/setupmingw32.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/python -# vim: tabstop=8 shiftwidth=8 expandtab -# $Id: setupmingw32.py,v 1.10 2015/10/26 17:03:17 nanard Exp $ -# the MiniUPnP Project (c) 2007-2014 Thomas Bernard -# http://miniupnp.tuxfamily.org/ or http://miniupnp.free.fr/ -# -# python script to build the miniupnpc module under windows (using mingw32) -# -try: - from setuptools import setup, Extension -except ImportError: - from distutils.core import setup, Extension -from distutils import sysconfig -sysconfig.get_config_vars()["OPT"] = '' -sysconfig.get_config_vars()["CFLAGS"] = '' -setup(name="miniupnpc", - version=open('VERSION').read().strip(), - author='Thomas BERNARD', - author_email='miniupnp@free.fr', - license=open('LICENSE').read(), - url='http://miniupnp.free.fr/', - description='miniUPnP client', - ext_modules=[ - Extension(name="miniupnpc", sources=["miniupnpcmodule.c"], - libraries=["ws2_32", "iphlpapi"], - extra_objects=["libminiupnpc.a"]) - ]) - diff --git a/ext/tap-mac/README.txt b/ext/tap-mac/README.txt deleted file mode 100644 index 177b936f..00000000 --- a/ext/tap-mac/README.txt +++ /dev/null @@ -1,19 +0,0 @@ -This is a hack of tuntaposx. It's here for two reasons: - -1) There seem to be issues with large MTUs in the original tuntap code, - so we set up our zt0 tap with the correct ZeroTier MTU as the default. - -2) Lots of other mac products (VPNs, etc.) ship their own tap device - drivers that like to conflict with one another. This gives us no - choice but to play along. But we call our tap device zt0, which means - it won't conflict with everyone else's tap0. - -3) It's nice to call the device zt0, same as Linux, for consistency across - *nix platforms. Mac does not seem to support interface renaming. - -This will be placed in the ZeroTier home as a kext and is auto-loaded by the -ZeroTier One binary if /dev/zt0 is not found. It can also be auto-updated. - -See this page for the original: - -http://tuntaposx.sourceforge.net diff --git a/ext/tap-mac/tuntap/Makefile b/ext/tap-mac/tuntap/Makefile deleted file mode 100644 index 53ab1a9d..00000000 --- a/ext/tap-mac/tuntap/Makefile +++ /dev/null @@ -1,95 +0,0 @@ -# Lets have a version, at last! 
-TUNTAP_VERSION = 20150118 - -# BASE install directory -BASE= - -all: tap.kext - -keysetup: - -security delete-keychain net.sf.tuntaposx.tmp - security create-keychain -p $$(head -c 32 /dev/urandom | hexdump -e '"%02x"') \ - net.sf.tuntaposx.tmp - security set-keychain-settings -lut 60 net.sf.tuntaposx.tmp - security import identity.p12 -k net.sf.tuntaposx.tmp -f pkcs12 \ - -P $$(read -sp 'identity passphrase: ' pw && echo "$$pw") -A - security find-identity -v net.sf.tuntaposx.tmp | \ - awk -F \" '$$2 ~ /^Developer ID Application:/ { print $$2 }' > .signing_identity - security find-identity -v net.sf.tuntaposx.tmp | \ - awk -F \" '$$2 ~ /^Developer ID Installer:/ { print $$2 }' > .installer_identity - -pkgbuild/%.pkg: %.kext - mkdir -p pkgbuild/$*_root/Library/Extensions - cp -pR $*.kext pkgbuild/$*_root/Library/Extensions - mkdir -p pkgbuild/$*_root/Library/LaunchDaemons - cp pkg/launchd/net.sf.tuntaposx.$*.plist pkgbuild/$*_root/Library/LaunchDaemons - pkgbuild --root pkgbuild/$*_root \ - --component-plist pkg/components/$*.plist \ - --scripts pkg/scripts/$* pkgbuild/$*.pkg - -tuntap_$(TUNTAP_VERSION).pkg: pkgbuild/tap.pkg pkgbuild/tun.pkg - productbuild --distribution pkg/distribution.xml --package-path pkgbuild \ - --resources pkg/res.dummy \ - tuntap_$(TUNTAP_VERSION).pkg ; \ - pkgutil --expand tuntap_$(TUNTAP_VERSION).pkg pkgbuild/tuntap_pkg.d - cp -pR pkg/res/ pkgbuild/tuntap_pkg.d/Resources - pkgutil --flatten pkgbuild/tuntap_pkg.d tuntap_$(TUNTAP_VERSION).pkg - if test -s ".installer_identity"; then \ - productsign --sign "$$(cat .installer_identity)" --keychain net.sf.tuntaposx.tmp \ - tuntap_$(TUNTAP_VERSION).pkg tuntap_$(TUNTAP_VERSION).pkg.signed ; \ - mv tuntap_$(TUNTAP_VERSION).pkg.signed tuntap_$(TUNTAP_VERSION).pkg ; \ - fi - -pkg: tuntap_$(TUNTAP_VERSION).pkg - tar czf tuntap_$(TUNTAP_VERSION).tar.gz \ - README.installer README tuntap_$(TUNTAP_VERSION).pkg - -# Install targets -# They are provided for the gentoo ebuild, but should work just fine for other people as well. -install_%_kext: %.kext - mkdir -p $(BASE)/Library/Extensions - cp -pR $*.kext $(BASE)/Library/Extensions/ - chown -R root:wheel $(BASE)/Library/Extensions/$*.kext - mkdir -p $(BASE)/Library/LaunchDaemons - cp pkg/launchd/net.sf.tuntaposx.$*.plist $(BASE)/Library/LaunchDaemons - chown -R root:wheel $(BASE)/Library/LaunchDaemons/net.sf.tuntaposx.$*.plist - -install: install_tap_kext install_tun_kext - -tarball: clean - touch tuntap_$(TUNTAP_VERSION)_src.tar.gz - tar czf tuntap_$(TUNTAP_VERSION)_src.tar.gz \ - -C .. \ - --exclude "tuntap/identity.p12" \ - --exclude "tuntap/tuntap_$(TUNTAP_VERSION)_src.tar.gz" \ - --exclude "tuntap/tuntap_$(TUNTAP_VERSION).tar.gz" \ - --exclude "tuntap/tuntap_$(TUNTAP_VERSION).pkg" \ - --exclude "*/.*" \ - tuntap - -clean: - cd src/tap && make -f Makefile clean - cd src/tun && make -f Makefile clean - -rm -rf pkgbuild - -rm -rf tuntap_$(TUNTAP_VERSION).pkg - -rm -f tuntap_$(TUNTAP_VERSION).tar.gz - -rm -f tuntap_$(TUNTAP_VERSION)_src.tar.gz - -%.kext: - cd src/$* && make TUNTAP_VERSION=$(TUNTAP_VERSION) -f Makefile all - if test -s ".signing_identity"; then \ - codesign -fv --keychain net.sf.tuntaposx.tmp -s "$$(cat .signing_identity)" \ - $*.kext ; \ - fi - -test: - # configd messes with interface flags, issuing SIOCSIFFLAGS ioctls upon receiving kernel - # events indicating protocols have been attached and detached. 
Unfortunately, configd does - # this asynchronously, making the SIOCSIFFLAGS changes totally unpredictable when we bring - # our interfaces up and down in rapid succession during our tests. I haven't found a good - # way to suppress or handle this mess other than disabling configd temporarily. - killall -STOP configd - -PYTHONPATH=test python test/tuntap/tuntap_tests.py --tests='$(TESTS)' - killall -CONT configd - -.PHONY: test diff --git a/ext/tap-mac/tuntap/src/lock.cc b/ext/tap-mac/tuntap/src/lock.cc deleted file mode 100644 index 9c78783a..00000000 --- a/ext/tap-mac/tuntap/src/lock.cc +++ /dev/null @@ -1,206 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. - * - * Locking implementation. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "lock.h" - -extern "C" { - -#include <kern/clock.h> - -#include <sys/syslog.h> -#include <sys/proc.h> - -} - -#if 0 -#define dprintf(...) log(LOG_INFO, __VA_ARGS__) -#else -#define dprintf(...) 
-#endif - -/* class tt_lock */ -lck_grp_t *tt_lock::tt_lck_grp = NULL; - -bool -tt_lock::initialize() -{ - /* init if necessary */ - if (tt_lck_grp == NULL) { - dprintf("initing lock group\n"); - tt_lck_grp = lck_grp_alloc_init("tuntap locks", LCK_GRP_ATTR_NULL); - - if (tt_lck_grp == NULL) { - /* if something fails, the lock won't work */ - log(LOG_ERR, "tuntap: could not allocate locking group\n"); - return false; - } - } - - return true; -} - -void -tt_lock::shutdown() -{ - /* free the locking group */ - if (tt_lck_grp != NULL) { - dprintf("freeing lock group\n"); - lck_grp_free(tt_lck_grp); - tt_lck_grp = NULL; - } -} - -/* tt_mutex */ -tt_mutex::tt_mutex() -{ - /* fail if locking group not initialized */ - if (tt_lck_grp == NULL) - return; - - /* allocate the lock */ - lck = lck_rw_alloc_init(tt_lck_grp, NULL); - - if (lck == NULL) - log(LOG_ERR, "tuntap: could not allocate mutex\n"); -} - -tt_mutex::~tt_mutex() -{ - /* if the lock doesn't exist, this will be a no-op */ - if (lck == NULL) - return; - - /* free the lock */ - lck_rw_free(lck, tt_lck_grp); -} - -void -tt_mutex::lock() -{ - if (lck != NULL) - lck_rw_lock_exclusive(lck); -} - -void -tt_mutex::unlock() -{ - if (lck != NULL) - lck_rw_unlock_exclusive(lck); -} - -void -tt_mutex::sleep(void *cond) -{ - if (lck != NULL) - lck_rw_sleep(lck, LCK_SLEEP_DEFAULT, cond, THREAD_INTERRUPTIBLE); -} - -void -tt_mutex::sleep(void *cond, uint64_t nanoseconds) -{ - if (lck != NULL) { - uint64_t abstime; - nanoseconds_to_absolutetime(nanoseconds, &abstime); - lck_rw_sleep_deadline(lck, LCK_SLEEP_DEFAULT, cond, THREAD_INTERRUPTIBLE, abstime); - } -} - -void -tt_mutex::wakeup(void *cond) -{ - if (lck != NULL) - ::wakeup(cond); -} - -/* tt_gate */ -tt_gate::tt_gate() - : ticket_number(0), - population(0) -{ -} - -void -tt_gate::enter() -{ - /* just try to grab the lock, increase the ticket number and the population */ - auto_lock l(&slock); - ticket_number++; - population++; -} - -void -tt_gate::exit() -{ - auto_lock l(&slock); - ticket_number--; - population--; -} - -bool -tt_gate::is_anyone_in() -{ - return population != 0; -} - -unsigned int -tt_gate::get_ticket_number() -{ - return ticket_number; -} - -void -tt_gate::lock() -{ - slock.lock(); -} - -void -tt_gate::unlock() -{ - slock.unlock(); -} - -void -tt_gate::sleep(void* cond) -{ - slock.sleep(cond); -} - -void -tt_gate::sleep(void* cond, uint64_t nanoseconds) -{ - slock.sleep(cond, nanoseconds); -} - -void -tt_gate::wakeup(void* cond) -{ - slock.wakeup(cond); -} - diff --git a/ext/tap-mac/tuntap/src/lock.h b/ext/tap-mac/tuntap/src/lock.h deleted file mode 100644 index 51d3299a..00000000 --- a/ext/tap-mac/tuntap/src/lock.h +++ /dev/null @@ -1,160 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. - * - * Locking is not as straightforward for Tiger. So declare our own locking class. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. 
The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __LOCK_H__ -#define __LOCK_H__ - -extern "C" { - -#include <kern/locks.h> -#include <sys/param.h> - -} - -/* our own locking class. declares the common interface of the locking primitives. */ -class tt_lock { - - protected: - /* locking group */ - static lck_grp_t *tt_lck_grp; - - public: - /* be virtual */ - virtual ~tt_lock() { }; - - /* static intialization (inits the locking group) */ - static bool initialize(); - static void shutdown(); - - /* locking */ - virtual void lock() = 0; - virtual void unlock() = 0; - - /* monitor primitives */ - virtual void sleep(void* cond) = 0; - virtual void sleep(void* cond, uint64_t) = 0; - virtual void wakeup(void* cond) = 0; -}; - -/* simple mutex */ -class tt_mutex : public tt_lock { - - private: - /* underlying darwin lock */ - lck_rw_t *lck; - - public: - tt_mutex(); - virtual ~tt_mutex(); - - void lock(); - void unlock(); - - /* monitor primitives */ - void sleep(void* cond); - void sleep(void* cond, uint64_t); - void wakeup(void* cond); -}; - -/* A very special locking class that we use to track threads that enter and leave the character - * device service functions. They call enter() before entering the actual service routinge and - * exit() when done. enter() only permits them to pass when the gate isn't locked. Furthermore, the - * gate assigns ticket numbers to everyone that passes the gate, so you can check whether more - * threads came through. See tuntap_mgr::shutdown() for how we use that stuff. - */ -class tt_gate : public tt_lock { - - private: - /* synchronization lock */ - tt_mutex slock; - /* ticket number */ - unsigned int ticket_number; - /* count of threads that are in */ - unsigned int population; - - public: - /* construct a new gate */ - tt_gate(); - - /* enter - pass the gate */ - void enter(); - /* exit - pass the gate */ - void exit(); - - /* check whether anyone is in */ - bool is_anyone_in(); - /* gets the next ticket number */ - unsigned int get_ticket_number(); - - /* lock the gate */ - void lock(); - /* unlock the gate */ - void unlock(); - - /* monitor primitives */ - void sleep(void* cond); - void sleep(void* cond, uint64_t); - void wakeup(void* cond); -}; - -/* auto_lock and auto_rwlock serve as automatic lock managers: Create an object, passing the - * tt_[rw]lock you want to lock to have it grab the lock. When the object goes out of scope, the - * destructor of the class will release the lock. 
- */ -class auto_lock { - - protected: - /* the lock we hold */ - tt_lock *l; - - public: - auto_lock(tt_lock *m) - : l(m) - { - lock(); - } - - ~auto_lock() - { - unlock(); - } - - void lock() - { - l->lock(); - } - - void unlock() - { - l->unlock(); - } -}; - -#endif /* __LOCK_H__ */ - diff --git a/ext/tap-mac/tuntap/src/mem.cc b/ext/tap-mac/tuntap/src/mem.cc deleted file mode 100644 index cd3264fa..00000000 --- a/ext/tap-mac/tuntap/src/mem.cc +++ /dev/null @@ -1,76 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. Common functionality of tap_interface and tun_interface. - * - * Memory management implementation. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "mem.h" - -extern "C" { - -#include <libkern/OSMalloc.h> - -} - -#if 0 -#define dprintf(...) log(LOG_INFO, __VA_ARGS__) -#else -#define dprintf(...) -#endif - -static int inited = 0; -static OSMallocTag tag; - -void -mem_initialize(const char* name) { - - if (!inited) { - tag = OSMalloc_Tagalloc(name, OSMT_DEFAULT); - inited = 1; - } -} - -void -mem_shutdown() { - - if (inited) { - OSMalloc_Tagfree(tag); - inited = 0; - } -} - -void * -mem_alloc(uint32_t size) { - - return OSMalloc(size, tag); -} - -void -mem_free(void *addr, uint32_t size) { - - OSFree(addr, size, tag); -} - diff --git a/ext/tap-mac/tuntap/src/mem.h b/ext/tap-mac/tuntap/src/mem.h deleted file mode 100644 index 4d06fd8c..00000000 --- a/ext/tap-mac/tuntap/src/mem.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. Common functionality of tap_interface and tun_interface. - * - * Memory management. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_H__ -#define __MEM_H__ - -extern "C" { - -#include <stdint.h> - -} - -/* Memory manager initalization and shutdown */ -void mem_initialize(const char *name); -void mem_shutdown(); - -/* Memory allocation functions */ -void *mem_alloc(uint32_t size); -void mem_free(void *addr, uint32_t size); - -#endif /* __MEM_H__ */ - diff --git a/ext/tap-mac/tuntap/src/tap/Info.plist b/ext/tap-mac/tuntap/src/tap/Info.plist deleted file mode 100644 index bb9b03fd..00000000 --- a/ext/tap-mac/tuntap/src/tap/Info.plist +++ /dev/null @@ -1,36 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> -<plist version="1.0"> -<dict> - <key>CFBundleDevelopmentRegion</key> - <string>@@CFBUNDLEDEVELOPMENTREGION@@</string> - <key>CFBundleExecutable</key> - <string>@@CFBUNDLEEXECUTABLE@@</string> - <key>CFBundleIdentifier</key> - <string>@@CFBUNDLEIDENTIFIER@@</string> - <key>CFBundleInfoDictionaryVersion</key> - <string>6.0</string> - <key>CFBundleName</key> - <string>@@CFBUNDLEEXECUTABLE@@</string> - <key>CFBundlePackageType</key> - <string>@@CFBUNDLEPACKAGETYPE@@</string> - <key>CFBundleShortVersionString</key> - <string>@@CFBUNDLEVERSION@@</string> - <key>CFBundleSignature</key> - <string>@@CFBUNDLESIGNATURE@@</string> - <key>CFBundleVersion</key> - <string>1.0</string> - <key>OSBundleLibraries</key> - <dict> - <key>com.apple.kpi.mach</key> - <string>8.0</string> - <key>com.apple.kpi.bsd</key> - <string>8.0</string> - <key>com.apple.kpi.libkern</key> - <string>8.0</string> - <key>com.apple.kpi.unsupported</key> - <string>8.0</string> - </dict> -</dict> -</plist> - diff --git a/ext/tap-mac/tuntap/src/tap/Makefile b/ext/tap-mac/tuntap/src/tap/Makefile deleted file mode 100644 index 306a86d7..00000000 --- a/ext/tap-mac/tuntap/src/tap/Makefile +++ /dev/null @@ -1,60 +0,0 @@ -# -# ethertap driver for MacOSX -# -# Makefile -# -# (c) 2004, 2005, 2006, 2007, 2008 Mattias Nissler -# - -OBJS = ../tuntap.o ../tuntap_mgr.o ../lock.o ../mem.o kmod.o tap.o -KMOD_BIN = tap -BUNDLE_DIR = ../.. -BUNDLE_NAME = tap.kext - -TAP_KEXT_VERSION = $(TUNTAP_VERSION) - -BUNDLE_REGION = English -BUNDLE_IDENTIFIER = com.zerotier.tap -BUNDLE_SIGNATURE = ???? -BUNDLE_PACKAGETYPE = KEXT -BUNDLE_VERSION = $(TAP_KEXT_VERSION) - -INCLUDE = -I.. 
-I/System/Library/Frameworks/Kernel.framework/Headers -CFLAGS = -Wall -Werror -mkernel -force_cpusubtype_ALL \ - -nostdinc -fno-builtin -fno-stack-protector -msoft-float -fno-common \ - -arch x86_64 \ - -DKERNEL -DAPPLE -DKERNEL_PRIVATE -DTUNTAP_VERSION=\"$(TUNTAP_VERSION)\" \ - -DTAP_KEXT_VERSION=\"$(TAP_KEXT_VERSION)\" -CCFLAGS = $(CFLAGS) -LDFLAGS = -Wall -Werror -arch x86_64 -Xlinker -kext -nostdlib -lkmodc++ -lkmod -lcc_kext - -CCP = clang -x c++ -CC = clang -x c -LD = clang - -all: $(KMOD_BIN) bundle - -.c.o: - $(CC) $(CFLAGS) $(INCLUDE) -c $< -o $@ -.cc.o: - $(CCP) $(CCFLAGS) $(INCLUDE) -c $< -o $@ - -$(KMOD_BIN): $(OBJS) - $(LD) $(LDFLAGS) -o $(KMOD_BIN) $(OBJS) - -bundle: $(KMOD_BIN) - rm -rf $(BUNDLE_DIR)/$(BUNDLE_NAME) - mkdir -p $(BUNDLE_DIR)/$(BUNDLE_NAME)/Contents/MacOS - cp $(KMOD_BIN) $(BUNDLE_DIR)/$(BUNDLE_NAME)/Contents/MacOS - sed -e "s/@@CFBUNDLEEXECUTABLE@@/$(KMOD_BIN)/" \ - -e "s/@@CFBUNDLEDEVELOPMENTREGION@@/$(BUNDLE_REGION)/" \ - -e "s/@@CFBUNDLEIDENTIFIER@@/$(BUNDLE_IDENTIFIER)/" \ - -e "s/@@CFBUNDLESIGNATURE@@/$(BUNDLE_SIGNATURE)/" \ - -e "s/@@CFBUNDLEPACKAGETYPE@@/$(BUNDLE_PACKAGETYPE)/" \ - -e "s/@@CFBUNDLEVERSION@@/$(BUNDLE_VERSION)/" \ - Info.plist > $(BUNDLE_DIR)/$(BUNDLE_NAME)/Contents/Info.plist - -clean: - -rm -f $(OBJS) $(KMOD_BIN) - -rm -rf $(BUNDLE_DIR)/$(BUNDLE_NAME) - diff --git a/ext/tap-mac/tuntap/src/tap/kmod.cc b/ext/tap-mac/tuntap/src/tap/kmod.cc deleted file mode 100644 index f9c4a40e..00000000 --- a/ext/tap-mac/tuntap/src/tap/kmod.cc +++ /dev/null @@ -1,93 +0,0 @@ -/* - * ethertap device for MacOSX. - * - * Kext definition (it is a mach kmod really...) - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "tap.h" -#include "mem.h" - -extern "C" { - -#include <sys/param.h> - -#include <mach/kmod.h> - -static tap_manager *mgr; - -/* - * start function. called when the kext gets loaded. 
- */ -static kern_return_t tap_module_start(struct kmod_info *ki, void *data) -{ - mem_initialize(TAP_FAMILY_NAME); - - /* initialize locking */ - if (!tt_lock::initialize()) - return KMOD_RETURN_FAILURE; - - /* create a tap manager that will handle the rest */ - mgr = new tap_manager(); - - if (mgr != NULL) { - if (mgr->initialize(TAP_IF_COUNT, (char *) TAP_FAMILY_NAME)) - return KMOD_RETURN_SUCCESS; - - delete mgr; - mgr = NULL; - /* clean up locking */ - tt_lock::shutdown(); - } - - return KMOD_RETURN_FAILURE; -} - -/* - * stop function. called when the kext should be unloaded. unloading can be prevented by - * returning failure - */ -static kern_return_t tap_module_stop(struct kmod_info *ki, void *data) -{ - if (mgr != NULL) { - if (!mgr->shutdown()) - return KMOD_RETURN_FAILURE; - - delete mgr; - mgr = NULL; - } - - /* clean up locking */ - tt_lock::shutdown(); - - mem_shutdown(); - - return KMOD_RETURN_SUCCESS; -} - -KMOD_DECL(tap, TAP_KEXT_VERSION) - -} - diff --git a/ext/tap-mac/tuntap/src/tap/tap.cc b/ext/tap-mac/tuntap/src/tap/tap.cc deleted file mode 100644 index b348a85e..00000000 --- a/ext/tap-mac/tuntap/src/tap/tap.cc +++ /dev/null @@ -1,533 +0,0 @@ -/* - * ethertap device for macosx. - * - * tap_interface class definition - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "tap.h" - -extern "C" { - -#include <sys/systm.h> -#include <sys/syslog.h> -#include <sys/param.h> -#include <sys/sockio.h> -#include <sys/random.h> -#include <sys/kern_event.h> - -#include <mach/thread_policy.h> - -#include <net/if_types.h> -#include <net/if_arp.h> -#include <net/if_dl.h> -#include <net/if_media.h> -#include <net/dlil.h> -#include <net/ethernet.h> - -} - -#if 0 -#define dprintf(...) log(LOG_INFO, __VA_ARGS__) -#else -#define dprintf(...) -#endif - -// These declarations are missing in the Kernel.framework headers, put present in userspace :-/ -#pragma pack(4) -struct ifmediareq { - char ifm_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ - int ifm_current; /* current media options */ - int ifm_mask; /* don't care mask */ - int ifm_status; /* media status */ - int ifm_active; /* active options */ - int ifm_count; /* # entries in ifm_ulist array */ - int *ifm_ulist; /* media words */ -}; - -struct ifmediareq64 { - char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - int ifm_current; /* current media options */ - int ifm_mask; /* don't care mask */ - int ifm_status; /* media status */ - int ifm_active; /* active options */ - int ifm_count; /* # entries in ifm_ulist array */ - user64_addr_t ifmu_ulist __attribute__((aligned(8))); -}; - -struct ifmediareq32 { - char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - int ifm_current; /* current media options */ - int ifm_mask; /* don't care mask */ - int ifm_status; /* media status */ - int ifm_active; /* active options */ - int ifm_count; /* # entries in ifm_ulist array */ - user32_addr_t ifmu_ulist; /* 32-bit pointer */ -}; -#pragma pack() - -#define SIOCGIFMEDIA32 _IOWR('i', 56, struct ifmediareq32) /* get net media */ -#define SIOCGIFMEDIA64 _IOWR('i', 56, struct ifmediareq64) /* get net media (64-bit) */ - -/* thread_policy_set is exported in Mach.kext, but commented in mach/thread_policy.h in the - * Kernel.Framework headers (why?). Add a local declaration to work around that. - */ -extern "C" { -kern_return_t thread_policy_set( - thread_t thread, - thread_policy_flavor_t flavor, - thread_policy_t policy_info, - mach_msg_type_number_t count); -} - -static unsigned char ETHER_BROADCAST_ADDR[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - -/* members */ -tap_interface::tap_interface() { - bzero(attached_protos, sizeof(attached_protos)); - input_thread = THREAD_NULL; -} - -bool -tap_interface::initialize(unsigned short major, unsigned short unit) -{ - this->unit = unit; - this->family_name = TAP_FAMILY_NAME; - this->family = IFNET_FAMILY_ETHERNET; - this->type = IFT_ETHER; - bzero(unique_id, UIDLEN); - snprintf(unique_id, UIDLEN, "%s%d", family_name, unit); - - dprintf("tap: starting interface %s%d\n", TAP_FAMILY_NAME, unit); - - /* register character device */ - if (!tuntap_interface::register_chardev(major)) - return false; - - return true; -} - -void -tap_interface::shutdown() -{ - dprintf("tap: shutting down tap interface %s%d\n", TAP_FAMILY_NAME, unit); - - unregister_chardev(); -} - -int -tap_interface::initialize_interface() -{ - struct sockaddr_dl lladdr; - lladdr.sdl_len = sizeof(lladdr); - lladdr.sdl_family = AF_LINK; - lladdr.sdl_alen = ETHER_ADDR_LEN; - lladdr.sdl_nlen = lladdr.sdl_slen = 0; - - /* generate a random MAC address */ - read_random(LLADDR(&lladdr), ETHER_ADDR_LEN); - - /* clear multicast bit and set local assignment bit (see IEEE 802) */ - (LLADDR(&lladdr))[0] &= 0xfe; - (LLADDR(&lladdr))[0] |= 0x02; - - dprintf("tap: random tap address: %02x:%02x:%02x:%02x:%02x:%02x\n", - (LLADDR(&lladdr))[0] & 0xff, - (LLADDR(&lladdr))[1] & 0xff, - (LLADDR(&lladdr))[2] & 0xff, - (LLADDR(&lladdr))[3] & 0xff, - (LLADDR(&lladdr))[4] & 0xff, - (LLADDR(&lladdr))[5] & 0xff); - - /* register interface */ - if (!tuntap_interface::register_interface(&lladdr, ETHER_BROADCAST_ADDR, ETHER_ADDR_LEN)) - return EIO; - - /* Set link level address. Yes, we need to do that again. Darwin sucks. 
*/ - errno_t err = ifnet_set_lladdr(ifp, LLADDR(&lladdr), ETHER_ADDR_LEN); - if (err) - dprintf("tap: failed to set lladdr on %s%d: %d\n", family_name, unit, err); - - /* set mtu */ - ifnet_set_mtu(ifp, TAP_MTU); - /* set header length */ - ifnet_set_hdrlen(ifp, sizeof(struct ether_header)); - /* add the broadcast flag */ - ifnet_set_flags(ifp, IFF_BROADCAST, IFF_BROADCAST); - - /* we must call bpfattach(). Otherwise we deadlock BPF while unloading. Seems to be a bug in - * the kernel, see bpfdetach() in net/bpf.c, it will return without releasing the lock if - * the interface wasn't attached. I wonder what they were smoking while writing it ;-) - */ - bpfattach(ifp, DLT_EN10MB, ifnet_hdrlen(ifp)); - - /* Inject an empty packet to trigger the input thread calling demux(), which will unblock - * thread_sync_lock. This is part of a hack to avoid a kernel crash on re-attaching - * interfaces, see comment in shutdown_interface for more information. - */ - mbuf_t empty_mbuf; - mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &empty_mbuf); - if (empty_mbuf != NULL) { - mbuf_pkthdr_setrcvif(empty_mbuf, ifp); - mbuf_pkthdr_setlen(empty_mbuf, 0); - mbuf_pkthdr_setheader(empty_mbuf, mbuf_data(empty_mbuf)); - mbuf_set_csum_performed(empty_mbuf, 0, 0); - if (ifnet_input(ifp, empty_mbuf, NULL) == 0) { - auto_lock l(&thread_sync_lock); - for (int i = 0; i < 100 && input_thread == THREAD_NULL; ++i) { - dprintf("input thread not found, waiting...\n"); - thread_sync_lock.sleep(&input_thread, 10000000); - } - } else { - mbuf_freem(empty_mbuf); - } - } - if (input_thread == THREAD_NULL) - dprintf("Failed to determine input thread!\n"); - - return 0; -} - -void -tap_interface::shutdown_interface() -{ - dprintf("tap: shutting down network interface of device %s%d\n", TAP_FAMILY_NAME, unit); - - /* detach all protocols */ - for (unsigned int i = 0; i < MAX_ATTACHED_PROTOS; i++) { - if (attached_protos[i].used) { - errno_t err = ifnet_detach_protocol(ifp, attached_protos[i].proto); - if (err) - log(LOG_WARNING, "tap: could not detach protocol %d from %s%d\n", - attached_protos[i].proto, TAP_FAMILY_NAME, unit); - } - } - - cleanup_interface(); - unregister_interface(); - - /* There's a race condition in the kernel that may cause crashes when quickly re-attaching - * interfaces. The crash happens when the interface gets re-attached before the input thread - * for the interface managed to terminate, in which case an assert on the input_waiting flag - * to be clear triggers in ifnet_attach. The bug is really that there's no synchronization - * for terminating the input thread. To work around this, the following code does add the - * missing synchronization to wait for the input thread to terminate. Of course, threading - * primitives available to kexts are few, and I'm not aware of a way to wait for a thread to - * terminate. Hence, the code calls thread_policy_set (passing bogus parameters) in a loop, - * until it returns KERN_TERMINATED. Since this is all rather fragile, there's an upper - * limit on the loop iteratations we're willing to make, so this terminates eventually even - * if things change on the kernel side eventually. 
- */ - if (input_thread != THREAD_NULL) { - dprintf("Waiting for input thread...\n"); - kern_return_t result = 0; - for (int i = 0; i < 100; ++i) { - result = thread_policy_set(input_thread, -1, NULL, 0); - dprintf("thread_policy_set result: %d\n", result); - if (result == KERN_TERMINATED) { - dprintf("Input thread terminated.\n"); - thread_deallocate(input_thread); - input_thread = THREAD_NULL; - break; - } - - auto_lock l(&thread_sync_lock); - thread_sync_lock.sleep(&input_thread, 10000000); - } - } -} - -errno_t -tap_interface::if_ioctl(u_int32_t cmd, void *arg) -{ - dprintf("tap: if_ioctl cmd: %d (%x)\n", cmd & 0xff, cmd); - - switch (cmd) { - case SIOCSIFLLADDR: - { - /* set ethernet address */ - struct sockaddr *ea = &(((struct ifreq *) arg)->ifr_addr); - - dprintf("tap: SIOCSIFLLADDR family %d len %d\n", - ea->sa_family, ea->sa_len); - - /* check if it is really an ethernet address */ - if (ea->sa_family != AF_LINK || ea->sa_len != ETHER_ADDR_LEN) - return EINVAL; - - /* ok, copy */ - errno_t err = ifnet_set_lladdr(ifp, ea->sa_data, ETHER_ADDR_LEN); - if (err) { - dprintf("tap: failed to set lladdr on %s%d: %d\n", - family_name, unit, err); - return err; - } - - /* Generate a LINK_ON event. This necessary for configd to re-read - * the interface data and refresh the MAC address. Not doing so - * would result in the DHCP client using a stale MAC address... - */ - generate_link_event(KEV_DL_LINK_ON); - - return 0; - } - - case SIOCGIFMEDIA32: - case SIOCGIFMEDIA64: - { - struct ifmediareq *ifmr = (struct ifmediareq*) arg; - user_addr_t list = USER_ADDR_NULL; - - ifmr->ifm_current = IFM_ETHER; - ifmr->ifm_mask = 0; - ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; - ifmr->ifm_active = IFM_ETHER; - ifmr->ifm_count = 1; - - if (cmd == SIOCGIFMEDIA64) - list = ((struct ifmediareq64*) ifmr)->ifmu_ulist; - else - list = CAST_USER_ADDR_T( - ((struct ifmediareq32*) ifmr)->ifmu_ulist); - - if (list != USER_ADDR_NULL) - return copyout(&ifmr->ifm_current, list, sizeof(int)); - - return 0; - } - - default: - /* let our superclass handle it */ - return tuntap_interface::if_ioctl(cmd, arg); - } - - return EOPNOTSUPP; -} - -errno_t -tap_interface::if_demux(mbuf_t m, char *header, protocol_family_t *proto) -{ - struct ether_header *eh = (struct ether_header *) header; - unsigned char lladdr[ETHER_ADDR_LEN]; - - dprintf("tap: if_demux\n"); - - /* Make note of what input thread this interface is running on. This is part of a hack to - * avoid a crash on re-attaching interfaces, see comment in shutdown_interface for details. 
- */ - if (input_thread == THREAD_NULL) { - auto_lock l(&thread_sync_lock); - input_thread = current_thread(); - thread_reference(input_thread); - thread_sync_lock.wakeup(&input_thread); - } - - /* size check */ - if (mbuf_len(m) < sizeof(struct ether_header)) - return ENOENT; - - /* catch broadcast and multicast (stolen from bsd/net/ether_if_module.c) */ - if (eh->ether_dhost[0] & 1) { - if (memcmp(ETHER_BROADCAST_ADDR, eh->ether_dhost, ETHER_ADDR_LEN) == 0) { - /* broadcast */ - dprintf("tap: broadcast packet.\n"); - mbuf_setflags_mask(m, MBUF_BCAST, MBUF_BCAST); - } else { - /* multicast */ - dprintf("tap: multicast packet.\n"); - mbuf_setflags_mask(m, MBUF_MCAST, MBUF_MCAST); - } - } else { - /* check wether the packet has our address */ - ifnet_lladdr_copy_bytes(ifp, lladdr, ETHER_ADDR_LEN); - if (memcmp(lladdr, eh->ether_dhost, ETHER_ADDR_LEN) != 0) - mbuf_setflags_mask(m, MBUF_PROMISC, MBUF_PROMISC); - } - - /* find the protocol */ - for (unsigned int i = 0; i < MAX_ATTACHED_PROTOS; i++) { - if (attached_protos[i].used && attached_protos[i].type == eh->ether_type) { - *proto = attached_protos[i].proto; - return 0; - } - } - - dprintf("tap: if_demux() failed to find proto.\n"); - - /* no matching proto found */ - return ENOENT; -} - -errno_t -tap_interface::if_framer(mbuf_t *m, const struct sockaddr *dest, const char *dest_linkaddr, - const char *frame_type) -{ - struct ether_header *eh; - mbuf_t nm = *m; - errno_t err; - - dprintf("tap: if_framer\n"); - - /* prepend the ethernet header */ - err = mbuf_prepend(&nm, sizeof (struct ether_header), MBUF_WAITOK); - if (err) { - dprintf("tap: could not prepend data to mbuf: %d\n", err); - return err; - } - *m = nm; - - /* fill the header */ - eh = (struct ether_header *) mbuf_data(*m); - memcpy(eh->ether_dhost, dest_linkaddr, ETHER_ADDR_LEN); - ifnet_lladdr_copy_bytes(ifp, eh->ether_shost, ETHER_ADDR_LEN); - eh->ether_type = *((u_int16_t *) frame_type); - - return 0; -} - -errno_t -tap_interface::if_add_proto(protocol_family_t proto, const struct ifnet_demux_desc *desc, - u_int32_t ndesc) -{ - errno_t err; - - dprintf("tap: if_add_proto proto %d\n", proto); - - for (unsigned int i = 0; i < ndesc; i++) { - /* try to add the protocol */ - err = add_one_proto(proto, desc[i]); - if (err != 0) { - /* if that fails, remove everything stored so far */ - if_del_proto(proto); - return err; - } - } - - return 0; -} - -errno_t -tap_interface::if_del_proto(protocol_family_t proto) -{ - dprintf("tap: if_del_proto proto %d\n", proto); - - /* delete all matching entries in attached_protos */ - for (unsigned int i = 0; i < MAX_ATTACHED_PROTOS; i++) { - if (attached_protos[i].proto == proto) - attached_protos[i].used = false; - } - - return 0; -} - -errno_t -tap_interface::if_check_multi(const struct sockaddr *maddr) -{ - dprintf("tap: if_check_multi family %d\n", maddr->sa_family); - - /* see whether it is a ethernet address with the multicast bit set */ - if (maddr->sa_family == AF_LINK) { - struct sockaddr_dl *dlmaddr = (struct sockaddr_dl *) maddr; - if (LLADDR(dlmaddr)[0] & 0x01) - return 0; - else - return EADDRNOTAVAIL; - } - - return EOPNOTSUPP; -} - -errno_t -tap_interface::add_one_proto(protocol_family_t proto, const struct ifnet_demux_desc &dd) -{ - int free = -1; - u_int16_t dt; - - /* we only support DLIL_DESC_ETYPE2 */ - if (dd.type != DLIL_DESC_ETYPE2 || dd.datalen != 2) { - log(LOG_WARNING, "tap: tap only supports DLIL_DESC_ETYPE2 protocols.\n"); - return EINVAL; - } - - dt = *((u_int16_t *) (dd.data)); - - /* see if the protocol is already 
registered */ - for (unsigned int i = 0; i < MAX_ATTACHED_PROTOS; i++) { - if (attached_protos[i].used) { - if (dt == attached_protos[i].type) { - /* already registered */ - if (attached_protos[i].proto == proto) { - /* matches the old entry */ - return 0; - } else - return EEXIST; - } - } else if (free == -1) - free = i; - } - - /* did we find a free entry? */ - if (free == -1) - /* is ENOBUFS correct? */ - return ENOBUFS; - - /* ok, save information */ - attached_protos[free].used = true; - attached_protos[free].type = dt; - attached_protos[free].proto = proto; - - return 0; -} - -/* This code is shamelessly stolen from if_bond.c */ -void -tap_interface::generate_link_event(u_int32_t code) -{ - struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; - } event; - - bzero(&event, sizeof(event)); - event.header.total_size = sizeof(event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass = KEV_DL_SUBCLASS; - event.header.event_code = code; - event.header.event_data[0] = family; - event.unit = (u_int32_t) unit; - strncpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); - - ifnet_event(ifp, &event.header); -} - -/* tap_manager members */ -tuntap_interface * -tap_manager::create_interface() -{ - return new tap_interface(); -} - diff --git a/ext/tap-mac/tuntap/src/tap/tap.h b/ext/tap-mac/tuntap/src/tap/tap.h deleted file mode 100644 index a5164d4a..00000000 --- a/ext/tap-mac/tuntap/src/tap/tap.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * ethertap device for MacOSX. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __TAP_H__ -#define __TAP_H__ - -#include "tuntap.h" - -extern "C" { - -#include <kern/thread.h> - -} - -#define TAP_FAMILY_NAME ((char *) "zt") -#define TAP_IF_COUNT 32 /* max number of tap interfaces */ -#define TAP_MTU 2800 -#define TAP_LLADDR tap_lladdr - -/* the mac address of our interfaces. 
note that the last byte will be replaced by the unit number */ -extern u_char tap_lladdr[]; - -/* tap manager */ -class tap_manager : public tuntap_manager { - - protected: - /* just define the interface creation method */ - virtual tuntap_interface *create_interface(); - -}; - -/* the tap network interface */ -class tap_interface : public tuntap_interface { - public: - tap_interface(); - - protected: - /* maximum number of protocols that can be attached */ - static const unsigned int MAX_ATTACHED_PROTOS = 8; - - /* information about attached protocols for demuxing is stored here */ - struct { - /* whether this entry is used */ - bool used; - /* type in the ethernet header */ - u_int16_t type; - /* protocol passed to add_proto */ - protocol_family_t proto; - } attached_protos[MAX_ATTACHED_PROTOS]; - - /* The input thread for the network interface. */ - thread_t input_thread; - - /* initializes the interface */ - virtual bool initialize(unsigned short major, unsigned short unit); - - /* shuts the interface down */ - virtual void shutdown(); - - /* called when the character device is opened in order to intialize the network - * interface. - */ - virtual int initialize_interface(); - /* called when the character device is closed to shutdown the network interface */ - virtual void shutdown_interface(); - - /* override interface routines */ - virtual errno_t if_ioctl(u_int32_t cmd, void *arg); - virtual errno_t if_demux(mbuf_t m, char *header, protocol_family_t *proto); - virtual errno_t if_framer(mbuf_t *m, const struct sockaddr *dest, - const char *dest_linkaddr, const char *frame_type); - virtual errno_t if_add_proto(protocol_family_t proto, - const struct ifnet_demux_desc *ddesc, u_int32_t ndesc); - virtual errno_t if_del_proto(protocol_family_t proto); - virtual errno_t if_check_multi(const struct sockaddr *maddr); - - /* if_add_proto helper */ - errno_t add_one_proto(protocol_family_t proto, const struct ifnet_demux_desc &dd); - - /* generates a kernel event */ - void generate_link_event(u_int32_t code); - - friend class tap_manager; -}; - -#endif /* __TAP_H__ */ - diff --git a/ext/tap-mac/tuntap/src/tuntap.cc b/ext/tap-mac/tuntap/src/tuntap.cc deleted file mode 100644 index d0f89018..00000000 --- a/ext/tap-mac/tuntap/src/tuntap.cc +++ /dev/null @@ -1,963 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. Common functionality of tap_interface and tun_interface. - * - * tuntap_interface class definition - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "tuntap.h" - -#if 0 -#define dprintf(...) log(LOG_INFO, __VA_ARGS__) -#else -#define dprintf(...) -#endif - -extern "C" { - -#include <sys/conf.h> -#include <sys/syslog.h> -#include <sys/param.h> -#include <sys/filio.h> -#include <sys/sockio.h> -#include <sys/fcntl.h> -#include <sys/kpi_socket.h> - -#include <vm/vm_kern.h> - -#include <net/if_types.h> -#include <net/if_var.h> -#include <net/if_dl.h> -#include <net/if_arp.h> - -#include <miscfs/devfs/devfs.h> - -} - -extern "C" { - -/* interface service functions that delegate to the appropriate tuntap_interface instance */ -errno_t -tuntap_if_output(ifnet_t ifp, mbuf_t m) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_output(m); - } - - if (m != NULL) - mbuf_freem_list(m); - - return ENODEV; -} - -errno_t -tuntap_if_ioctl(ifnet_t ifp, long unsigned int cmd, void *arg) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_ioctl(cmd, arg); - } - - return ENODEV; -} - -errno_t -tuntap_if_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, int (*cb)(ifnet_t, mbuf_t)) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_set_bpf_tap(mode, cb); - } - - return ENODEV; -} - -errno_t -tuntap_if_demux(ifnet_t ifp, mbuf_t m, char *header, protocol_family_t *proto) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_demux(m, header, proto); - } - - return ENODEV; -} - -errno_t -tuntap_if_framer(ifnet_t ifp, mbuf_t *m, const struct sockaddr *dest, const char *dest_linkaddr, - const char *frame_type) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_framer(m, dest, dest_linkaddr, frame_type); - } - - return ENODEV; -} - -errno_t -tuntap_if_add_proto(ifnet_t ifp, protocol_family_t proto, const struct ifnet_demux_desc *ddesc, - u_int32_t ndesc) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_add_proto(proto, ddesc, ndesc); - } - - return ENODEV; -} - -errno_t -tuntap_if_del_proto(ifnet_t ifp, protocol_family_t proto) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_del_proto(proto); - } - - return ENODEV; -} - -errno_t -tuntap_if_check_multi(ifnet_t ifp, const struct sockaddr* maddr) -{ - if (ifp != NULL) - { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - return ttif->if_check_multi(maddr); - } - - return ENODEV; -} - -void -tuntap_if_detached(ifnet_t ifp) -{ - if (ifp != NULL) { - tuntap_interface *ttif = (tuntap_interface *) ifnet_softc(ifp); - if (ttif != NULL) - ttif->if_detached(); - } -} - -errno_t -tuntap_if_noop_output(ifnet_t, mbuf_t) -{ - return ENODEV; -} - 
-errno_t -tuntap_if_noop_demux(ifnet_t, mbuf_t, char*, protocol_family_t*) -{ - return ENODEV; -} - -errno_t -tuntap_if_noop_add_proto(ifnet_t, protocol_family_t, const struct ifnet_demux_desc*, u_int32_t) -{ - return ENODEV; -} - -errno_t -tuntap_if_noop_del_proto(ifnet_t, protocol_family_t) -{ - return ENODEV; -} - -} /* extern "C" */ - -/* tuntap_mbuf_queue */ -tuntap_mbuf_queue::tuntap_mbuf_queue() -{ - head = tail = NULL; - size = 0; -} - -tuntap_mbuf_queue::~tuntap_mbuf_queue() -{ - clear(); -} - -bool -tuntap_mbuf_queue::enqueue(mbuf_t mb) -{ - if (size == QUEUE_SIZE) - return false; - - mbuf_setnextpkt(mb, NULL); - - if (head == NULL) - head = tail = mb; - else { - mbuf_setnextpkt(tail, mb); - tail = mb; - } - size++; - - return true; -} - -mbuf_t -tuntap_mbuf_queue::dequeue() -{ - mbuf_t ret; - - /* check wether there is a packet in the queue */ - if (head == NULL) - return NULL; - - /* fetch it */ - ret = head; - head = mbuf_nextpkt(head); - mbuf_setnextpkt(ret, NULL); - size--; - - return ret; -} - -void -tuntap_mbuf_queue::clear() -{ - /* free mbufs that are in the queue */ - if (head != NULL) - mbuf_freem_list(head); - - head = NULL; - tail = NULL; - size = 0; -} - -/* tuntap_interface members */ -tuntap_interface::tuntap_interface() -{ - /* initialize the members */ - ifp = NULL; - open = false; - block_io = true; - dev_handle = NULL; - pid = 0; - selthreadclear(&rsel); - bpf_mode = BPF_MODE_DISABLED; - bpf_callback = NULL; - bzero(unique_id, UIDLEN); - in_ioctl = false; -} - -tuntap_interface::~tuntap_interface() -{ -} - -bool -tuntap_interface::register_chardev(unsigned short major) -{ - /* register character device */ - dev_handle = devfs_make_node(makedev(major, unit), DEVFS_CHAR, 0, 0, 0660, "%s%d", - family_name, (int) unit); - - if (dev_handle == NULL) { - log(LOG_ERR, "tuntap: could not make /dev/%s%d\n", family_name, (int) unit); - return false; - } - - return true; -} - -void -tuntap_interface::unregister_chardev() -{ - dprintf("unregistering character device\n"); - - /* unregister character device */ - if (dev_handle != NULL) - devfs_remove(dev_handle); - dev_handle = NULL; -} - -bool -tuntap_interface::register_interface(const struct sockaddr_dl* lladdr, void *bcaddr, - u_int32_t bcaddrlen) -{ - struct ifnet_init_params ip; - errno_t err; - - dprintf("register_interface\n"); - - /* initialize an initialization info struct */ - ip.uniqueid_len = UIDLEN; - ip.uniqueid = unique_id; - ip.name = family_name; - ip.unit = unit; - ip.family = family; - ip.type = type; - ip.output = tuntap_if_output; - ip.demux = tuntap_if_demux; - ip.add_proto = tuntap_if_add_proto; - ip.del_proto = tuntap_if_del_proto; - ip.check_multi = tuntap_if_check_multi; - ip.framer = tuntap_if_framer; - ip.softc = this; - ip.ioctl = tuntap_if_ioctl; - ip.set_bpf_tap = tuntap_if_set_bpf_tap; - ip.detach = tuntap_if_detached; - ip.event = NULL; - ip.broadcast_addr = bcaddr; - ip.broadcast_len = bcaddrlen; - - dprintf("tuntap: tuntap_if_check_multi is at 0x%08x\n", (void*) tuntap_if_check_multi); - - /* allocate the interface */ - err = ifnet_allocate(&ip, &ifp); - if (err) { - log(LOG_ERR, "tuntap: could not allocate interface for %s%d: %d\n", family_name, - (int) unit, err); - ifp = NULL; - return false; - } - - /* activate the interface */ - err = ifnet_attach(ifp, lladdr); - if (err) { - log(LOG_ERR, "tuntap: could not attach interface %s%d: %d\n", family_name, - (int) unit, err); - ifnet_release(ifp); - ifp = NULL; - return false; - } - - dprintf("setting interface flags\n"); - - /* set 
interface flags */ - ifnet_set_flags(ifp, IFF_RUNNING | IFF_MULTICAST | IFF_SIMPLEX, (u_int16_t) ~0UL); - - dprintf("flags: %x\n", ifnet_flags(ifp)); - - return true; -} - -void -tuntap_interface::unregister_interface() -{ - errno_t err; - - dprintf("unregistering network interface\n"); - - if (ifp != NULL) { - interface_detached = false; - - /* detach interface */ - err = ifnet_detach(ifp); - if (err) - log(LOG_ERR, "tuntap: error detaching interface %s%d: %d\n", - family_name, unit, err); - - dprintf("interface detaching\n"); - - /* Wait until the interface has completely been detached. */ - thread_sync_lock.lock(); - while (!interface_detached) - thread_sync_lock.sleep(&interface_detached); - thread_sync_lock.unlock(); - - dprintf("interface detached\n"); - - /* release the interface */ - ifnet_release(ifp); - - ifp = NULL; - } - - dprintf("network interface unregistered\n"); -} - -void -tuntap_interface::cleanup_interface() -{ - errno_t err; - ifaddr_t *addrs; - ifaddr_t *a; - struct ifreq ifr; - - /* mark the interface down */ - ifnet_set_flags(ifp, 0, IFF_UP | IFF_RUNNING); - - /* Unregister all interface addresses. This works around a deficiency in the Darwin kernel. - * If we don't remove all IP addresses that are attached to the interface it can happen that - * the IP code fails to clean them up itself. When the interface is recycled, the IP code - * might then think some addresses are still attached to the interface... - */ - - err = ifnet_get_address_list(ifp, &addrs); - if (!err) { - - /* Execute a SIOCDIFADDR ioctl for each address. For technical reasons, we can only - * do that with a socket of the appropriate family. So try to create a dummy socket. - * I know this is a little expensive, but better than crashing... - * - * This really sucks. 
- */ - for (a = addrs; *a != NULL; a++) { - /* initialize the request parameters */ - snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s%d", - ifnet_name(ifp), ifnet_unit(ifp)); - ifaddr_address(*a, &(ifr.ifr_addr), sizeof(ifr.ifr_addr)); - if (ifr.ifr_addr.sa_family != AF_INET) - continue; - - dprintf("trying to delete address of family %d\n", ifr.ifr_addr.sa_family); - - do_sock_ioctl(ifr.ifr_addr.sa_family, SIOCDIFADDR, &ifr); - } - - /* release the address list */ - ifnet_free_address_list(addrs); - } -} - -bool -tuntap_interface::idle() -{ - return !(open); -} - -void -tuntap_interface::notify_bpf(mbuf_t mb, bool out) -{ - auto_lock l(&bpf_lock); - - if ((out && bpf_mode == BPF_MODE_OUTPUT) - || (!out && bpf_mode == BPF_MODE_INPUT) - || (bpf_mode == BPF_MODE_INPUT_OUTPUT)) - (*bpf_callback)(ifp, mb); -} - -void -tuntap_interface::do_sock_ioctl(sa_family_t af, unsigned long cmd, void* arg) { - if (in_ioctl) { - log(LOG_ERR, "tuntap: ioctl recursion detected, aborting.\n"); - return; - } - - socket_t sock; - errno_t err = sock_socket(af, SOCK_RAW, 0, NULL, NULL, &sock); - if (err) { - log(LOG_ERR, "tuntap: failed to create socket: %d\n", err); - return; - } - - in_ioctl = true; - - /* issue the ioctl */ - err = sock_ioctl(sock, cmd, arg); - if (err) - log(LOG_ERR, "tuntap: socket ioctl %d failed: %d\n", cmd, err); - - in_ioctl = false; - - /* get rid of the socket */ - sock_close(sock); -} - -/* character device service methods */ -int -tuntap_interface::cdev_open(int flags, int devtype, proc_t p) -{ - dprintf("tuntap: cdev_open()\n"); - - /* grab the lock so that there can only be one thread inside */ - auto_lock l(&lock); - - /* check wether it is already open */ - if (open) - return EBUSY; - - /* bring the network interface up */ - int error = initialize_interface(); - if (error) - return error; - - open = true; - pid = proc_pid(p); - - return 0; -} - -int -tuntap_interface::cdev_close(int flags, int devtype, proc_t p) -{ - dprintf("tuntap: cdev_close()\n"); - - auto_lock l(&lock); - - if (open) { - open = false; - - /* shut down the network interface */ - shutdown_interface(); - - /* clear the queue */ - send_queue.clear(); - - /* wakeup the cdev thread and notify selects */ - wakeup(this); - selwakeup(&rsel); - - return 0; - } - - return EBADF; -} - -int -tuntap_interface::cdev_read(uio_t uio, int ioflag) -{ - auto_lock l(&lock); - - unsigned int nb = 0; - int error; - - dprintf("tuntap: cdev read\n"); - - if (!open || ifp == NULL || !(ifnet_flags(ifp) & IFF_UP)) - return EIO; - - /* fetch a new mbuf from the queue if necessary */ - mbuf_t cur_mbuf = NULL; - while (cur_mbuf == NULL) { - dprintf("tuntap: fetching new mbuf\n"); - - cur_mbuf = send_queue.dequeue(); - if (cur_mbuf == NULL) { - /* nothing in queue, block or return */ - if (!block_io) { - dprintf("tuntap: aborting (nbio)\n"); - return EWOULDBLOCK; - } else { - /* block */ - dprintf("tuntap: waiting\n"); - /* release the lock while waiting */ - l.unlock(); - error = msleep(this, NULL, PZERO | PCATCH, "tuntap", NULL); - - l.lock(); - - if (error) - return error; - - /* see whether the device was closed in the meantime */ - if (!open || ifp == NULL || !(ifnet_flags(ifp) & IFF_UP)) - return EIO; - - } - } - } - - /* notify bpf */ - notify_bpf(cur_mbuf, true); - - /* output what we have */ - do { - dprintf("tuntap: got new mbuf: %p uio_resid: %d\n", cur_mbuf, uio_resid(uio)); - - /* now we have an mbuf */ - int chunk_len = min(mbuf_len(cur_mbuf), uio_resid(uio)); - error = uiomove((char *) mbuf_data(cur_mbuf), chunk_len, uio); 
- if (error) { - mbuf_freem(cur_mbuf); - return error; - } - nb += chunk_len; - - dprintf("tuntap: moved %d bytes to userspace uio_resid: %d\n", chunk_len, - uio_resid(uio)); - - /* update cur_mbuf */ - cur_mbuf = mbuf_free(cur_mbuf); - - } while (uio_resid(uio) > 0 && cur_mbuf != NULL); - - /* update statistics */ - ifnet_stat_increment_out(ifp, 1, nb, 0); - - /* still data left? forget about that ;-) */ - if (cur_mbuf != NULL) - mbuf_freem(cur_mbuf); - - dprintf("tuntap: read done\n"); - - return 0; -} - -int -tuntap_interface::cdev_write(uio_t uio, int ioflag) -{ - auto_lock l(&lock); - - if (!open || ifp == NULL || !(ifnet_flags(ifp) & IFF_UP)) - return EIO; - - dprintf("tuntap: cdev write. uio_resid: %d\n", uio_resid(uio)); - - /* pack the data into an mbuf chain */ - mbuf_t first, mb; - - /* first we need an mbuf having a header */ - mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &first); - if (first == NULL) { - log(LOG_ERR, "tuntap: could not get mbuf.\n"); - return ENOMEM; - } - mbuf_setlen(first, 0); - - unsigned int mlen = mbuf_maxlen(first); - unsigned int chunk_len; - unsigned int copied = 0; - unsigned int max_data_len = ifnet_mtu(ifp) + ifnet_hdrlen(ifp); - int error; - - /* stuff the data into the mbuf(s) */ - mb = first; - while (uio_resid(uio) > 0) { - /* copy a chunk. enforce mtu (don't know if this is correct behaviour) */ - chunk_len = min(max_data_len - copied, min(uio_resid(uio), mlen)); - error = uiomove((caddr_t) mbuf_data(mb), chunk_len, uio); - if (error) { - log(LOG_ERR, "tuntap: could not copy data from userspace: %d\n", error); - mbuf_freem(first); - return error; - } - - dprintf("tuntap: copied %d bytes, uio_resid %d\n", chunk_len, - uio_resid(uio)); - - mlen -= chunk_len; - mbuf_setlen(mb, mbuf_len(mb) + chunk_len); - copied += chunk_len; - - /* if done, break the loop */ - if (uio_resid(uio) <= 0 || copied >= max_data_len) - break; - - /* allocate a new mbuf if the current is filled */ - if (mlen == 0) { - mbuf_t next; - mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &next); - if (next == NULL) { - log(LOG_ERR, "tuntap: could not get mbuf.\n"); - mbuf_freem(first); - return ENOMEM; - } - mbuf_setnext(mb, next); - mb = next; - mbuf_setlen(mb, 0); - mlen = mbuf_maxlen(mb); - } - } - - /* fill in header info */ - mbuf_pkthdr_setrcvif(first, ifp); - mbuf_pkthdr_setlen(first, copied); - mbuf_pkthdr_setheader(first, mbuf_data(first)); - mbuf_set_csum_performed(first, 0, 0); - - /* update statistics */ - ifnet_stat_increment_in(ifp, 1, copied, 0); - - dprintf("tuntap: mbuf chain constructed. first: %p mb: %p len: %d data: %p\n", - first, mb, mbuf_len(first), mbuf_data(first)); - - /* notify bpf */ - notify_bpf(first, false); - - /* need to adjust the data pointer to point directly behind the linklevel header. The header - * itself is later accessed via m_pkthdr.header. Well, if something is ugly, here is it. - */ - mbuf_adj(first, ifnet_hdrlen(ifp)); - - /* pass the packet over to the network stack */ - error = ifnet_input(ifp, first, NULL); - - if (error) { - log(LOG_ERR, "tuntap: could not input packet into network stack.\n"); - mbuf_freem(first); - return error; - } - - return 0; -} - -int -tuntap_interface::cdev_ioctl(u_long cmd, caddr_t data, int fflag, proc_t p) -{ - auto_lock l(&lock); - - dprintf("tuntap: cdev ioctl: %d\n", (int) (cmd & 0xff)); - - switch (cmd) { - case FIONBIO: - /* set i/o mode */ - block_io = *((int *) data) ? 
false : true; - return 0; - case FIOASYNC: - /* don't allow switching it on */ - if (*((int *) data)) - return ENOTTY; - return 0; - } - - return ENOTTY; -} - -int -tuntap_interface::cdev_select(int which, void *wql, proc_t p) -{ - auto_lock l(&lock); - - int ret = 0; - - dprintf("tuntap: select. which: %d\n", which); - - switch (which) { - case FREAD: - /* check wether data is available */ - { - if (!send_queue.empty()) - ret = 1; - else { - dprintf("tuntap: select: waiting\n"); - selrecord(p, &rsel, wql); - } - } - break; - case FWRITE: - /* we are always writeable */ - ret = 1; - } - - return ret; -} - -/* interface service methods */ -errno_t -tuntap_interface::if_output(mbuf_t m) -{ - mbuf_t pkt; - - dprintf("tuntap: if output\n"); - - /* just to be sure */ - if (m == NULL) - return 0; - - if (!open || ifp == NULL || !(ifnet_flags(ifp) & IFF_UP)) { - mbuf_freem_list(m); - return EHOSTDOWN; - } - - /* check whether packet has a header */ - if ((mbuf_flags(m) & MBUF_PKTHDR) == 0) { - log(LOG_ERR, "tuntap: packet to be output has no mbuf header.\n"); - mbuf_freem_list(m); - return EINVAL; - } - - /* put the packet(s) into the output queue */ - while (m != NULL) { - /* keep pointer, iterate */ - pkt = m; - m = mbuf_nextpkt(m); - mbuf_setnextpkt(pkt, NULL); - - auto_lock l(&lock); - - if (!send_queue.enqueue(pkt)) { - mbuf_freem(pkt); - mbuf_freem_list(m); - return ENOBUFS; - } - } - - /* protect the wakeup calls with the lock, not sure they are safe. */ - { - auto_lock l(&lock); - - /* wakeup the cdev thread and notify selects */ - wakeup(this); - selwakeup(&rsel); - } - - return 0; -} - -errno_t -tuntap_interface::if_ioctl(u_int32_t cmd, void *arg) -{ - dprintf("tuntap: if ioctl: %d\n", (int) (cmd & 0xff)); - - switch (cmd) { - case SIOCSIFADDR: - { - dprintf("tuntap: if_ioctl: SIOCSIFADDR\n"); - - /* Unfortunately, ifconfig sets the address family field of an INET - * netmask to zero, which makes early mDNSresponder versions ignore - * the interface. Fix that here. This one is of the category "ugly - * workaround". Dumb Darwin... - * - * Meanwhile, Apple has fixed mDNSResponder, and recent versions of - * Leopard don't need this hack anymore. However, Tiger still has a - * broken version so we leave the hack in for now. - * - * TODO: Revisit when dropping Tiger support. - * - * Btw. If you configure other network interfaces using ifconfig, - * you run into the same problem. I still don't know how to make the - * tap devices show up in the network configuration panel... - */ - ifaddr_t ifa = (ifaddr_t) arg; - if (ifa == NULL) - return 0; - - sa_family_t af = ifaddr_address_family(ifa); - if (af != AF_INET) - return 0; - - struct ifaliasreq ifra; - int sa_size = sizeof(struct sockaddr); - if (ifaddr_address(ifa, &ifra.ifra_addr, sa_size) - || ifaddr_dstaddress(ifa, &ifra.ifra_broadaddr, sa_size) - || ifaddr_netmask(ifa, &ifra.ifra_mask, sa_size)) { - log(LOG_WARNING, - "tuntap: failed to parse interface address.\n"); - return 0; - } - - // Check that the address family fields match. If not, issue another - // SIOCAIFADDR to fix the entry. 
- if (ifra.ifra_addr.sa_family != af - || ifra.ifra_broadaddr.sa_family != af - || ifra.ifra_mask.sa_family != af) { - log(LOG_INFO, "tuntap: Fixing address family for %s%d\n", - family_name, unit); - - snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "%s%d", - family_name, unit); - ifra.ifra_addr.sa_family = af; - ifra.ifra_broadaddr.sa_family = af; - ifra.ifra_mask.sa_family = af; - - do_sock_ioctl(af, SIOCAIFADDR, &ifra); - } - - return 0; - } - - case SIOCSIFFLAGS: - return 0; - - case SIOCGIFSTATUS: - { - struct ifstat *stat = (struct ifstat *) arg; - int len; - char *p; - - if (stat == NULL) - return EINVAL; - - /* print status */ - len = strlen(stat->ascii); - p = stat->ascii + len; - if (open) { - snprintf(p, IFSTATMAX - len, "\topen (pid %u)\n", pid); - } else { - snprintf(p, IFSTATMAX - len, "\tclosed\n"); - } - - return 0; - } - - case SIOCSIFMTU: - { - struct ifreq *ifr = (struct ifreq *) arg; - - if (ifr == NULL) - return EINVAL; - - ifnet_set_mtu(ifp, ifr->ifr_mtu); - - return 0; - } - - case SIOCDIFADDR: - return 0; - - } - - return EOPNOTSUPP; -} - -errno_t -tuntap_interface::if_set_bpf_tap(bpf_tap_mode mode, int (*cb)(ifnet_t, mbuf_t)) -{ - dprintf("tuntap: mode %d\n", mode); - - auto_lock l(&bpf_lock); - - bpf_callback = cb; - bpf_mode = mode; - - return 0; -} - -errno_t -tuntap_interface::if_check_multi(const struct sockaddr *maddr) -{ - dprintf("tuntap: if_check_multi\n"); - - return EOPNOTSUPP; -} - -void -tuntap_interface::if_detached() -{ - dprintf("tuntap: if_detached\n"); - - /* wake unregister_interface() */ - thread_sync_lock.lock(); - interface_detached = true; - thread_sync_lock.wakeup(&interface_detached); - thread_sync_lock.unlock(); - - dprintf("if_detached done\n"); -} - diff --git a/ext/tap-mac/tuntap/src/tuntap.h b/ext/tap-mac/tuntap/src/tuntap.h deleted file mode 100644 index d5f398d0..00000000 --- a/ext/tap-mac/tuntap/src/tuntap.h +++ /dev/null @@ -1,301 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. - * - * The class tuntaptap_interface contains the common functionality of tuntap_interface and - * tap_interface. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __TUNTAP_H__ -#define __TUNTAP_H__ - -#include "util.h" -#include "lock.h" - -extern "C" { - -#include <sys/types.h> -#include <sys/socket.h> -#include <sys/select.h> -#include <sys/systm.h> -#include <sys/kpi_mbuf.h> - -#include <kern/locks.h> - -#include <net/if.h> -#include <net/bpf.h> -#include <net/kpi_interface.h> - -} - -extern "C" { - -errno_t tuntap_if_output(ifnet_t ifp, mbuf_t m); -errno_t tuntap_if_ioctl(ifnet_t ifp, long unsigned int cmd, void *arg); -errno_t tuntap_if_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, int (*cb)(ifnet_t, mbuf_t)); -errno_t tuntap_if_demux(ifnet_t ifp, mbuf_t m, char *header, protocol_family_t *proto); -errno_t tuntap_if_framer(ifnet_t ifp, mbuf_t *m, const struct sockaddr *dest, - const char *dest_linkaddr, const char *frame_type); -errno_t tuntap_if_add_proto(ifnet_t ifp, protocol_family_t proto, - const struct ifnet_demux_desc *ddesc, u_int32_t ndesc); -errno_t tuntap_if_del_proto(ifnet_t ifp, protocol_family_t proto); -errno_t tuntap_if_check_multi(ifnet_t ifp, const struct sockaddr *maddr); -void tuntap_if_detached(ifnet_t ifp); - -} - -/* forward declaration */ -class tuntap_interface; - -/* both interface families have their manager object that will create, initialize, shutdown and - * delete interfaces. This is (mostly) generic so it can be used both for tun and tap. The only - * exception is the interface creation, therefore this class is abstract. tun and tap have their own - * versions that simply fill in create_interface(). - */ -class tuntap_manager { - - protected: - /* manager cdev gate */ - tt_gate cdev_gate; - /* interface count */ - unsigned int count; - /* an array holding all the interface instances */ - tuntap_interface **tuntaps; - /* the major device number */ - int dev_major; - /* family name */ - char *family; - - /* wether static members are initialized */ - static bool statics_initialized; - - /* major-to-manager-map */ - static const int MAX_CDEV = 256; - static tuntap_manager *mgr_map[MAX_CDEV]; - - /* initializes static members */ - void initialize_statics(); - - public: - /* sets major device number, allocates the interface table. */ - bool initialize(unsigned int count, char *family); - - /* tries to shutdown the family. returns true if successful. the manager object may - * not be deleted if this wasn't called successfully. - */ - bool shutdown(); - - /* the destructor deletes allocated memory and unregisters the character device - * switch */ - virtual ~tuntap_manager(); - - /* here are the cdev routines for the class. They will figure out the manager object - * and call the service methods declared below. 
- */ - static int cdev_open(dev_t dev, int flags, int devtype, proc_t p); - static int cdev_close(dev_t dev, int flags, int devtype, proc_t p); - static int cdev_read(dev_t dev, uio_t uio, int ioflag); - static int cdev_write(dev_t dev, uio_t uio, int ioflag); - static int cdev_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, - proc_t p); - static int cdev_select(dev_t dev, int which, void *wql, proc_t p); - - protected: - /* Here are the actual service routines that will do the required things (creating - * interfaces and such) and forward to the interface's implementation. - */ - int do_cdev_open(dev_t dev, int flags, int devtype, proc_t p); - int do_cdev_close(dev_t dev, int flags, int devtype, proc_t p); - int do_cdev_read(dev_t dev, uio_t uio, int ioflag); - int do_cdev_write(dev_t dev, uio_t uio, int ioflag); - int do_cdev_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, proc_t p); - int do_cdev_select(dev_t dev, int which, void *wql, proc_t p); - - /* abstract method that will create an interface. Implemented by tun and tap */ - virtual tuntap_interface *create_interface() = 0; - - /* makes sure there is one idle interface available (if nothing fails */ - void ensure_idle_device(); - -}; - -/* a class implementing a mbuf packet queue. On Darwin 7 we had struct ifqueue, but that is now - * internal to the kernel for Darwin 8. So lets have our own. - */ -class tuntap_mbuf_queue { - - private: - /* output end of the queue. dequeueing takes mbufs from here */ - mbuf_t head; - /* input end. new mbufs are appended here. */ - mbuf_t tail; - - /* size */ - unsigned int size; - - /* maximum queue size */ - static const unsigned int QUEUE_SIZE = 128; - - public: - /* initialize new empty queue */ - tuntap_mbuf_queue(); - ~tuntap_mbuf_queue(); - - /* is the queue full? */ - bool full() { return size == QUEUE_SIZE; } - /* is it emtpy? */ - bool empty() { return size == 0; } - - /* enqueue an mbuf. returns true if there was space left, so the mbuf could be - * queued, false otherwise */ - bool enqueue(mbuf_t mb); - - /* tries to dequeue the next mbuf. If the queue is empty, NULL is returned */ - mbuf_t dequeue(); - - /* makes the queue empty, discarding any queue packets */ - void clear(); -}; - -class tuntap_interface { - - protected: - /* interface number */ - unsigned int unit; - /* family name */ - char *family_name; - /* family identifier */ - ifnet_family_t family; - /* interface type */ - u_int32_t type; - /* id string */ - static const unsigned int UIDLEN = 20; - char unique_id[UIDLEN]; - - /* synchronization */ - tt_mutex lock; - tt_mutex bpf_lock; - tt_mutex thread_sync_lock; - - /* the interface structure registered */ - ifnet_t ifp; - /* whether the device has been opened */ - bool open; - /* whether we are doing blocking i/o */ - bool block_io; - /* whether the interface has properly been detached */ - bool interface_detached; - /* handle to the devfs node for the character device */ - void *dev_handle; - /* the pid of the process that opened the cdev, if any */ - pid_t pid; - /* read select info */ - struct selinfo rsel; - /* bpf mode, wether filtering is on or off */ - bpf_tap_mode bpf_mode; - /* bpf callback. called when packet arrives/leaves */ - int (*bpf_callback)(ifnet_t, mbuf_t); - /* pending packets queue (for output), must be accessed with the lock held */ - tuntap_mbuf_queue send_queue; - /* whether an ioctl that we issued is currently being processed */ - bool in_ioctl; - - /* protected constructor. 
initializes most of the members */ - tuntap_interface(); - virtual ~tuntap_interface(); - - /* initialize the device */ - virtual bool initialize(unsigned short major, unsigned short unit) = 0; - - /* character device management */ - virtual bool register_chardev(unsigned short major); - virtual void unregister_chardev(); - - /* network interface management */ - virtual bool register_interface(const struct sockaddr_dl *lladdr, - void *bcaddr, u_int32_t bcaddrlen); - virtual void unregister_interface(); - virtual void cleanup_interface(); - - /* called when the character device is opened in order to intialize the network - * interface. - */ - virtual int initialize_interface() = 0; - /* called when the character device is closed to shutdown the network interface */ - virtual void shutdown_interface() = 0; - - /* check wether the interface is idle (so it can be brought down) */ - virtual bool idle(); - - /* shut it down */ - virtual void shutdown() = 0; - - /* notifies BPF of a packet coming through */ - virtual void notify_bpf(mbuf_t mb, bool out); - - /* executes a socket ioctl through a temporary socket */ - virtual void do_sock_ioctl(sa_family_t af, unsigned long cmd, void* arg); - - /* character device service methods. Called by the manager */ - virtual int cdev_open(int flags, int devtype, proc_t p); - virtual int cdev_close(int flags, int devtype, proc_t p); - virtual int cdev_read(uio_t uio, int ioflag); - virtual int cdev_write(uio_t uio, int ioflag); - virtual int cdev_ioctl(u_long cmd, caddr_t data, int fflag, proc_t p); - virtual int cdev_select(int which, void *wql, proc_t p); - - /* interface functions. friends and implementation methods */ - friend errno_t tuntap_if_output(ifnet_t ifp, mbuf_t m); - friend errno_t tuntap_if_ioctl(ifnet_t ifp, long unsigned int cmd, void *arg); - friend errno_t tuntap_if_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, - int (*cb)(ifnet_t, mbuf_t)); - friend errno_t tuntap_if_demux(ifnet_t ifp, mbuf_t m, char *header, - protocol_family_t *proto); - friend errno_t tuntap_if_framer(ifnet_t ifp, mbuf_t *m, const struct sockaddr *dest, - const char *dest_linkaddr, const char *frame_type); - friend errno_t tuntap_if_add_proto(ifnet_t ifp, protocol_family_t proto, - const struct ifnet_demux_desc *ddesc, u_int32_t ndesc); - friend errno_t tuntap_if_del_proto(ifnet_t ifp, protocol_family_t proto); - friend errno_t tuntap_if_check_multi(ifnet_t ifp, const struct sockaddr *maddr); - friend void tuntap_if_detached(ifnet_t ifp); - - virtual errno_t if_output(mbuf_t m); - virtual errno_t if_ioctl(u_int32_t cmd, void *arg); - virtual errno_t if_set_bpf_tap(bpf_tap_mode mode, int (*cb)(ifnet_t, mbuf_t)); - virtual errno_t if_demux(mbuf_t m, char *header, protocol_family_t *proto) = 0; - virtual errno_t if_framer(mbuf_t *m, const struct sockaddr *dest, - const char *dest_linkaddr, const char *frame_type) = 0; - virtual errno_t if_add_proto(protocol_family_t proto, - const struct ifnet_demux_desc *ddesc, u_int32_t ndesc) = 0; - virtual errno_t if_del_proto(protocol_family_t proto) = 0; - virtual errno_t if_check_multi(const struct sockaddr *maddr); - virtual void if_detached(); - - /* tuntap_manager feeds us with cdev input, so it is our friend */ - friend class tuntap_manager; -}; - -#endif /* __TUNTAP_H__ */ - diff --git a/ext/tap-mac/tuntap/src/tuntap_mgr.cc b/ext/tap-mac/tuntap/src/tuntap_mgr.cc deleted file mode 100644 index f41394e9..00000000 --- a/ext/tap-mac/tuntap/src/tuntap_mgr.cc +++ /dev/null @@ -1,372 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. 
- * - * tuntap_manager definition. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "tuntap.h" -#include "mem.h" - -extern "C" { - -#include <sys/conf.h> -#include <sys/param.h> -#include <sys/syslog.h> -#include <sys/systm.h> - -#include <vm/vm_kern.h> - -#include <miscfs/devfs/devfs.h> - -} - -#if 0 -#define dprintf(...) log(LOG_INFO, __VA_ARGS__) -#else -#define dprintf(...) -#endif - -/* cdevsw for tuntap_manager */ -static struct cdevsw mgr_cdevsw = -{ - tuntap_manager::cdev_open, - tuntap_manager::cdev_close, - tuntap_manager::cdev_read, - tuntap_manager::cdev_write, - tuntap_manager::cdev_ioctl, - eno_stop, - eno_reset, - NULL, - tuntap_manager::cdev_select, - eno_mmap, - eno_strat, - eno_getc, - eno_putc, - 0 -}; - -/* tuntap_manager members */ -tuntap_manager *tuntap_manager::mgr_map[MAX_CDEV]; - -bool tuntap_manager::statics_initialized = false; - -/* static initializer */ -void -tuntap_manager::initialize_statics() -{ - dprintf("initializing mgr_map\n"); - - /* initialize the major-to-manager map */ - for (int i = 0; i < MAX_CDEV; i++) - mgr_map[i] = NULL; - - statics_initialized = true; -} - -bool -tuntap_manager::initialize(unsigned int count, char *family) -{ - this->count = count; - this->family = family; - this->tuntaps = NULL; - - if (!statics_initialized) - initialize_statics(); - - /* make sure noone can access the character devices until we are done */ - auto_lock l(&cdev_gate); - - /* register the switch for the tap character devices */ - dev_major = cdevsw_add(-1, &mgr_cdevsw); - if (dev_major == -1) { - log(LOG_ERR, "%s: could not register character device switch.\n", family); - return false; - } - - /* allocate memory for the interface instance table */ - tuntaps = (tuntap_interface **) mem_alloc(count * sizeof(tuntap_interface *)); - if (tuntaps == NULL) - { - log(LOG_ERR, "%s: no memory!\n", family); - return false; - } - - bzero(tuntaps, count * sizeof(tuntap_interface *)); - - /* Create the interfaces. This will only add the character devices. The network devices will - * be created upon open()ing the corresponding character devices. 
- */ - for (int i = 0; i < (int) count; i++) - { - tuntaps[i] = create_interface(); - - if (tuntaps[i] != NULL) - { - if (tuntaps[i]->initialize(dev_major, i)) - { - continue; - } - - /* error here. current interface needs to be shut down */ - i++; - } - - /* something went wrong. clean up. */ - while (--i >= 0) - { - tuntaps[i]->shutdown(); - delete tuntaps[i]; - } - - return false; - } - - /* register the new family in the mgr switch */ - mgr_map[dev_major] = this; - - log(LOG_INFO, "%s kernel extension version %s <mattias.nissler@gmx.de>\n", - family, TUNTAP_VERSION); - - return true; -} - -bool -tuntap_manager::shutdown() -{ - bool ok = true; - - /* we halt the whole thing while we check whether we can shutdown */ - auto_lock l(&cdev_gate); - - /* anyone in? */ - if (cdev_gate.is_anyone_in()) { - dprintf("tuntap_mgr: won't shutdown, threads still behind the gate."); - ok = false; - } else { - /* query the interfaces to see if shutting down is ok */ - if (tuntaps != NULL) { - for (unsigned int i = 0; i < count; i++) { - if (tuntaps[i] != NULL) - ok &= tuntaps[i]->idle(); - } - - /* if yes, do it now */ - if (ok) { - for (unsigned int i = 0; i < count; i++) { - if (tuntaps[i] != NULL) { - tuntaps[i]->shutdown(); - delete tuntaps[i]; - tuntaps[i] = NULL; - } - } - } - } - } - - /* unregister the character device switch */ - if (ok) { - if (dev_major != -1 && cdevsw_remove(dev_major, &mgr_cdevsw) == -1) { - log(LOG_WARNING, - "%s: character device switch got lost. strange.\n", family); - } - mgr_map[dev_major] = NULL; - dev_major = -1; - - /* at this point there is still a chance that some thread hangs at the cdev_gate in - * one of the cdev service functions. I can't imagine any way that would aviod this. - * So lets unblock the gate such that they fail. - */ - unsigned int old_number; - do { - old_number = cdev_gate.get_ticket_number(); - - dprintf("tuntap_manager: waiting for other threads to give up.\n"); - - /* wait one second */ - cdev_gate.sleep(&cdev_gate, 1000000); - - } while (cdev_gate.get_ticket_number() != old_number); - - /* I hope it is safe to unload now. */ - - } else { - log(LOG_WARNING, "%s: won't unload, at least one interface is busy.\n", family); - } - - dprintf("tuntap manager: shutdown %s\n", ok ? "ok" : "failed"); - - return ok; -} - -tuntap_manager::~tuntap_manager() -{ - dprintf("freeing interface table\n"); - - /* free memory */ - if (tuntaps != NULL) - mem_free(tuntaps, count * sizeof(tuntap_interface *)); -} - -/* service method dispatchers */ -int -tuntap_manager::cdev_open(dev_t dev, int flags, int devtype, proc_t p) -{ - return (mgr_map[major(dev)] == NULL ? ENOENT - : mgr_map[major(dev)]->do_cdev_open(dev, flags, devtype, p)); -} - -int -tuntap_manager::cdev_close(dev_t dev, int flags, int devtype, proc_t p) -{ - return (mgr_map[major(dev)] == NULL ? EBADF - : mgr_map[major(dev)]->do_cdev_close(dev, flags, devtype, p)); -} - -int -tuntap_manager::cdev_read(dev_t dev, uio_t uio, int ioflag) -{ - return (mgr_map[major(dev)] == NULL ? EBADF - : mgr_map[major(dev)]->do_cdev_read(dev, uio, ioflag)); -} - -int -tuntap_manager::cdev_write(dev_t dev, uio_t uio, int ioflag) -{ - return (mgr_map[major(dev)] == NULL ? EBADF - : mgr_map[major(dev)]->do_cdev_write(dev, uio, ioflag)); -} - -int -tuntap_manager::cdev_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, proc_t p) -{ - return (mgr_map[major(dev)] == NULL ? 
EBADF - : mgr_map[major(dev)]->do_cdev_ioctl(dev, cmd, data, fflag, p)); -} - -int -tuntap_manager::cdev_select(dev_t dev, int which, void *wql, proc_t p) -{ - return (mgr_map[major(dev)] == NULL ? EBADF - : mgr_map[major(dev)]->do_cdev_select(dev, which, wql, p)); -} - -/* character device service methods */ -int -tuntap_manager::do_cdev_open(dev_t dev, int flags, int devtype, proc_t p) -{ - int dmin = minor(dev); - int error = ENOENT; - - cdev_gate.enter(); - - if (dmin < (int) count && dmin >= 0 && tuntaps[dmin] != NULL) - error = tuntaps[dmin]->cdev_open(flags, devtype, p); - - cdev_gate.exit(); - - return error; -} - -int -tuntap_manager::do_cdev_close(dev_t dev, int flags, int devtype, proc_t p) -{ - int dmin = minor(dev); - int error = EBADF; - - cdev_gate.enter(); - - if (dmin < (int) count && dmin >= 0 && tuntaps[dmin] != NULL) - error = tuntaps[dmin]->cdev_close(flags, devtype, p); - - cdev_gate.exit(); - - return error; -} - -int -tuntap_manager::do_cdev_read(dev_t dev, uio_t uio, int ioflag) -{ - int dmin = minor(dev); - int error = EBADF; - - cdev_gate.enter(); - - if (dmin < (int) count && dmin >= 0 && tuntaps[dmin] != NULL) - error = tuntaps[dmin]->cdev_read(uio, ioflag); - - cdev_gate.exit(); - - return error; -} - -int -tuntap_manager::do_cdev_write(dev_t dev, uio_t uio, int ioflag) -{ - int dmin = minor(dev); - int error = EBADF; - - cdev_gate.enter(); - - if (dmin < (int) count && dmin >= 0 && tuntaps[dmin] != NULL) - error = tuntaps[dmin]->cdev_write(uio, ioflag); - - cdev_gate.exit(); - - return error; -} - -int -tuntap_manager::do_cdev_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, proc_t p) -{ - int dmin = minor(dev); - int error = EBADF; - - cdev_gate.enter(); - - if (dmin < (int) count && dmin >= 0 && tuntaps[dmin] != NULL) - error = tuntaps[dmin]->cdev_ioctl(cmd, data, fflag, p); - - cdev_gate.exit(); - - return error; -} - -int -tuntap_manager::do_cdev_select(dev_t dev, int which, void *wql, proc_t p) -{ - int dmin = minor(dev); - int error = EBADF; - - cdev_gate.enter(); - - if (dmin < (int) count && dmin >= 0 && tuntaps[dmin] != NULL) - error = tuntaps[dmin]->cdev_select(which, wql, p); - - cdev_gate.exit(); - - return error; -} - diff --git a/ext/tap-mac/tuntap/src/util.h b/ext/tap-mac/tuntap/src/util.h deleted file mode 100644 index 0f6955e8..00000000 --- a/ext/tap-mac/tuntap/src/util.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * ip tunnel/ethertap device for MacOSX. - * - * Some utilities and misc stuff. - */ -/* - * Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de> - * - * Redistribution and use in source and binary forms, with or without modification, are permitted - * provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials provided - * with the distribution. - * 3. The name of the author may not be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __UTIL_H__
-#define __UTIL_H__
-
-extern "C" {
-
-/* In Darwin 8 (OS X Tiger) there is a problem with struct selinfo. It was made `private' to the
- * kernel, so its definition is not available from the headers in Kernel.framework. However, we need
- * to declare something :-(
- */
-struct selinfo {
- char data[128]; /* should be enough... */
-};
-
-} /* extern "C" */
-
-#endif /* __UTIL_H__ */
-
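
The character-device half of the deleted driver follows the usual BSD tun/tap contract implemented by cdev_open(), cdev_read(), cdev_write(), cdev_ioctl() and cdev_select() above: a user-space process opens the device node, reads one link-layer frame per read(2) and writes one frame per write(2), may switch to non-blocking mode with FIONBIO, and may wait for traffic with select(2), which the driver wakes through the selwakeup() calls in if_output() and cdev_close(). The following minimal sketch of that usage is not taken from the deleted sources; the device path /dev/tap0 and the need for root privileges are assumptions.

/* user-space sketch: read link-layer frames from the tap character device */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/tap0", O_RDWR);	/* device node registered by the kext (assumed name) */
	if (fd < 0) { perror("open"); return 1; }

	int on = 1;
	if (ioctl(fd, FIONBIO, &on) < 0)	/* handled by cdev_ioctl(): clears block_io */
		perror("FIONBIO");

	for (;;) {
		fd_set rfds;
		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);
		/* readability is signalled by cdev_select()/selwakeup() once the send queue is non-empty */
		if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0) { perror("select"); break; }

		unsigned char frame[2048];	/* cdev_read() hands over one whole frame per call */
		ssize_t n = read(fd, frame, sizeof(frame));
		if (n < 0) {
			if (errno == EWOULDBLOCK)	/* lost a race for the queue, wait again */
				continue;
			perror("read");
			break;
		}
		printf("got frame of %zd bytes\n", n);
	}
	close(fd);
	return 0;
}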
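
tuntap_mbuf_queue, declared in tuntap.h above, is a fixed-capacity FIFO (QUEUE_SIZE = 128): enqueue() refuses new packets instead of growing without bound, dequeue() returns NULL when nothing is pending, and clear() discards the backlog when the character device is closed. That is why if_output() can fail with ENOBUFS when user space does not drain the device fast enough. The kernel implementation chains mbuf_t packets with mbuf_setnextpkt(); the user-space analogue below only illustrates the same bounded-queue contract and is not taken from the deleted sources.

#include <cstddef>
#include <deque>
#include <iostream>

/* simplified analogue of tuntap_mbuf_queue: a bounded FIFO with the same interface shape */
template <typename Packet>
class bounded_queue {
public:
	explicit bounded_queue(std::size_t capacity) : capacity_(capacity) {}

	bool full() const { return q_.size() == capacity_; }
	bool empty() const { return q_.empty(); }

	/* returns false when full, mirroring the enqueue() failure that if_output() turns into ENOBUFS */
	bool enqueue(const Packet &p) {
		if (full())
			return false;
		q_.push_back(p);
		return true;
	}

	/* returns a default-constructed Packet when empty (the kernel version returns NULL) */
	Packet dequeue() {
		if (q_.empty())
			return Packet();
		Packet p = q_.front();
		q_.pop_front();
		return p;
	}

	/* cdev_close() does the equivalent of this to drop whatever is still pending */
	void clear() { q_.clear(); }

private:
	std::size_t capacity_;
	std::deque<Packet> q_;
};

int main()
{
	bounded_queue<int> q(128);
	for (int i = 0; i < 130; ++i)
		if (!q.enqueue(i))
			std::cout << "queue full, dropping packet " << i << "\n";	/* printed for 128 and 129 */
	return 0;
}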
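
The SIOCSIFADDR branch of if_ioctl() works around early mDNSResponder builds by rewriting the sa_family fields of the freshly configured address and re-issuing SIOCAIFADDR through do_sock_ioctl(), which opens a short-lived socket just to carry the request; the in_ioctl flag exists because that re-issued ioctl immediately re-enters if_ioctl(). A hedged user-space equivalent of the same SIOCAIFADDR request is sketched below; the interface name tap0 and the 10.0.0.0/24 addresses are illustrative assumptions, and the call needs root.

/* user-space sketch of the SIOCAIFADDR request the driver re-issues internally */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <unistd.h>

static void fill_sockaddr(struct sockaddr *sa, const char *ip)
{
	struct sockaddr_in *sin = (struct sockaddr_in *) sa;
	memset(sin, 0, sizeof(*sin));
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;	/* the field the old ifconfig left zeroed on the netmask */
	inet_pton(AF_INET, ip, &sin->sin_addr);
}

int main(void)
{
	struct ifaliasreq ifra;
	memset(&ifra, 0, sizeof(ifra));
	strncpy(ifra.ifra_name, "tap0", sizeof(ifra.ifra_name));	/* interface name is an assumption */
	fill_sockaddr(&ifra.ifra_addr, "10.0.0.2");
	fill_sockaddr(&ifra.ifra_broadaddr, "10.0.0.255");
	fill_sockaddr(&ifra.ifra_mask, "255.255.255.0");

	/* same idea as do_sock_ioctl(): a temporary socket exists only to issue the ioctl */
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0 || ioctl(s, SIOCAIFADDR, &ifra) < 0)
		perror("SIOCAIFADDR");
	if (s >= 0)
		close(s);
	return 0;
}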