# ext/librethinkdbxx/test/upstream/limits.yaml
desc: Tests array limit variations
table_variable_name: tbl
tests:

  # test basic array limits
  - cd: r.expr([1,1,1,1]).union([1, 1, 1, 1])
    runopts:
      array_limit: 8
    ot: [1,1,1,1,1,1,1,1]
  - cd: r.expr([1,2,3,4]).union([5, 6, 7, 8])
    runopts:
      array_limit: 4
    ot: err("ReqlResourceLimitError", "Array over size limit `4`.", [0])
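  # NOTE: `array_limit` is a per-query run option. A driver passes it at run
  # time; e.g. with the Python driver (assuming an open connection `conn`):
  #   r.expr([1, 1, 1, 1]).union([1, 1, 1, 1]).run(conn, array_limit=8)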

  # test array limits on query creation
  - cd: r.expr([1,2,3,4,5,6,7,8])
    runopts:
      array_limit: 4
    ot: err("ReqlResourceLimitError", "Array over size limit `4`.", [0])
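  # The limit is enforced as soon as the array term itself is built, before
  # any table data is touched.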

  # test invalid array limits
  - cd: r.expr([1,2,3,4,5,6,7,8])
    runopts:
      array_limit: -1
    ot: err("ReqlQueryLogicError", "Illegal array size limit `-1`.  (Must be >= 1.)", [])

  - cd: r.expr([1,2,3,4,5,6,7,8])
    runopts:
      array_limit: 0
    ot: err("ReqlQueryLogicError", "Illegal array size limit `0`.  (Must be >= 1.)", [])
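  # Non-positive limits are rejected up front with a query-logic error rather
  # than a resource-limit error.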

  # make an enormous (100,000 element) array
  - def: ten_l = r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
  - def:
      js: ten_f = function(l) { return ten_l }
      py: ten_f = lambda l: list(range(1, 11))
  - def:
      js: huge_l = r.expr(ten_l).concatMap(ten_f).concatMap(ten_f).concatMap(ten_f).concatMap(ten_f)
      py: huge_l = r.expr(ten_l).concat_map(ten_f).concat_map(ten_f).concat_map(ten_f).concat_map(ten_f)
      rb: huge_l = r.expr(ten_l).concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}
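  # ten_l has 10 elements and each concat_map multiplies the length by 10, so
  # huge_l has 10 * 10^4 = 100,000 elements; appending one more gives 100,001,
  # which fits exactly under the raised limit below.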
  - cd: huge_l.append(1).count()
    runopts:
      array_limit: 100001
    ot: 100001

  # attempt to insert enormous array
  - cd: tbl.insert({'id':0, 'array':huge_l.append(1)})
    runopts:
      array_limit: 100001
    ot: partial({'errors':1, 'first_error':"Array too large for disk writes (limit 100,000 elements)."})
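  # The 100,000-element cap on arrays written to disk is separate from
  # `array_limit`: the insert fails even though the limit was raised to 100001.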

  - cd: tbl.get(0)
    runopts:
      array_limit: 100001
    ot: (null)

  # attempt to read array that violates limit from disk
  - cd: tbl.insert({'id':1, 'array':ten_l})
    ot: ({'deleted':0,'replaced':0,'unchanged':0,'errors':0,'skipped':0,'inserted':1})
  - cd: tbl.get(1)
    runopts:
      array_limit: 4
    ot: ({'array':[1,2,3,4,5,6,7,8,9,10],'id':1})
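  # The read succeeds despite `array_limit: 4`: the limit is not applied to
  # documents already stored on disk.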


  # Test that the changefeed queue size actually causes changes to be sent early.
  - cd: tbl.delete().get_field('deleted')
    ot: 1

  - cd: c = tbl.changes({squash:1000000, changefeed_queue_size:10})
    py: c = tbl.changes(squash=1000000, changefeed_queue_size=10)
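  # With a squash window of 1,000,000 seconds the server would normally hold
  # changes back; capping the queue at 10 should force buffered changes to be
  # flushed early instead.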

  - cd: tbl.insert([{'id':0}, {'id':1}, {'id':2}, {'id':3}, {'id':4}, {'id':5}, {'id':6}]).get_field('inserted')
    ot: 7
  - py: fetch(c, 7)
    rb: fetch(c, 7)
    ot: bag([{'old_val':null, 'new_val':{'id':0}},
             {'old_val':null, 'new_val':{'id':1}},
             {'old_val':null, 'new_val':{'id':2}},
             {'old_val':null, 'new_val':{'id':3}},
             {'old_val':null, 'new_val':{'id':4}},
             {'old_val':null, 'new_val':{'id':5}},
             {'old_val':null, 'new_val':{'id':6}}])

  - cd: tbl.insert([{'id':7}, {'id':8}, {'id':9}, {'id':10}, {'id':11}, {'id':12}, {'id':13}]).get_field('inserted')
    ot: 7
  - py: fetch(c, 7)
    rb: fetch(c, 7)
    ot: bag([{'old_val':null, 'new_val':{'id':7}},
             {'old_val':null, 'new_val':{'id':8}},
             {'old_val':null, 'new_val':{'id':9}},
             {'old_val':null, 'new_val':{'id':10}},
             {'old_val':null, 'new_val':{'id':11}},
             {'old_val':null, 'new_val':{'id':12}},
             {'old_val':null, 'new_val':{'id':13}}])

  - cd: tbl.delete().get_field('deleted')
    ot: 14

  - cd: c2 = tbl.changes({squash:1000000})
    py: c2 = tbl.changes(squash=1000000)
    runopts:
      changefeed_queue_size: 10
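  # Same test as above, but with `changefeed_queue_size` passed as a run
  # option instead of as an optional argument to `changes()`.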


  - cd: tbl.insert([{'id':0}, {'id':1}, {'id':2}, {'id':3}, {'id':4}, {'id':5}, {'id':6}]).get_field('inserted')
    ot: 7
  - py: fetch(c2, 7)
    rb: fetch(c2, 7)
    ot: bag([{'old_val':null, 'new_val':{'id':0}},
             {'old_val':null, 'new_val':{'id':1}},
             {'old_val':null, 'new_val':{'id':2}},
             {'old_val':null, 'new_val':{'id':3}},
             {'old_val':null, 'new_val':{'id':4}},
             {'old_val':null, 'new_val':{'id':5}},
             {'old_val':null, 'new_val':{'id':6}}])

  - cd: tbl.insert([{'id':7}, {'id':8}, {'id':9}, {'id':10}, {'id':11}, {'id':12}, {'id':13}]).get_field('inserted')
    ot: 7
  - py: fetch(c2, 7)
    rb: fetch(c2, 7)
    ot: bag([{'old_val':null, 'new_val':{'id':7}},
             {'old_val':null, 'new_val':{'id':8}},
             {'old_val':null, 'new_val':{'id':9}},
             {'old_val':null, 'new_val':{'id':10}},
             {'old_val':null, 'new_val':{'id':11}},
             {'old_val':null, 'new_val':{'id':12}},
             {'old_val':null, 'new_val':{'id':13}}])