1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
#include "internal.hpp"
namespace CaDiCaL {
/*------------------------------------------------------------------------*/
// Once in a while we reduce, e.g., we remove learned clauses which are
// supposed to be less useful in the future. This is done in increasing
// intervals, which has the effect of allowing more and more learned clause
// to be kept for a longer period. The number of learned clauses kept
// in memory corresponds to an upper bound on the 'space' of a resolution
// proof needed to refute a formula in proof complexity sense.
// A reduction round is due when reductions are enabled at all, there is at
// least one redundant clause that could be removed, and the number of
// conflicts has reached the scheduled limit 'lim.reduce'.
bool Internal::reducing () {
  if (!opts.reduce || !stats.current.redundant)
    return false;
  return stats.conflicts >= lim.reduce;
}
/*------------------------------------------------------------------------*/
// Even less regularly we are flushing all redundant clauses.
// Flushing of all redundant clauses is due when enabled and the conflict
// counter has reached the scheduled limit 'lim.flush'.
bool Internal::flushing () {
  return opts.flush && stats.conflicts >= lim.flush;
}
/*------------------------------------------------------------------------*/
// Mark all redundant clauses as garbage, except those we must or want to
// keep: reasons of current assignments, recently used tier-1 clauses, and
// heavily used tier-2 clauses.  The per-clause 'used' counter is aged
// (decremented) in the same pass.
void Internal::mark_clauses_to_be_flushed () {
  const int keep_glue = tier1[false];
  const int keep_glue_if_hot = max (keep_glue, tier2[false]);
  for (const auto &c : clauses) {
    if (!c->redundant)
      continue; // keep irredundant
    if (c->garbage)
      continue; // already marked as garbage
    if (c->reason)
      continue; // need to keep reasons
    // Snapshot the usage counter, then age it for the next round.
    const unsigned usage = c->used;
    if (usage)
      c->used--;
    const bool recently_used_tier1 = usage && c->glue <= keep_glue;
    const bool heavily_used_tier2 =
        usage >= max_used - 1 && c->glue <= keep_glue_if_hot;
    if (recently_used_tier1 || heavily_used_tier2)
      continue;
    mark_garbage (c); // flush unused clauses
    if (c->hyper)
      stats.flush.hyper++;
    else
      stats.flush.learned++;
  }
  // No change to 'lim.kept{size,glue}'.
}
/*------------------------------------------------------------------------*/
// Clauses of larger glue or larger size are considered less useful.
//
// We also follow the observations made by the Glucose team in their
// IJCAI'09 paper and keep all low glue clauses limited by
// 'options.keepglue' (typically '2').
//
// In earlier versions we pre-computed a 64-bit sort key per clause and
// wrapped a pointer to the clause and the 64-bit sort key into a separate
// data structure for sorting. This was probably faster but awkward and
// so we moved back to a simpler scheme which also uses 'stable_sort'
// instead of 'rsort' below. Sorting here is not a hot-spot anyhow.
// Strict weak ordering putting the least useful clauses first: larger glue
// is less useful, and among equal glue larger size is less useful.
struct reduce_less_useful {
  bool operator() (const Clause *c, const Clause *d) const {
    if (c->glue != d->glue)
      return c->glue > d->glue;
    return c->size > d->size;
  }
};
// This function implements the important reduction policy. It determines
// which redundant clauses are considered not useful and thus will be
// collected in a subsequent garbage collection phase.
// Select the least useful redundant clauses and mark them as garbage.
// Candidates are collected, sorted by decreasing (glue, size), and the
// first 'reducetarget' percent are removed.  Clauses protected as reasons,
// recently used tier-1 clauses and heavily used tier-2 clauses are kept;
// hyper resolvents survive at most one round unless used.
void Internal::mark_useless_redundant_clauses_as_garbage () {
  // We use a separate stack for sorting candidates for removal. This uses
  // (slightly) more memory but has the advantage to keep the relative order
  // in 'clauses' intact, which actually due to using stable sorting goes
  // into the candidate selection (more recently learned clauses are kept if
  // they otherwise have the same glue and size).
  vector<Clause *> stack;
  const int tier1limit = tier1[false];
  const int tier2limit = max (tier1limit, tier2[false]);
  stack.reserve (stats.current.redundant);
  for (const auto &c : clauses) {
    if (!c->redundant)
      continue; // Keep irredundant.
    if (c->garbage)
      continue; // Skip already marked.
    if (c->reason)
      continue; // Need to keep reasons.
    // Snapshot 'used' before aging it; the keep tests below deliberately
    // use the pre-decrement value.
    const unsigned used = c->used;
    if (used)
      c->used--;
    if (c->glue <= tier1limit && used)
      continue; // Tier-1 clause used since the last round: keep.
    if (c->glue <= tier2limit && used >= max_used - 1)
      continue; // Tier-2 clause used very recently: keep (second chance).
    if (c->hyper) { // Hyper binary and ternary resolvents
      assert (c->size <= 3); // are only kept for one reduce round
      if (!used)
        mark_garbage (c); // unless
      continue; // used recently.
    }
    stack.push_back (c);
  }
  // Least useful candidates first; stable so that among equal (glue, size)
  // the older clauses are removed before more recently learned ones.
  stable_sort (stack.begin (), stack.end (), reduce_less_useful ());
  // Remove 'reducetarget' percent of the candidates.
  size_t target = 1e-2 * opts.reducetarget * stack.size ();
  // This is defensive code, which I usually consider a bug, but here I am
  // just not sure that using floating points in the line above is precise
  // in all situations and instead of figuring that out, I just use this.
  //
  if (target > stack.size ())
    target = stack.size ();
  PHASE ("reduce", stats.reductions, "reducing %zd clauses %.0f%%", target,
         percent (target, stats.current.redundant));
  auto i = stack.begin ();
  const auto t = i + target;
  while (i != t) {
    Clause *c = *i++;
    LOG (c, "marking useless to be collected");
    mark_garbage (c);
    stats.reduced++;
  }
  // Record the maximum size and glue over the kept candidates, which is
  // reported below and used elsewhere as a heuristic.
  lim.keptsize = lim.keptglue = 0;
  const auto end = stack.end ();
  for (i = t; i != end; i++) {
    Clause *c = *i;
    LOG (c, "keeping");
    if (c->size > lim.keptsize)
      lim.keptsize = c->size;
    if (c->glue > lim.keptglue)
      lim.keptglue = c->glue;
  }
  erase_vector (stack);
  PHASE ("reduce", stats.reductions, "maximum kept size %d glue %d",
         lim.keptsize, lim.keptglue);
}
/*------------------------------------------------------------------------*/
// If chronological backtracking produces out-of-order assigned units, then
// it is necessary to completely propagate them at the root level in order
// to derive all implied units. Otherwise the blocking literals in
// 'flush_watches' are messed up and assertion 'FW1' fails.
// Search the trail above the first decision for a root-level (out-of-order
// assigned) unit.  If one exists, backtrack to the root level and propagate
// it there so that all implied units are derived.  Returns 'false' exactly
// when this root-level propagation yields a conflict, in which case the
// empty clause is learned.
bool Internal::propagate_out_of_order_units () {
  if (!level)
    return true;
  int oou = 0;
  for (size_t i = control[1].trail; !oou && i < trail.size (); i++) {
    const int lit = trail[i];
    assert (val (lit) > 0);
    if (var (lit).level)
      continue;
    // Fixed: assign before logging.  Previously the LOG statement printed
    // 'oou' while it was still zero, so the found unit was reported as '0'.
    oou = lit;
    LOG ("found out-of-order assigned unit %d", oou);
  }
  if (!oou)
    return true;
  assert (opts.chrono || external_prop || did_external_prop);
  backtrack (0);
  if (propagate ())
    return true;
  learn_empty_clause ();
  return false;
}
/*------------------------------------------------------------------------*/
// reduction is scheduled with reduceint, reducetarget and reduceopt.
// with reduceopt=1 the number of learnt clauses scale with
// sqrt of conflicts times reduceint
// the scaling is the same as with reduceopt=0 (the classical default)
// however, the constants are different. To avoid this (and get roughly the
// same behaviour with reduceopt=0 and reduceopt=1) we need to scale the
// interval, namely (reduceint^2/2)
// Lastly, reduceopt=2 just replaces sqrt conflicts with log conflicts.
// The learnt clauses should not be bigger than
// 1/reducetarget * reduceint * function (conflicts)
// for function being log if reduceint=2 an sqrt otherwise.
// This is however only the theoretical target and second chance for
// tier2 clauses and very long lifespan of tier1 clauses (through used flag)
// make this behave differently.
// reduceinit shifts the curve to the right, increasing the number of
// clauses in the solver. This impact will decrease over time.
// Top-level reduction entry point: garbage-collect satisfied and useless
// redundant clauses (or flush all redundant clauses when a flush is due)
// and reschedule the next reduction according to 'opts.reduceopt'.
void Internal::reduce () {
  START (reduce);
  stats.reductions++;
  report ('.', 1);
  bool flush = flushing ();
  if (flush)
    stats.flush.count++;
  // Out-of-order units (from chronological backtracking or external
  // propagation) must be propagated at the root level first; if that
  // derives the empty clause we are done immediately.
  if (!propagate_out_of_order_units ())
    goto DONE;
  mark_satisfied_clauses_as_garbage ();
  protect_reasons (); // Reason clauses must survive collection.
  if (flush)
    mark_clauses_to_be_flushed ();
  else
    mark_useless_redundant_clauses_as_garbage ();
  garbage_collection ();
  // Compute the conflict interval 'delta' until the next reduction.
  {
    int64_t delta = opts.reduceint;
    double factor = stats.reductions + 1;
    if (opts.reduceopt ==
        0) // adjust delta such this is the same as reduceopt=1
      delta = delta * delta / 2;
    else if (opts.reduceopt == 1) {
      // this is the same as reduceopt=0 if reduceint = sqrt (reduceint) =
      // 17
      factor = sqrt ((double) stats.conflicts);
    } else if (opts.reduceopt == 2)
      // log scaling instead
      factor = log ((double) stats.conflicts);
    if (factor < 1)
      factor = 1;
    delta = delta * factor;
    // Stretch the interval further on instances with many irredundant
    // clauses (decimal-log scaling above 100k clauses).
    if (irredundant () > 1e5) {
      delta *= log (irredundant () / 1e4) / log (10);
    }
    if (delta < 1)
      delta = 1;
    lim.reduce = stats.conflicts + delta;
    PHASE ("reduce", stats.reductions,
           "new reduce limit %" PRId64 " after %" PRId64 " conflicts",
           lim.reduce, delta);
  }
  // Flushing happens at geometrically growing intervals.
  if (flush) {
    PHASE ("flush", stats.flush.count, "new flush increment %" PRId64 "",
           inc.flush);
    inc.flush *= opts.flushfactor;
    lim.flush = stats.conflicts + inc.flush;
    PHASE ("flush", stats.flush.count, "new flush limit %" PRId64 "",
           lim.flush);
  }
  last.reduce.conflicts = stats.conflicts;
DONE:
  report (flush ? 'f' : '-');
  STOP (reduce);
}
} // namespace CaDiCaL