1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
|
/* @file dur_commitjob.h used by dur.cpp
*/
/**
* Copyright (C) 2009 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "../util/alignedbuilder.h"
#include "../util/mongoutils/hash.h"
#include "../util/concurrency/synchronization.h"
#include "cmdline.h"
#include "durop.h"
#include "dur.h"
#include "taskqueue.h"
//#define DEBUG_WRITE_INTENT 1
namespace mongo {
namespace dur {
/** declaration of an intent to write to a region of a memory mapped view
*
* We store the end rather than the start pointer to make operator< faster
* since that is heavily used in set lookup.
*/
struct WriteIntent { /* copyable */
    // FIX: len was left uninitialized by the default constructor, so
    // start()/length() on a default-constructed intent (e.g. Writes::_last)
    // read indeterminate memory. Initialize it to zero (an empty intent).
    WriteIntent() : w_ptr(0), p(0), len(0) { }
    WriteIntent(void *a, unsigned b) : w_ptr(0), p((char*)a+b), len(b) { }

    void* start() const { return (char*)p - len; }   // first byte of the region
    void* end() const { return p; }                  // one past the last byte
    unsigned length() const { return len; }

    /** ordered by end pointer -- see the class banner comment: storing the end
        makes this heavily-used set comparison cheap. */
    bool operator < (const WriteIntent& rhs) const { return end() < rhs.end(); }

    // can they be merged? (touching regions count as overlapping)
    bool overlaps(const WriteIntent& rhs) const {
        return (start() <= rhs.end() && end() >= rhs.start());
    }

    // is merging necessary? (rhs entirely inside this intent)
    bool contains(const WriteIntent& rhs) const {
        return (start() <= rhs.start() && end() >= rhs.end());
    }

    // merge into me (defined in dur.cpp)
    void absorb(const WriteIntent& other);

    friend std::ostream& operator << (std::ostream& out, const WriteIntent& wi) {
        return (out << "p: " << wi.p << " end: " << wi.end() << " len: " << wi.len);
    }

    mutable void *w_ptr;  // writable mapping of p.
                          // mutable because set::iterator is const but this isn't used in op<
#if defined(_EXPERIMENTAL)
    mutable unsigned ofsInJournalBuffer;
#endif

private:
    void *p;       // intent to write up to p
    unsigned len;  // up to this len
};
/** try to remember things we have already marked for journaling. false negatives are ok if infrequent -
we will just log them twice.
*/
template<int Prime>
class Already : boost::noncopyable {
public:
    Already() { clear(); }

    /** forget everything remembered so far.
        note: only the POD slot array is cleared; memset'ing *this would
        scribble over the (empty but non-POD) boost::noncopyable base. */
    void clear() { memset(nodes, 0, sizeof(nodes)); }

    /* see if we have Already recorded/indicated our write intent for this region of memory.
       automatically upgrades the length if the length was shorter previously.
       @return true if already indicated.
    */
    bool checkAndSet(void* p, int len) {
        unsigned x = mongoutils::hashPointer(p);
        // FIX: this must be a reference. the original code copied the slot,
        // so the updates below never reached the cache and every intent was
        // treated as new -- i.e. the dedup cache never deduped anything.
        pair<void*, int>& nd = nodes[x % N];
        if( nd.first == p ) {
            if( nd.second < len ) {
                nd.second = len;
                return false; // haven't indicated this len yet
            }
            return true; // already indicated
        }
        nd.first = p;
        nd.second = len;
        return false; // a new set
    }

private:
    enum { N = Prime }; // this should be small the idea is that it fits in the cpu cache easily
    pair<void*,int> nodes[N];  // open hash table of (pointer, longest declared len)
};
/** our record of pending/uncommitted write intents */
class Writes : boost::noncopyable {
    /** a deferred write-intent record: queued cheaply on _deferred and merged
        into the _writes set later. */
    struct D {
        void *p;        // start of the region we intend to write
        unsigned len;   // length of the region in bytes
        // callback that applies a queued D -- presumably invoked when
        // _deferred is drained; defined in dur.cpp (TODO confirm).
        static void go(const D& d);
    };
public:
    TaskQueue<D> _deferred;           // intents are queued here first (cheap path)
    Already<127> _alreadyNoted;       // cache of intents already noted, to avoid re-logging
    set<WriteIntent> _writes;         // the merged, ordered set of pending write intents
    vector< shared_ptr<DurOp> > _ops; // all the ops other than basic writes
    bool _drained; // _deferred is drained? for asserting/testing

    /** reset the Writes structure (empties all the above) */
    void clear();

    /** merges into set (ie non-deferred version) */
    void _insertWriteIntent(void* p, int len);

    /** record an intent to write [p, p+len). fast path: just enqueue a D on
        _deferred; the set merge happens later when the queue is drained. */
    void insertWriteIntent(void* p, int len) {
#if defined(DEBUG_WRITE_INTENT)
        // debug bookkeeping: remember the longest intent declared for p
        if( _debug[p] < len )
            _debug[p] = len;
#endif
        D d;
        d.p = p;
        d.len = len;
        _deferred.defer(d);
    }

#ifdef _DEBUG
    WriteIntent _last;   // most recent intent -- exposed via CommitJob::lastWrite()
#endif
#if defined(DEBUG_WRITE_INTENT)
    map<void*,int> _debug;  // per-pointer longest declared intent (debug builds only)
#endif
};
// check (DEBUG_WRITE_INTENT builds only) that a write intent covering this
// region was previously declared; compiles to a no-op otherwise.
// the checked variant is presumably defined in dur.cpp -- TODO confirm.
#if defined(DEBUG_WRITE_INTENT)
void assertAlreadyDeclared(void *, int len);
#else
inline void assertAlreadyDeclared(void *, int len) { }
#endif
/** A commit job object for a group commit. Currently there is one instance of this object.
concurrency: assumption is caller is appropriately locking.
for example note() invocations are from the write lock.
other uses are in a read lock from a single thread (durThread)
*/
class CommitJob : boost::noncopyable {
public:
    AlignedBuilder _ab; // for direct i/o writes to journal

    CommitJob();

    /** record/note an intent to write */
    void note(void* p, int len);

    /** note an operation other than a "basic write" */
    void noteOp(shared_ptr<DurOp> p);

    /** access the pending write-intent set. only valid after the deferred
        intents have been drained into the set (_wi._drained), hence the
        assert below. */
    set<WriteIntent>& writes() {
        if( !_wi._drained ) {
            // generally, you don't want to use the set until it is prepared (after deferred ops are applied)
            // thus this assert here.
            assert(false);
        }
        return _wi._writes;
    }

    /** the non-basic-write operations noted so far */
    vector< shared_ptr<DurOp> >& ops() { return _wi._ops; }

    /** this method is safe to call outside of locks. when haswritten is false we don't do any group commit and avoid even
        trying to acquire a lock, which might be helpful at times.
    */
    bool hasWritten() const { return _hasWritten; }

    /** we use the commitjob object over and over, calling reset() rather than reconstructing */
    void reset();

    /** the commit code calls this when data reaches the journal (on disk) */
    void notifyCommitted() { _notify.notifyAll(); }

    /** Wait until the next group commit occurs. That is, wait until someone calls notifyCommitted. */
    void awaitNextCommit() {
        // nothing pending => nothing to wait for
        if( hasWritten() )
            _notify.wait();
    }

    /** we check how much written and if it is getting to be a lot, we commit sooner. */
    size_t bytes() const { return _bytes; }

#if defined(_DEBUG)
    const WriteIntent& lastWrite() const { return _wi._last; }
#endif

    Writes& wi() { return _wi; }

private:
    bool _hasWritten;   // anything noted since the last reset()? see hasWritten()
    Writes _wi; // todo: fix name
    size_t _bytes;      // running total backing bytes() -- used to trigger early commits
    NotifyAll _notify; // for getlasterror fsync:true acknowledgements

public:
    // NOTE(review): appears to count notes between commitIfNeeded calls --
    // maintained/consumed outside this header; confirm against dur.cpp.
    unsigned _nSinceCommitIfNeededCall;
};
extern CommitJob commitJob;
}
}
|