Annotation of src/usr.bin/ssh/monitor_mm.c, Revision 1.8
1.1 provos 1: /*
2: * Copyright 2002 Niels Provos <provos@citi.umich.edu>
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: *
14: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24: */
25:
#include "includes.h"
RCSID("$OpenBSD: monitor_mm.c,v 1.7 2002/06/28 01:49:31 millert Exp $");

#include <sys/mman.h>
#include <stddef.h>

#include "ssh.h"
#include "xmalloc.h"
#include "log.h"
#include "monitor_mm.h"
35:
36: static int
37: mm_compare(struct mm_share *a, struct mm_share *b)
38: {
1.7 millert 39: long diff = (char *)a->address - (char *)b->address;
40:
41: if (diff == 0)
42: return (0);
43: else if (diff < 0)
44: return (-1);
45: else
46: return (1);
1.1 provos 47: }
48:
49: RB_GENERATE(mmtree, mm_share, next, mm_compare)
50:
51: static struct mm_share *
52: mm_make_entry(struct mm_master *mm, struct mmtree *head,
53: void *address, size_t size)
54: {
55: struct mm_share *tmp, *tmp2;
56:
57: if (mm->mmalloc == NULL)
58: tmp = xmalloc(sizeof(struct mm_share));
59: else
60: tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
61: tmp->address = address;
62: tmp->size = size;
63:
64: tmp2 = RB_INSERT(mmtree, head, tmp);
65: if (tmp2 != NULL)
1.4 stevesk 66: fatal("mm_make_entry(%p): double address %p->%p(%lu)",
67: mm, tmp2, address, (u_long)size);
1.1 provos 68:
69: return (tmp);
70: }
71:
72: /* Creates a shared memory area of a certain size */
73:
74: struct mm_master *
75: mm_create(struct mm_master *mmalloc, size_t size)
76: {
77: void *address;
78: struct mm_master *mm;
79:
80: if (mmalloc == NULL)
81: mm = xmalloc(sizeof(struct mm_master));
82: else
83: mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
84:
1.3 markus 85: /*
1.1 provos 86: * If the memory map has a mm_master it can be completely
87: * shared including authentication between the child
88: * and the client.
89: */
90: mm->mmalloc = mmalloc;
91:
92: address = mmap(NULL, size, PROT_WRITE|PROT_READ, MAP_ANON|MAP_SHARED,
93: -1, 0);
94: if (address == MAP_FAILED)
1.5 stevesk 95: fatal("mmap(%lu): %s", (u_long)size, strerror(errno));
1.1 provos 96:
97: mm->address = address;
98: mm->size = size;
99:
100: RB_INIT(&mm->rb_free);
101: RB_INIT(&mm->rb_allocated);
102:
103: mm_make_entry(mm, &mm->rb_free, address, size);
104:
105: return (mm);
106: }
107:
108: /* Frees either the allocated or the free list */
109:
1.2 markus 110: static void
1.1 provos 111: mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
112: {
113: struct mm_share *mms, *next;
114:
115: for (mms = RB_ROOT(head); mms; mms = next) {
116: next = RB_NEXT(mmtree, head, mms);
117: RB_REMOVE(mmtree, head, mms);
118: if (mmalloc == NULL)
119: xfree(mms);
120: else
121: mm_free(mmalloc, mms);
122: }
123: }
124:
125: /* Destroys a memory mapped area */
126:
127: void
128: mm_destroy(struct mm_master *mm)
129: {
130: mm_freelist(mm->mmalloc, &mm->rb_free);
131: mm_freelist(mm->mmalloc, &mm->rb_allocated);
132:
133: if (munmap(mm->address, mm->size) == -1)
1.5 stevesk 134: fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
135: strerror(errno));
1.1 provos 136: if (mm->mmalloc == NULL)
137: xfree(mm);
138: else
139: mm_free(mm->mmalloc, mm);
140: }
141:
142: void *
143: mm_xmalloc(struct mm_master *mm, size_t size)
144: {
145: void *address;
146:
147: address = mm_malloc(mm, size);
148: if (address == NULL)
1.6 markus 149: fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
1.1 provos 150: return (address);
151: }
152:
153:
154: /* Allocates data from a memory mapped area */
155:
156: void *
157: mm_malloc(struct mm_master *mm, size_t size)
158: {
159: struct mm_share *mms, *tmp;
160:
161: if (size == 0)
162: fatal("mm_malloc: try to allocate 0 space");
1.8 ! millert 163: if (size > SIZE_T_MAX - MM_MINSIZE + 1)
! 164: fatal("mm_malloc: size too big");
1.1 provos 165:
1.8 ! millert 166: size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;
1.1 provos 167:
168: RB_FOREACH(mms, mmtree, &mm->rb_free) {
169: if (mms->size >= size)
170: break;
171: }
172:
173: if (mms == NULL)
174: return (NULL);
175:
1.3 markus 176: /* Debug */
1.1 provos 177: memset(mms->address, 0xd0, size);
178:
179: tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);
180:
181: /* Does not change order in RB tree */
182: mms->size -= size;
183: mms->address = (u_char *)mms->address + size;
184:
185: if (mms->size == 0) {
186: RB_REMOVE(mmtree, &mm->rb_free, mms);
187: if (mm->mmalloc == NULL)
188: xfree(mms);
189: else
190: mm_free(mm->mmalloc, mms);
191: }
192:
193: return (tmp->address);
194: }
195:
/* Frees memory in a memory mapped area */

/*
 * Return the allocated block starting at `address' to the free tree,
 * coalescing with the abutting free neighbours on either side.
 * fatal()s if the address was never handed out, or if free ranges
 * are found to overlap (memory corruption).
 * NOTE(review): when mm->mmalloc != NULL this recurses to release the
 * share record itself into the parent allocator.
 */
void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	/* Look the block up by its start address. */
	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/*
	 * Find previous entry: the in-order predecessor of mms in the
	 * free tree, computed by walking the RB links directly —
	 * rightmost node of the left subtree if there is one, otherwise
	 * the nearest ancestor reached from a right child.  prev ends
	 * up NULL when mms is the leftmost (lowest-address) entry.
	 */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		/* Predecessor ends exactly where we start: absorb mms. */
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;	/* no backward merge; continue from mms */

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	/* Successor starts exactly where prev ends: absorb it too. */
	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
272:
1.2 markus 273: static void
1.1 provos 274: mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
275: struct mm_master *mm, struct mm_master *mmold)
276: {
277: struct mm_master *mmalloc = mm->mmalloc;
278: struct mm_share *mms, *new;
279:
280: /* Sync free list */
281: RB_FOREACH(mms, mmtree, oldtree) {
282: /* Check the values */
283: mm_memvalid(mmold, mms, sizeof(struct mm_share));
284: mm_memvalid(mm, mms->address, mms->size);
285:
286: new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
287: memcpy(new, mms, sizeof(struct mm_share));
288: RB_INSERT(mmtree, newtree, new);
289: }
290: }
291:
/*
 * Re-home *pmm's bookkeeping into a brand-new backing map: create a
 * fresh allocator, copy the master structure and both RB trees into
 * it, destroy the old allocator, and return the new master and
 * allocator through pmm/pmmalloc.  Statement order matters: the old
 * trees must be fully copied before mm_destroy(mmold) runs.
 */
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	/* The current bookkeeping must lie inside the old shared map. */
	mm_memvalid(mmold, mm, sizeof(*mm));

	/* New backing allocator, then a copy of the master inside it. */
	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	/* Keep the old tree roots; their nodes still live in mmold. */
	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	/* Deep-copy both trees into the new allocator. */
	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	/* Old allocator (and its share records) are no longer needed. */
	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
327:
328: void
329: mm_memvalid(struct mm_master *mm, void *address, size_t size)
330: {
331: void *end = (u_char *)address + size;
332:
333: if (address < mm->address)
334: fatal("mm_memvalid: address too small: %p", address);
335: if (end < address)
336: fatal("mm_memvalid: end < address: %p < %p", end, address);
337: if (end > (void *)((u_char *)mm->address + mm->size))
338: fatal("mm_memvalid: address too large: %p", address);
339: }