Annotation of src/usr.bin/ssh/monitor_mm.c, Revision 1.6
1.1 provos 1: /*
2: * Copyright 2002 Niels Provos <provos@citi.umich.edu>
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: *
14: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24: */
25:
26: #include "includes.h"
1.6 ! markus 27: RCSID("$OpenBSD: monitor_mm.c,v 1.5 2002/05/28 16:45:27 stevesk Exp $");
1.1 provos 28:
29: #include <sys/mman.h>
30:
31: #include "ssh.h"
32: #include "xmalloc.h"
33: #include "log.h"
34: #include "monitor_mm.h"
35:
36: static int
37: mm_compare(struct mm_share *a, struct mm_share *b)
38: {
39: return ((char *)a->address - (char *)b->address);
40: }
41:
42: RB_GENERATE(mmtree, mm_share, next, mm_compare)
43:
44: static struct mm_share *
45: mm_make_entry(struct mm_master *mm, struct mmtree *head,
46: void *address, size_t size)
47: {
48: struct mm_share *tmp, *tmp2;
49:
50: if (mm->mmalloc == NULL)
51: tmp = xmalloc(sizeof(struct mm_share));
52: else
53: tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
54: tmp->address = address;
55: tmp->size = size;
56:
57: tmp2 = RB_INSERT(mmtree, head, tmp);
58: if (tmp2 != NULL)
1.4 stevesk 59: fatal("mm_make_entry(%p): double address %p->%p(%lu)",
60: mm, tmp2, address, (u_long)size);
1.1 provos 61:
62: return (tmp);
63: }
64:
65: /* Creates a shared memory area of a certain size */
66:
67: struct mm_master *
68: mm_create(struct mm_master *mmalloc, size_t size)
69: {
70: void *address;
71: struct mm_master *mm;
72:
73: if (mmalloc == NULL)
74: mm = xmalloc(sizeof(struct mm_master));
75: else
76: mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
77:
1.3 markus 78: /*
1.1 provos 79: * If the memory map has a mm_master it can be completely
80: * shared including authentication between the child
81: * and the client.
82: */
83: mm->mmalloc = mmalloc;
84:
85: address = mmap(NULL, size, PROT_WRITE|PROT_READ, MAP_ANON|MAP_SHARED,
86: -1, 0);
87: if (address == MAP_FAILED)
1.5 stevesk 88: fatal("mmap(%lu): %s", (u_long)size, strerror(errno));
1.1 provos 89:
90: mm->address = address;
91: mm->size = size;
92:
93: RB_INIT(&mm->rb_free);
94: RB_INIT(&mm->rb_allocated);
95:
96: mm_make_entry(mm, &mm->rb_free, address, size);
97:
98: return (mm);
99: }
100:
101: /* Frees either the allocated or the free list */
102:
1.2 markus 103: static void
1.1 provos 104: mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
105: {
106: struct mm_share *mms, *next;
107:
108: for (mms = RB_ROOT(head); mms; mms = next) {
109: next = RB_NEXT(mmtree, head, mms);
110: RB_REMOVE(mmtree, head, mms);
111: if (mmalloc == NULL)
112: xfree(mms);
113: else
114: mm_free(mmalloc, mms);
115: }
116: }
117:
118: /* Destroys a memory mapped area */
119:
120: void
121: mm_destroy(struct mm_master *mm)
122: {
123: mm_freelist(mm->mmalloc, &mm->rb_free);
124: mm_freelist(mm->mmalloc, &mm->rb_allocated);
125:
126: if (munmap(mm->address, mm->size) == -1)
1.5 stevesk 127: fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
128: strerror(errno));
1.1 provos 129: if (mm->mmalloc == NULL)
130: xfree(mm);
131: else
132: mm_free(mm->mmalloc, mm);
133: }
134:
135: void *
136: mm_xmalloc(struct mm_master *mm, size_t size)
137: {
138: void *address;
139:
140: address = mm_malloc(mm, size);
141: if (address == NULL)
1.6 ! markus 142: fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
1.1 provos 143: return (address);
144: }
145:
146:
/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");

	/* Round the request up to a multiple of MM_MINSIZE. */
	size = ((size + MM_MINSIZE - 1) / MM_MINSIZE) * MM_MINSIZE;

	/*
	 * First-fit search; the tree is ordered by address (see
	 * mm_compare), so this prefers the lowest-addressed chunk
	 * that is large enough.
	 */
	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	/* mms is NULL when the loop exhausted the tree: no fit. */
	if (mms == NULL)
		return (NULL);

	/* Debug: poison the chunk so stale/uninitialized use shows up. */
	memset(mms->address, 0xd0, size);

	/* Record the allocation before shrinking the free chunk. */
	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree (tree is keyed by address,
	 * and the address only moves forward within the same gap). */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		/* Chunk fully consumed: drop its bookkeeping record. */
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
186:
/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	/* Look the chunk up by address in the allocated tree. */
	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug: poison the freed memory so stale reads show up. */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/*
	 * Find the in-order predecessor of mms by walking the RB tree
	 * internals directly: the rightmost node of the left subtree,
	 * or else the nearest ancestor reached from a right child.
	 * prev ends up NULL when mms is the smallest entry.
	 */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		/* Adjacent: fold mms into its predecessor. */
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	/* Adjacent: fold the successor into prev and drop its record. */
	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
263:
1.2 markus 264: static void
1.1 provos 265: mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
266: struct mm_master *mm, struct mm_master *mmold)
267: {
268: struct mm_master *mmalloc = mm->mmalloc;
269: struct mm_share *mms, *new;
270:
271: /* Sync free list */
272: RB_FOREACH(mms, mmtree, oldtree) {
273: /* Check the values */
274: mm_memvalid(mmold, mms, sizeof(struct mm_share));
275: mm_memvalid(mm, mms->address, mms->size);
276:
277: new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
278: memcpy(new, mms, sizeof(struct mm_share));
279: RB_INSERT(mmtree, newtree, new);
280: }
281: }
282:
/*
 * Re-creates the memory-map bookkeeping inside a freshly mapped
 * shared area and replaces *pmm and *pmmalloc with the new masters.
 * The old allocator (mm->mmalloc) is destroyed afterwards.
 */
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	/* The old master must itself live inside the old allocator. */
	mm_memvalid(mmold, mm, sizeof(*mm));

	/* New allocator of the same size; copy the master into it. */
	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	/* Save the old tree roots; the copy gets fresh, empty trees. */
	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	/* Rebuild both trees with records from the new allocator. */
	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
318:
319: void
320: mm_memvalid(struct mm_master *mm, void *address, size_t size)
321: {
322: void *end = (u_char *)address + size;
323:
324: if (address < mm->address)
325: fatal("mm_memvalid: address too small: %p", address);
326: if (end < address)
327: fatal("mm_memvalid: end < address: %p < %p", end, address);
328: if (end > (void *)((u_char *)mm->address + mm->size))
329: fatal("mm_memvalid: address too large: %p", address);
330: }