Annotation of src/usr.bin/fgen/fgen.l, Revision 1.9
1.1 jason 1: %{
1.9 ! deraadt 2: /* $OpenBSD: fgen.l,v 1.8 2008/08/08 08:22:42 sobrado Exp $ */
1.1 jason 3: /* $NetBSD: fgen.l,v 1.12 2001/06/13 10:46:05 wiz Exp $ */
4: /* FLEX input for FORTH input file scanner */
5: /*
6: * Copyright (c) 1998 Eduardo Horvath.
7: * All rights reserved.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed by Eduardo Horvath.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission
22: *
23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33: */
34: /*
35: Specifications are as follows:
36:
37: The function "yylex()" always returns a pointer to a structure:
38:
39: struct tok {
40: int type;
41: char *text;
42: }
43: #define TOKEN struct tok
44: */
45: %}
46:
47: %option yylineno
48:
49: decimal [0-9]
50: hex [0-9A-Fa-f]
51: octal [0-7]
52: white [ \t\n\r\f]
53: tail {white}
54:
55: %{
56: #include <sys/types.h>
57:
58: #include <assert.h>
59: #include <err.h>
60: #include <errno.h>
61: #include <fcntl.h>
62: #include <stdarg.h>
63: #include <stdio.h>
64: #include <string.h>
65: #include <unistd.h>
66:
67: #include "fgen.h"
68: TOKEN token;
69:
70: /*
71: * Global variables that control the parse state.
72: */
73:
74: struct fcode *dictionary = NULL;
75: struct macro *aliases = NULL;
76: int outf = 1; /* stdout */
77: int state = 0;
78: int nextfcode = 0x800;
79: int base = TOK_HEX;
80: long outpos;
81: char *outbuf = NULL;
82: char *outfile, *infile;
83: #define BUFCLICK (1024*1024)
84: size_t outbufsiz = 0;
85: char *myname = NULL;
86: int offsetsize = 8;
87: int defining = 0;
88: int tokenizer = 0;
89:
90: #define PSTKSIZ 1024
91: Cell parse_stack[PSTKSIZ];
92: int parse_stack_ptr = 0;
93:
1.2 millert 94: void token_err(int, char *, char *, char *, ...)
1.1 jason 95: __attribute__((__format__(__printf__, 4, 5)));
96: YY_DECL;
97:
98: int debug = 0;
99: #define ASSERT if (debug) assert
100: #define STATE(y, x) do { if (debug) printf( "%ld State %s: token `%s'\n", outpos, x, y); } while (0)
101:
102: #define YY_NO_UNPUT
103: %}
104:
%%

	/*
	 * Lexer rules.  Every action fills in the shared `token'
	 * structure (type + pointer at yytext) and returns its
	 * address; yylex() hands back one token per call, or NULL
	 * at end of file.  yytext is only valid until the next call.
	 */

0	{ /* 0-3 and -1 have dedicated FCodes (see fcodes[]), so they
	   * are classified as plain words rather than numbers */
	  token.type = TOK_OTHER; token.text = yytext;
			return &token; }

1	{ token.type = TOK_OTHER; token.text = yytext;
			return &token; }

2	{ token.type = TOK_OTHER; token.text = yytext;
			return &token; }

3	{ token.type = TOK_OTHER; token.text = yytext;
			return &token; }

-1	{ token.type = TOK_OTHER; token.text = yytext;
			return &token; }

{white}*	/* whitespace -- keep looping */ ;

\\[^\n]*\n	/* end of line comment -- keep looping */ { STATE(yytext, "EOL comment"); }

-?{hex}+	{ /* interpreted according to the current base by tokenize() */
		  token.type = TOK_NUMBER; token.text = yytext;
			return &token; }

\'.\'		{ token.type = TOK_C_LIT; token.text = yytext; return &token; }

\"{white}*(\\\"|[^"])*\"	{ token.type = TOK_STRING_LIT; token.text = yytext;
			return &token; }	/* String started by `"' or `."' */

\.\({white}*(\\\"|[^)])*\)	{ token.type = TOK_PSTRING; token.text = yytext;
			return &token; }	/* String of type `.(.....)' */

\.\"{white}*(\\\"|[^"])*\"	{ token.type = TOK_PSTRING; token.text = yytext;
			return &token; }

"("		{ token.type = TOK_COMMENT; token.text = yytext;
			return &token; }

")"		{ token.type = TOK_ENDCOMMENT; token.text = yytext;
			return &token; }

":"		{ token.type = TOK_COLON; token.text = yytext;
			return &token; }

";"		{ token.type = TOK_SEMICOLON; token.text = yytext;
			return &token; }

\'		{ token.type = TOK_TOKENIZE; token.text = yytext;
			return &token; }

[aA][gG][aA][iI][nN]	{ token.type = TOK_AGAIN; token.text = yytext;
			return &token; }

[aA][lL][iI][aA][sS]	{ token.type = TOK_ALIAS; token.text = yytext;
			return &token; }

\[\'\]		{ token.type = TOK_GETTOKEN; token.text = yytext;
			return &token; }

[aA][sS][cC][iI][iI]	{ token.type = TOK_ASCII; token.text = yytext;
			return &token; }

[bB][eE][gG][iI][nN]	{ token.type = TOK_BEGIN; token.text = yytext;
			return &token; }

[bB][uU][fF][fF][eE][rR]:	{ token.type = TOK_BUFFER; token.text = yytext;
			return &token; }

[cC][aA][sS][eE]	{ token.type = TOK_CASE; token.text = yytext;
			return &token; }

[cC][oO][nN][sS][tT][aA][nN][tT]	{ token.type = TOK_CONSTANT; token.text = yytext;
			return &token; }

[cC][oO][nN][tT][rR][oO][lL]	{ token.type = TOK_CONTROL; token.text = yytext;
			return &token; }

[cC][rR][eE][aA][tT][eE]	{ token.type = TOK_CREATE; token.text = yytext;
			return &token; }

[dD]#		{ token.type = TOK_DECIMAL; token.text = yytext;
			return &token; }

[dD][eE][cC][iI][mM][aA][lL]	{ token.type = TOK_DECIMAL; token.text = yytext;
			return &token; }

[dD][eE][fF][eE][rR]	{ token.type = TOK_DEFER; token.text = yytext;
			return &token; }

\??[dD][oO]	{ token.type = TOK_DO; token.text = yytext;
			return &token; }

[eE][lL][sS][eE]	{ token.type = TOK_ELSE; token.text = yytext;
			return &token; }

[eE][nN][dD][cC][aA][sS][eE]	{ token.type = TOK_ENDCASE; token.text = yytext;
			return &token; }

[eE][nN][dD][oO][fF]	{ token.type = TOK_ENDOF; token.text = yytext;
			return &token; }

[eE][xX][tT][eE][rR][nN][aA][lL]	{ token.type = TOK_EXTERNAL; token.text = yytext;
			return &token; }

[fF][iI][eE][lL][dD]	{ token.type = TOK_FIELD; token.text = yytext;
			return &token; }

[hH]#		{ token.type = TOK_HEX; token.text = yytext;
			return &token; }

[hH][eE][aA][dD][eE][rR][lL][eE][sS][sS]	{ token.type = TOK_HEADERLESS; token.text = yytext;
			return &token; }

[hH][eE][aA][dD][eE][rR][sS]	{ token.type = TOK_HEADERS; token.text = yytext;
			return &token; }

[hH][eE][xX]	{ token.type = TOK_HEX; token.text = yytext;
			return &token; }

[iI][fF]	{ token.type = TOK_IF; token.text = yytext;
			return &token; }

\??[lL][eE][aA][vV][eE]	{ token.type = TOK_LEAVE; token.text = yytext;
			return &token; }

\+?[lL][oO][oO][pP]	{ token.type = TOK_LOOP; token.text = yytext;
			return &token; }

[oO]#		{ token.type = TOK_OCTAL; token.text = yytext;
			return &token; }

[oO][cC][tT][aA][lL]	{ token.type = TOK_OCTAL; token.text = yytext;
			return &token; }

[oO][fF]	{ token.type = TOK_OF; token.text = yytext;
			return &token; }

[rR][eE][pP][eE][aA][tT]	{ token.type = TOK_REPEAT; token.text = yytext;
			return &token; }

[tT][hH][eE][nN]	{ token.type = TOK_THEN; token.text = yytext;
			return &token; }

[tT][oO]	{ token.type = TOK_TO; token.text = yytext;
			return &token; }

[uU][nN][tT][iI][lL]	{ token.type = TOK_UNTIL; token.text = yytext;
			return &token; }

[vV][aA][lL][uU][eE]	{ token.type = TOK_VALUE; token.text = yytext;
			return &token; }

[vV][aA][rR][iI][aA][bB][lL][eE]	{ token.type = TOK_VARIABLE; token.text = yytext;
			return &token; }

[wW][hH][iI][lL][eE]	{ token.type = TOK_WHILE; token.text = yytext;
			return &token; }

offset16	{ token.type = TOK_OFFSET16; token.text = yytext;
			return &token; }

tokenizer\[	{ token.type = TOK_BEGTOK; token.text = yytext;
			return &token; }

emit-byte	{ token.type = TOK_EMIT_BYTE; token.text = yytext;
			return &token; }

\]tokenizer	{ token.type = TOK_ENDTOK; token.text = yytext;
			return &token; }

fload		{ token.type = TOK_FLOAD; token.text = yytext;
			return &token; }


[^ \n\t\r\f]+	{ /* anything else: a word to look up in the dictionary */
		  token.type = TOK_OTHER; token.text = yytext;
			return &token; }

<<EOF>>		{ return NULL; }
%%
284:
285: /* Function definitions */
1.2 millert 286: void push(Cell);
287: Cell pop(void);
288: int depth(void);
289: int fadd(struct fcode *, struct fcode *);
290: struct fcode *flookup(struct fcode *, char *);
291: int aadd(struct macro *, struct macro *);
292: struct macro *alookup(struct macro *, char *);
293: void initdic(void);
294: void usage(char *);
295: void tokenize(YY_BUFFER_STATE);
296: int emit(char *);
297: int spit(long);
298: void sspit(char *);
299: int apply_macros(YY_BUFFER_STATE, char *);
300: int main(int argc, char *argv[]);
1.1 jason 301:
302: /*
303: * Standard FCode names and numbers. Includes standard
304: * tokenizer aliases.
305: */
306: struct fcode fcodes[] = {
307: { "end0", 0x0000 },
308: { "b(lit)", 0x0010 },
309: { "b(')", 0x0011 },
310: { "b(\")", 0x0012 },
311: { "bbranch", 0x0013 },
312: { "b?branch", 0x0014 },
313: { "b(loop)", 0x0015 },
314: { "b(+loop)", 0x0016 },
315: { "b(do)", 0x0017 },
316: { "b(?do)", 0x0018 },
317: { "i", 0x0019 },
318: { "j", 0x001a },
319: { "b(leave)", 0x001b },
320: { "b(of)", 0x001c },
321: { "execute", 0x001d },
322: { "+", 0x001e },
323: { "-", 0x001f },
324: { "*", 0x0020 },
325: { "/", 0x0021 },
326: { "mod", 0x0022 },
327: { "and", 0x0023 },
328: { "or", 0x0024 },
329: { "xor", 0x0025 },
330: { "invert", 0x0026 },
331: { "lshift", 0x0027 },
332: { "rshift", 0x0028 },
333: { ">>a", 0x0029 },
334: { "/mod", 0x002a },
335: { "u/mod", 0x002b },
336: { "negate", 0x002c },
337: { "abs", 0x002d },
338: { "min", 0x002e },
339: { "max", 0x002f },
340: { ">r", 0x0030 },
341: { "r>", 0x0031 },
342: { "r@", 0x0032 },
343: { "exit", 0x0033 },
344: { "0=", 0x0034 },
345: { "0<>", 0x0035 },
346: { "0<", 0x0036 },
347: { "0<=", 0x0037 },
348: { "0>", 0x0038 },
349: { "0>=", 0x0039 },
350: { "<", 0x003a },
351: { ">", 0x003b },
352: { "=", 0x003c },
353: { "<>", 0x003d },
354: { "u>", 0x003e },
355: { "u<=", 0x003f },
356: { "u<", 0x0040 },
357: { "u>=", 0x0041 },
358: { ">=", 0x0042 },
359: { "<=", 0x0043 },
360: { "between", 0x0044 },
361: { "within", 0x0045 },
362: { "drop", 0x0046 },
363: { "dup", 0x0047 },
364: { "over", 0x0048 },
365: { "swap", 0x0049 },
366: { "rot", 0x004a },
367: { "-rot", 0x004b },
368: { "tuck", 0x004c },
369: { "nip", 0x004d },
370: { "pick", 0x004e },
371: { "roll", 0x004f },
372: { "?dup", 0x0050 },
373: { "depth", 0x0051 },
374: { "2drop", 0x0052 },
375: { "2dup", 0x0053 },
376: { "2over", 0x0054 },
377: { "2swap", 0x0055 },
378: { "2rot", 0x0056 },
379: { "2/", 0x0057 },
380: { "u2/", 0x0058 },
381: { "2*", 0x0059 },
382: { "/c", 0x005a },
383: { "/w", 0x005b },
384: { "/l", 0x005c },
385: { "/n", 0x005d },
386: { "ca+", 0x005e },
387: { "wa+", 0x005f },
388: { "la+", 0x0060 },
389: { "na+", 0x0061 },
390: { "char+", 0x0062 },
391: { "wa1+", 0x0063 },
392: { "la1+", 0x0064 },
393: { "cell+", 0x0065 },
394: { "chars", 0x0066 },
395: { "/w*", 0x0067 },
396: { "/l*", 0x0068 },
397: { "cells", 0x0069 },
398: { "on", 0x006a },
399: { "off", 0x006b },
400: { "+!", 0x006c },
401: { "@", 0x006d },
402: { "l@", 0x006e },
403: { "w@", 0x006f },
404: { "<w@", 0x0070 },
405: { "c@", 0x0071 },
406: { "!", 0x0072 },
407: { "l!", 0x0073 },
408: { "w!", 0x0074 },
409: { "c!", 0x0075 },
410: { "2@", 0x0076 },
411: { "2!", 0x0077 },
412: { "move", 0x0078 },
413: { "fill", 0x0079 },
414: { "comp", 0x007a },
415: { "noop", 0x007b },
416: { "lwsplit", 0x007c },
417: { "wjoin", 0x007d },
418: { "lbsplit", 0x007e },
419: { "bljoin", 0x007f },
420: { "wbflip", 0x0080 },
421: { "upc", 0x0081 },
422: { "lcc", 0x0082 },
423: { "pack", 0x0083 },
424: { "count", 0x0084 },
425: { "body>", 0x0085 },
426: { ">body", 0x0086 },
427: { "fcode-revision", 0x0087 },
428: { "span", 0x0088 },
429: { "unloop", 0x0089 },
430: { "expect", 0x008a },
431: { "alloc-mem", 0x008b },
432: { "free-mem", 0x008c },
433: { "key?", 0x008d },
434: { "key", 0x008e },
435: { "emit", 0x008f },
436: { "type", 0x0090 },
437: { "(cr", 0x0091 },
438: { "cr", 0x0092 },
439: { "#out", 0x0093 },
440: { "#line", 0x0094 },
441: { "hold", 0x0095 },
442: { "<#", 0x0096 },
443: { "u#>", 0x0097 },
444: { "sign", 0x0098 },
445: { "u#", 0x0099 },
446: { "u#s", 0x009a },
447: { "u.", 0x009b },
448: { "u.r", 0x009c },
449: { ".", 0x009d },
450: { ".r", 0x009e },
451: { ".s", 0x009f },
452: { "base", 0x00a0 },
453: { "convert", 0x00a1 },
454: { "$number", 0x00a2 },
455: { "digit", 0x00a3 },
456: { "-1", 0x00a4 },
457: { "true", 0x00a4 },
458: { "0", 0x00a5 },
459: { "1", 0x00a6 },
460: { "2", 0x00a7 },
461: { "3", 0x00a8 },
462: { "bl", 0x00a9 },
463: { "bs", 0x00aa },
464: { "bell", 0x00ab },
465: { "bounds", 0x00ac },
466: { "here", 0x00ad },
467: { "aligned", 0x00ae },
468: { "wbsplit", 0x00af },
469: { "bwjoin", 0x00b0 },
470: { "b(<mark)", 0x00b1 },
471: { "b(>resolve)", 0x00b2 },
472: { "set-token-table", 0x00b3 },
473: { "set-table", 0x00b4 },
474: { "new-token", 0x00b5 },
475: { "named-token", 0x00b6 },
476: { "b(:)", 0x00b7 },
477: { "b(value)", 0x00b8 },
478: { "b(variable)", 0x00b9 },
479: { "b(constant)", 0x00ba },
480: { "b(create)", 0x00bb },
481: { "b(defer)", 0x00bc },
482: { "b(buffer:)", 0x00bd },
483: { "b(field)", 0x00be },
484: { "b(code)", 0x00bf },
485: { "instance", 0x00c0 },
486: { "b(;)", 0x00c2 },
487: { "b(to)", 0x00c3 },
488: { "b(case)", 0x00c4 },
489: { "b(endcase)", 0x00c5 },
490: { "b(endof)", 0x00c6 },
491: { "#", 0x00c7 },
492: { "#s", 0x00c8 },
493: { "#>", 0x00c9 },
494: { "external-token", 0x00ca },
495: { "$find", 0x00cb },
496: { "offset16", 0x00cc },
497: { "evaluate", 0x00cd },
498: { "c,", 0x00d0 },
499: { "w,", 0x00d1 },
500: { "l,", 0x00d2 },
501: { "'", 0x00d3 },
502: { "um*", 0x00d4 },
503: { "um/mod", 0x00d5 },
504: { "d+", 0x00d8 },
505: { "d-", 0x00d9 },
506: { "get-token", 0x00da },
507: { "set-token", 0x00db },
508: { "state", 0x00dc },
509: { "compile,", 0x00dd },
510: { "behavior", 0x00de },
511: { "start0", 0x00f0 },
512: { "start1", 0x00f1 },
513: { "start2", 0x00f2 },
514: { "start4", 0x00f3 },
515: { "ferror", 0x00fc },
516: { "version1", 0x00fd },
517: { "4-byte-id", 0x00fe },
518: { "end1", 0x00ff },
519: { "dma-alloc", 0x0101 },
520: { "my-address", 0x0102 },
521: { "my-space", 0x0103 },
522: { "memmap", 0x0104 },
523: { "free-virtual", 0x0105 },
524: { ">physical", 0x0106 },
525: { "my-params", 0x010f },
526: { "property", 0x0110 },
527: { "encode-int", 0x0111 },
528: { "encode+", 0x0112 },
529: { "encode-phys", 0x0113 },
530: { "encode-string", 0x0114 },
531: { "encode-bytes", 0x0115 },
532: { "reg", 0x0116 },
533: { "intr", 0x0117 },
534: { "driver", 0x0118 },
535: { "model", 0x0119 },
536: { "device-type", 0x011a },
537: { "parse-2int", 0x011b },
538: { "is-install", 0x011c },
539: { "is-remove", 0x011d },
540: { "is-selftest", 0x011e },
541: { "new-device", 0x011f },
542: { "diagnostic-mode?", 0x0120 },
543: { "display-status", 0x0121 },
544: { "memory-test-suite", 0x0122 },
545: { "group-code", 0x0123 },
546: { "mask", 0x0124 },
547: { "get-msecs", 0x0125 },
548: { "ms", 0x0126 },
549: { "find-device", 0x0127 },
550: { "decode-phys", 0x0128 },
551: { "map-low", 0x0130 },
552: { "sbus-intr>cpu", 0x0131 },
553: { "#lines", 0x0150 },
554: { "#columns", 0x0151 },
555: { "line#", 0x0152 },
556: { "column#", 0x0153 },
557: { "inverse?", 0x0154 },
558: { "inverse-screen?", 0x0155 },
559: { "frame-buffer-busy?", 0x0156 },
560: { "draw-character", 0x0157 },
561: { "reset-screen", 0x0158 },
562: { "toggle-cursor", 0x0159 },
563: { "erase-screen", 0x015a },
564: { "blink-screen", 0x015b },
565: { "invert-screen", 0x015c },
566: { "insert-characters", 0x015d },
567: { "delete-characters", 0x015e },
568: { "insert-lines", 0x015f },
569: { "delete-lines", 0x0160 },
570: { "draw-logo", 0x0161 },
571: { "frame-buffer-addr", 0x0162 },
572: { "screen-height", 0x0163 },
573: { "screen-width", 0x0164 },
574: { "window-top", 0x0165 },
575: { "window-left", 0x0166 },
576: { "default-font", 0x016a },
577: { "set-font", 0x016b },
578: { "char-height", 0x016c },
579: { "char-width", 0x016d },
580: { ">font", 0x016e },
581: { "fontbytes", 0x016f },
582: { "fb8-draw-character", 0x0180 },
583: { "fb8-reset-screen", 0x0181 },
584: { "fb8-toggle-cursor", 0x0182 },
585: { "fb8-erase-screen", 0x0183 },
586: { "fb8-blink-screen", 0x0184 },
587: { "fb8-invert-screen", 0x0185 },
588: { "fb8-insert-characters", 0x0186 },
589: { "fb8-delete-characters", 0x0187 },
590: { "fb8-inisert-lines", 0x0188 },
591: { "fb8-delete-lines", 0x0189 },
592: { "fb8-draw-logo", 0x018a },
593: { "fb8-install", 0x018b },
594: { "return-buffer", 0x01a0 },
595: { "xmit-packet", 0x01a1 },
596: { "poll-packet", 0x01a2 },
597: { "mac-address", 0x01a4 },
598: { "device-name", 0x0201 },
599: { "my-args", 0x0202 },
600: { "my-self", 0x0203 },
601: { "find-package", 0x0204 },
602: { "open-package", 0x0205 },
603: { "close-package", 0x0206 },
604: { "find-method", 0x0207 },
605: { "call-package", 0x0208 },
606: { "$call-parent", 0x0209 },
607: { "my-parent", 0x020a },
608: { "ihandle>phandle", 0x020b },
609: { "my-unit", 0x020d },
610: { "$call-method", 0x020e },
611: { "$open-package", 0x020f },
612: { "processor-type", 0x0210 },
613: { "firmware-version", 0x0211 },
614: { "fcode-version", 0x0212 },
615: { "alarm", 0x0213 },
616: { "(is-user-word)", 0x0214 },
617: { "suspend-fcode", 0x0215 },
618: { "abort", 0x0216 },
619: { "catch", 0x0217 },
620: { "throw", 0x0218 },
621: { "user-abort", 0x0219 },
622: { "get-my-property", 0x021a },
623: { "decode-int", 0x021b },
624: { "decode-string", 0x021c },
625: { "get-inherited-property", 0x021d },
626: { "delete-property", 0x021e },
627: { "get-package-property", 0x021f },
628: { "cpeek", 0x0220 },
629: { "wpeek", 0x0221 },
630: { "lpeek", 0x0222 },
631: { "cpoke", 0x0223 },
632: { "wpoke", 0x0224 },
633: { "lpoke", 0x0225 },
634: { "lwflip", 0x0226 },
635: { "lbflip", 0x0227 },
636: { "lbflips", 0x0228 },
637: { "adr-mask", 0x0229 },
638: { "rb@", 0x0230 },
639: { "rb!", 0x0231 },
640: { "rw@", 0x0232 },
641: { "rw!", 0x0233 },
642: { "rl@", 0x0234 },
643: { "rl!", 0x0235 },
644: { "wbflips", 0x0236 },
645: { "lwflips", 0x0237 },
646: { "probe", 0x0238 },
647: { "probe-virtual", 0x0239 },
648: { "child", 0x023b },
649: { "peer", 0x023c },
650: { "next-property", 0x023d },
651: { "byte-load", 0x023e },
652: { "set-args", 0x023f },
653: { "left-parse-string", 0x0240 },
654: /* 64-bit FCode extensions */
655: { "bxjoin", 0x0241 },
656: { "<l@", 0x0242 },
657: { "lxjoin", 0x0243 },
658: { "rx@", 0x022e },
659: { "rx!", 0x022f },
660: { "wxjoin", 0x0244 },
661: { "x,", 0x0245 },
662: { "x@", 0x0246 },
663: { "x!", 0x0247 },
664: { "/x", 0x0248 },
665: { "/x*", 0x0249 },
666: { "xa+", 0x024a },
667: { "xa1+", 0x024b },
668: { "xbflip", 0x024c },
669: { "xbflips", 0x024d },
670: { "xbsplit", 0x024e },
671: { "xlflip", 0x024f },
672: { "xlflips", 0x0250 },
673: { "xlsplit", 0x0251 },
674: { "xwflip", 0x0252 },
675: { "xwflips", 0x0253 },
676: { "xwsplit", 0x0254 },
1.4 jason 677: { NULL, 0 }
1.1 jason 678: };
679:
680: /*
681: * Default macros -- can be overridden by colon definitions.
682: */
683: struct macro macros[] = {
684: { "eval", "evaluate" }, /* Build a more balanced tree */
685: { "(.)", "dup abs <# u#s swap sign u#>" },
686: { "<<", "lshift" },
687: { ">>", "rshift" },
688: { "?", "@ ." },
689: { "1+", "1 +" },
690: { "1-", "1 -" },
691: { "2+", "2 +" },
692: { "2-", "2 -" },
693: { "abort\"", "-2 throw" },
694: { "accept", "span @ -rot expect span @ swap span !" },
695: { "allot", "0 max 0 ?do 0 c, loop" },
696: { "blank", "bl fill" },
697: { "/c*", "chars" },
698: { "ca1+", "char+" },
699: { "carret", "b(lit) 00 00 00 0x0d" },
700: { ".d" "base @ swap 0x0a base ! . base !" },
701: { "decode-bytes", ">r over r@ + swap r@ - rot r>" },
702: { "3drop", "drop 2drop" },
703: { "3dup", "2 pick 2 pick 2 pick" },
704: { "erase", "0 fill" },
705: { "false", "0" },
706: { ".h" "base @ swap 0x10 base ! . base !" },
707: { "linefeed", "b(lit) 00 00 00 0x0a" },
708: { "/n*", "cells" },
709: { "na1+", "cell+", },
710: { "not", "invert", },
711: { "s.", "(.) type space" },
712: { "space", "bl emit" },
713: { "spaces", "0 max 0 ?do space loop" },
714: { "struct", "0" },
715: { "true", "-1" },
716: { "(u,)", "<# u#s u#>" },
717: { NULL, NULL }
718: };
719:
720: /*
721: * Parser stack control functions.
722: */
723:
724: void
725: push(val)
726: Cell val;
727: {
728: parse_stack[parse_stack_ptr++] = val;
729: if (parse_stack_ptr >= PSTKSIZ) {
730: (void)printf( "Parse stack overflow\n");
731: exit(1);
732: }
733: }
734:
735: Cell
736: pop()
737: {
738: ASSERT(parse_stack_ptr);
739: return parse_stack[--parse_stack_ptr];
740: }
741:
742: int
743: depth()
744: {
745: return (parse_stack_ptr);
746: }
747:
748: /*
749: * Insert fcode into dictionary.
750: */
751: int
752: fadd(dict, new)
753: struct fcode *dict, *new;
754: {
755: int res = strcmp(dict->name, new->name);
756:
757: #ifdef DEBUG
758: new->type = FCODE;
759: ASSERT(dict->type == FCODE);
760: #endif
761: /* Don't allow duplicate entries. */
762: if (!res) return (0);
763: if (res < 0) {
764: if (dict->l)
765: return fadd(dict->l, new);
766: else {
767: #ifdef DEBUG
768: if (debug > 1)
769: (void)printf( "fadd: new FCode `%s' is %lx\n",
770: new->name, new->num);
771: #endif
772: new->l = new->r = NULL;
773: dict->l = new;
774: }
775: } else {
776: if (dict->r)
777: return fadd(dict->r, new);
778: else {
779: #ifdef DEBUG
780: if (debug > 1)
781: (void)printf( "fadd: new FCode `%s' is %lx\n",
782: new->name, new->num);
783: #endif
784: new->l = new->r = NULL;
785: dict->r = new;
786: }
787: }
788: return (1);
789: }
790:
791: /*
792: * Look for a code in the dictionary.
793: */
794: struct fcode *
795: flookup(dict, str)
796: struct fcode *dict;
797: char *str;
798: {
799: int res;
800: if (!dict) return (dict);
801:
802: res = strcmp(dict->name, str);
803: #ifdef DEBUG
804: ASSERT(dict->type == FCODE);
805: if (debug > 2)
806: (void)printf( "flookup: `%s' and `%s' %s match\n",
807: str, dict->name, res?"don't":"do");
808: #endif
809: if (!res) return (dict);
810: if (res < 0)
811: return (flookup(dict->l, str));
812: else
813: return (flookup(dict->r, str));
814:
815: }
816:
817: /*
818: * Insert alias into macros.
819: */
820: int
821: aadd(dict, new)
822: struct macro *dict, *new;
823: {
824: int res = strcmp(dict->name, new->name);
825:
826: #ifdef DEBUG
827: new->type = MACRO;
828: ASSERT(dict->type == MACRO);
829: #endif
830: /* Don't allow duplicate entries. */
831: if (!res) return (0);
832: if (res < 0) {
833: if (dict->l)
834: return aadd(dict->l, new);
835: else {
836: new->l = new->r = NULL;
837: dict->l = new;
838: #ifdef DEBUG
839: if (debug > 1)
840: (void)printf( "aadd: new alias `%s' to `%s'\n",
841: new->name, new->equiv);
842: #endif
843: }
844: } else {
845: if (dict->r)
846: return aadd(dict->r, new);
847: else {
848: new->l = new->r = NULL;
849: dict->r = new;
850: #ifdef DEBUG
851: if (debug > 1)
852: (void)printf( "aadd: new alias `%s' to `%s'\n",
853: new->name, new->equiv);
854: #endif
855: }
856: }
857: return (1);
858: }
859:
860: /*
861: * Look for a macro in the aliases.
862: */
863: struct macro *
864: alookup(dict, str)
865: struct macro *dict;
866: char *str;
867: {
868: int res;
869: if (!dict) return (dict);
870:
871: #ifdef DEBUG
872: ASSERT(dict->type == MACRO);
873: #endif
874: res = strcmp(dict->name, str);
875: if (!res) return (dict);
876: if (res < 0)
877: return (alookup(dict->l, str));
878: else
879: return (alookup(dict->r, str));
880:
881: }
882:
883: /*
884: * Bootstrap the dictionary and then install
885: * all the standard FCodes.
886: */
887: void
888: initdic()
889: {
890: struct fcode *code = fcodes;
891: struct macro *alias = macros;
892:
893: ASSERT(dictionary == NULL);
894: code->l = code->r = NULL;
895: dictionary = code;
896: #ifdef DEBUG
897: code->type = FCODE;
898: #endif
899:
900: while ((++code)->name) {
901: if(!fadd(dictionary, code)) {
902: printf("init: duplicate dictionary entry %s\n",
903: code->name);
904: abort();
905: }
906: }
907:
908: ASSERT(aliases == NULL);
909: aliases = alias;
910: alias->l = alias->r = NULL;
911: #ifdef DEBUG
912: alias->type = MACRO;
913: #endif
914: while ((++alias)->name) {
915: if(!aadd(aliases, alias)) {
916: printf("init: duplicate macro entry %s\n",
917: alias->name);
918: abort();
919: }
920: }
921:
922: }
923:
924: int
925: apply_macros(input, str)
926: YY_BUFFER_STATE input;
927: char *str;
928: {
929: struct macro *xform = alookup(aliases, str);
930:
931: if (xform) {
932: YY_BUFFER_STATE newbuf;
933:
934: newbuf = yy_scan_string(xform->equiv);
935: yy_switch_to_buffer(newbuf);
936: tokenize(newbuf);
937: yy_switch_to_buffer(input);
938: yy_delete_buffer(newbuf);
939: }
940: return (xform != NULL);
941: }
942:
/*
 * Print a usage summary on stderr and exit with failure status.
 */
void
usage(char *me)
{
	(void)fprintf(stderr, "usage: %s [-d level] [-o outfile] infile\n", me);
	exit(1);
}
950:
951: int
952: main(argc, argv)
953: int argc;
954: char *argv[];
955: {
956: int bflag, ch;
957: FILE *inf;
958: struct fcode_header *fheader;
959: YY_BUFFER_STATE inbuf;
960: char *hdrtype = "version1";
961: int i;
962:
963: outf = 1; /* stdout */
964: myname = argv[0];
965:
966: bflag = 0;
967: while ((ch = getopt(argc, argv, "d:o:")) != -1)
968: switch(ch) {
969: case 'd':
970: debug = atol(optarg);
971: break;
972: case 'o':
973: outfile = optarg;
974: break;
975: case '?':
976: default:
1.3 nordin 977: warnx("Illegal argument: %c", ch);
1.1 jason 978: usage(myname);
979: }
980: argc -= optind;
981: argv += optind;
982:
983: if (argc != 1)
984: usage(myname);
985:
986: infile = argv[0];
987:
988: /*
989: * Initialization stuff.
990: */
991: initdic();
992: outbufsiz = BUFCLICK;
993: outbuf = malloc(outbufsiz);
1.5 deraadt 994: if (outbuf == NULL)
995: (void)err(1, "out of memory");
996:
1.1 jason 997: fheader = (struct fcode_header *)outbuf;
998: outpos = 0;
999: emit(hdrtype);
1000: outpos = sizeof(*fheader);
1001:
1002: /*
1003: * Do it.
1004: */
1005: if ((inf = fopen(infile, "r")) == NULL)
1006: (void)err(1, "can not open %s for reading", infile);
1007:
1008: inbuf = yy_create_buffer( inf, YY_BUF_SIZE );
1009: yy_switch_to_buffer(inbuf);
1010: tokenize(inbuf);
1011: yy_delete_buffer(inbuf);
1012: fclose(inf);
1013: emit("end0");
1014:
1015: /* Now calculate length and checksum and stick them in the header */
1016: fheader->format = 0x08;
1017: fheader->length = htonl(outpos);
1018: fheader->checksum = 0;
1019: for (i = sizeof(*fheader); i<outpos; i++)
1020: fheader->checksum += outbuf[i];
1021: fheader->checksum = htons(fheader->checksum);
1022:
1.4 jason 1023: if ((outf = open(outfile, O_WRONLY|O_CREAT|O_TRUNC, 0666)) == -1)
1.1 jason 1024: err(1, "can out open %s for writing", outfile);
1025:
1026: if (write(outf, outbuf, outpos) != outpos) {
1027: close(outf);
1028: unlink(outfile);
1029: err(1, "write error");
1030: }
1031: close(outf);
1032: return (0);
1033: };
1034:
1035: /*
1036: * Tokenize one file. This is a separate function so it can
1.7 otto 1037: * be called recursively to parse multiple levels of include files.
1.1 jason 1038: */
1039:
1040: void
1041: tokenize(input)
1042: YY_BUFFER_STATE input;
1043: {
1044: FILE *inf;
1045: YY_BUFFER_STATE inbuf;
1046: TOKEN *token;
1047: char *last_token = "";
1048: struct fcode *fcode;
1049: int pos, off;
1050:
1051: while ((token = yylex()) != NULL) {
1052: switch (token->type) {
1053: case TOK_NUMBER:
1054: STATE(token->text, "TOK_NUMBER");
1055: {
1056: char *end;
1057: Cell value;
1058:
1059: if (tokenizer) {
1060: push(strtol(token->text, &end, 16));
1061: break;
1062: }
1063: value = strtol(token->text, &end, base);
1064: if (*end != 0)
1065: token_err(yylineno, infile, yytext,
1066: "illegal number conversion");
1067:
1068: /*
1069: * If this is a 64-bit value we need to store two literals
1070: * and issue a `lxjoin' to combine them. But that's a future
1071: * project.
1072: */
1073: emit("b(lit)");
1074: spit(value>>24);
1075: spit((value>>16)&0x0ff);
1076: spit((value>>8)&0x0ff);
1077: spit(value&0x0ff);
1078: }
1079: break;
1080: case TOK_C_LIT:
1081: STATE(token->text, "TOK_C_LIT");
1082: emit("b(lit)");
1083: spit(0);
1084: spit(0);
1085: spit(0);
1086: spit(token->text[1]);
1087: break;
1088: case TOK_STRING_LIT:
1089: STATE(token->text, "TOK_STRING_LIT:");
1090: {
1091: int len;
1092: char *p = token->text;
1093:
1094: ++p; /* Skip the quote */
1095: len = strlen(++p); /* Skip the 1st space */
1096:
1097: #define ERR_TOOLONG \
1098: token_err(yylineno, infile, yytext, "string length %d too long", len)
1099:
1100: if (len > 255)
1101: ERR_TOOLONG;
1102:
1103: if (p[len-1] == ')' ||
1104: p[len-1] == '"') {
1105: p[len-1] = 0;
1106: }
1107: emit("b(\")");
1108: sspit(p);
1109: }
1110: break;
1111: case TOK_PSTRING:
1112: STATE(token->text, "TOK_PSTRING:");
1113: {
1114: int len;
1115: char *p = token->text;
1116:
1117: if (*p++ == '.') p++; /* Skip over delimiter */
1118: p++; /* Skip over space/tab */
1119:
1120: len = strlen(p);
1121: if (len > 255)
1122: ERR_TOOLONG;
1123:
1124: if (p[len-1] == ')' ||
1125: p[len-1] == '"') {
1126: p[len-1] = 0;
1127: }
1128: emit("b(\")");
1129: sspit(p);
1130: emit("type");
1131: }
1132: break;
1133: case TOK_TOKENIZE:
1134: STATE(token->text, "TOK_TOKENIZE");
1135: /* The next pass should tokenize the FCODE number */
1136: emit("b(')");
1137: break;
1138: case TOK_COMMENT:
1139: STATE(token->text, "TOK_COMMENT:");
1140: while (((token = yylex()) != NULL) && token->type != TOK_ENDCOMMENT)
1141: ;
1142: break;
1143: case TOK_ENDCOMMENT:
1144: STATE(token->text, "TOK_ENDCOMMENT");
1145: token_err(yylineno, infile, NULL,
1146: "ENDCOMMENT encountered outside comment");
1147: break;
1148: case TOK_COLON:
1149: STATE(token->text, "TOK_COLON:");
1150:
1151: token = yylex();
1152: if (token == NULL)
1153: token_err(yylineno, infile, yytext,
1154: "EOF in colon definition");
1155:
1156: /* Add new code to dictionary */
1157: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1158: if (fcode == NULL)
1159: (void)err(1, "out of memory");
1160:
1.1 jason 1161: fcode->num = nextfcode++;
1162: fcode->name = strdup(token->text);
1.5 deraadt 1163: if (fcode->name == NULL)
1164: (void)err(1, "out of memory");
1165:
1.1 jason 1166: if (!fadd(dictionary, fcode))
1167: token_err(yylineno, infile, NULL,
1168: "Duplicate definition: `%s'\n", fcode->name);
1169: #ifdef DEBUG
1170: if (debug)
1171: (void)printf("Adding %s to dictionary\n", token->text);
1172: #endif
1173: if (state == 0)
1174: emit("new-token");
1175: else {
1176: if (state == TOK_EXTERNAL)
1177: emit("external-token");
1178: else
1179: /* Here we have a choice of new-token or named-token */
1180: emit("named-token");
1181: sspit(token->text);
1182: }
1183: spit(fcode->num);
1184: emit("b(:)");
1185: last_token = fcode->name;
1186: defining = 1;
1187: break;
1188: case TOK_SEMICOLON:
1189: STATE(token->text, "TOK_SEMICOLON:");
1190: emit("b(;)");
1191: defining = 0;
1192: if (depth()) {
1193: token_err(yylineno, infile, NULL,
1194: "Warning: stack depth %d at end of %s\n",
1195: depth(), last_token);
1196: }
1197: last_token = "";
1198: break;
1199:
1200: /* These are special */
1201: case TOK_AGAIN:
1202: STATE(token->text, "TOK_AGAIN");
1203: emit("bbranch");
1204: pos = pop();
1205: pos -= outpos;
1206: if (offsetsize == 16) {
1207: spit((pos>>8)&0xff);
1208: }
1209: spit(pos&0xff);
1210: break;
1211: case TOK_ALIAS:
1212: STATE(token->text, "TOK_ALIAS");
1213: {
1214: struct macro *alias;
1215:
1216: token = yylex();
1217: if (token == NULL) {
1218: (void)printf( "EOF in alias definition\n");
1219: return;
1220: }
1221: if (token->type != TOK_OTHER) {
1222: (void)printf( "ENDCOMMENT aliasing weird token type %d\n",
1223: token->type);
1224: }
1225: alias = malloc(sizeof(*alias));
1.5 deraadt 1226: if (alias == NULL)
1227: (void)err(1, "out of memory");
1228:
1.1 jason 1229: alias->name = strdup(token->text);
1.5 deraadt 1230: if (alias->name == NULL)
1231: (void)err(1, "out of memory");
1232:
1.1 jason 1233: token = yylex();
1234: if (token == NULL) {
1.9 ! deraadt 1235: free(alias);
1.1 jason 1236: (void)printf( "EOF in alias definition\n");
1237: return;
1238: }
1239: alias->equiv = strdup(token->text);
1.5 deraadt 1240: if (alias->equiv == NULL)
1241: (void)err(1, "out of memory");
1242:
1.1 jason 1243: if (!aadd(aliases, alias)) {
1244: (void)printf( "ERROR: Duplicate alias %s\n",
1245: alias->name);
1246: exit(1);
1247: }
1248: }
1249: break;
1250: case TOK_GETTOKEN:
1251: STATE(token->text, "TOK_GETTOKEN");
1252: /* This is caused by ['] */
1253: emit("b(')");
1254: token = yylex();
1255: if (token == NULL) {
1256: (void)printf( "EOF in [']\n");
1257: return;
1258: }
1259: if ((fcode = flookup(dictionary, token->text)) == NULL) {
1260: (void)printf( "[']: %s not found\n", token->text);
1261: exit(1);
1262: }
1263: spit(fcode->num);
1264: break;
1265: case TOK_ASCII:
1266: STATE(token->text, "TOK_ASCII");
1267: token = yylex();
1268: if (token == NULL) {
1269: (void)printf( "EOF after \"ascii\"\n");
1270: exit(1);
1271: }
1272: emit("b(lit)");
1273: spit(0);
1274: spit(0);
1275: spit(0);
1276: spit(token->text[0]);
1277: break;
1278: case TOK_BEGIN:
1279: STATE(token->text, "TOK_BEGIN");
1280: emit("b(<mark)");
1281: push(outpos);
1282: break;
1283: case TOK_BUFFER:
1284: STATE(token->text, "TOK_BUFFER");
1285:
1286: token = yylex();
1287: if (token == NULL) {
1288: (void)printf( "EOF in colon definition\n");
1289: return;
1290: }
1291:
1292: /* Add new code to dictionary */
1293: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1294: if (fcode == NULL)
1295: (void)err(1, "out of memory");
1296:
1.1 jason 1297: fcode->num = nextfcode++;
1298: fcode->name = strdup(token->text);
1.5 deraadt 1299: if (fcode->name == NULL)
1300: (void)err(1, "out of memory");
1301:
1.1 jason 1302: fadd(dictionary, fcode);
1303:
1304: if (state == 0)
1305: emit("new-token");
1306: else {
1307: if (state == TOK_EXTERNAL)
1308: emit("external-token");
1309: else
1310: /* Here we have a choice of new-token or named-token */
1311: emit("named-token");
1312: sspit(token->text);
1313: }
1314: spit(fcode->num);
1315: emit("b(buffer:)");
1316: break;
1317: case TOK_CASE:
1318: STATE(token->text, "TOK_CASE");
1319: emit("b(case)");
1320: push(0);
1321: break;
1322: case TOK_CONSTANT:
1323: STATE(token->text, "TOK_CONSTANT");
1324:
1325: token = yylex();
1326: if (token == NULL) {
1327: (void)printf( "EOF in constant definition\n");
1328: return;
1329: }
1330:
1331: /* Add new code to dictionary */
1332: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1333: if (fcode == NULL)
1334: (void)err(1, "out of memory");
1.1 jason 1335: fcode->num = nextfcode++;
1336: fcode->name = strdup(token->text);
1.5 deraadt 1337: if (fcode->name == NULL)
1338: (void)err(1, "out of memory");
1339:
1.1 jason 1340: fadd(dictionary, fcode);
1341:
1342: if (state == 0)
1343: emit("new-token");
1344: else {
1345: if (state == TOK_EXTERNAL)
1346: emit("external-token");
1347: else
1348: /* Here we have a choice of new-token or named-token */
1349: emit("named-token");
1350: sspit(token->text);
1351: }
1352: spit(fcode->num);
1353: emit("b(constant)");
1354: break;
1355: case TOK_CONTROL:
1356: STATE(token->text, "TOK_CONTROL");
1357: token = yylex();
1358: if (token == NULL) {
1359: (void)printf( "EOF after \"ascii\"\n");
1360: exit(1);
1361: }
1362: emit("b(lit)");
1363: spit(0);
1364: spit(0);
1365: spit(0);
1366: spit(token->text[0]&0x1f);
1367: break;
1368: case TOK_CREATE:
1369: STATE(token->text, "TOK_CREATE");
1370: /* Don't know what this does or if it's right */
1371: token = yylex();
1372: if (token == NULL) {
1373: (void)printf( "EOF in create definition\n");
1374: return;
1375: }
1376:
1377: /* Add new code to dictionary */
1378: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1379: if (fcode == NULL)
1380: err(1, "out of memory");
1381:
1.1 jason 1382: fcode->num = nextfcode++;
1383: fcode->name = strdup(token->text);
1.5 deraadt 1384: if (fcode->name == NULL)
1385: (void)err(1, "out of memory");
1386:
1.1 jason 1387: fadd(dictionary, fcode);
1388:
1389: if (state == 0)
1390: emit("new-token");
1391: else {
1392: if (state == TOK_EXTERNAL)
1393: emit("external-token");
1394: else
1395: /* Here we have a choice of new-token or named-token */
1396: emit("named-token");
1397: sspit(token->text);
1398: }
1399: spit(fcode->num);
1400: emit("b(create)");
1401: break;
1402: case TOK_DECIMAL:
1403: STATE(token->text, "TOK_DECIMAL");
1404: if (token->text[1] != '#') {
1405: if (defining) {
1406: spit(10);
1407: emit("base");
1408: emit("!");
1409: } else
1410: base = TOK_DECIMAL;
1411: } else {
1412: char *end;
1413: Cell value;
1414:
1415: token = yylex();
1416: if (token == NULL) {
1417: (void)printf( "EOF after d#\n");
1418: return;
1419: }
1420: if (token->type == TOK_OTHER) {
1421: if (strcmp("-1", token->text) == 0) {
1422: emit(token->text);
1423: break;
1424: }
1425: }
1426: value = strtol(token->text, &end, 10);
1427: if (*end != 0)
1428: token_err(yylineno, infile, NULL,
1429: "Illegal number conversion: %s", token->text);
1430:
1431: /*
1432: * If this is a 64-bit value we need to store two literals
1433: * and issue a `lxjoin' to combine them. But that's a future
1434: * project.
1435: */
1436: emit("b(lit)");
1437: spit(value>>24);
1438: spit((value>>16)&0x0ff);
1439: spit((value>>8)&0x0ff);
1440: spit(value&0x0ff);
1441: }
1442: break;
1443: case TOK_DEFER:
1444: STATE(token->text, "TOK_DEFER");
1445: /* Don't know what this does or if it's right */
1446: token = yylex();
1447: if (token == NULL) {
1448: (void)printf( "EOF in colon definition\n");
1449: return;
1450: }
1451:
1452: /* Add new code to dictionary */
1453: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1454: if (fcode == NULL)
1455: (void)err(1, "out of memory");
1456:
1.1 jason 1457: fcode->num = nextfcode++;
1458: fcode->name = strdup(token->text);
1.5 deraadt 1459: if (fcode->name == NULL)
1460: (void)err(1, "out of memory");
1461:
1.1 jason 1462: fadd(dictionary, fcode);
1463:
1464: if (state == 0)
1465: emit("new-token");
1466: else {
1467: if (state == TOK_EXTERNAL)
1468: emit("external-token");
1469: else
1470: /* Here we have a choice of new-token or named-token */
1471: emit("named-token");
1472: sspit(token->text);
1473: }
1474: spit(fcode->num);
1475: emit("b(defer)");
1476: break;
1477: case TOK_DO:
1478: STATE(token->text, "TOK_DO");
1479: /*
1480: * From the 1275 spec. B is branch location, T is branch target.
1481: *
1482: * b(do) offset1 ... b(loop) offset2 ...
1483: * b(do) offset1 ... b(+loop) offset2 ...
1484: * b(?do) offset1 ... b(loop) offset2 ...
1485: * b(?do) offset1 ... b(+loop) offset2 ...
1486: * ^ ^
1487: * B1 ^ ^ T1
1488: * T2 B2
1489: *
1490: * How we do this is we generate the b(do) or b(?do), spit out a
1491: * zero offset while remembering b1 and t2. Then we call tokenize()
1492: * to generate the body. When tokenize() finds a b(loop) or b(+loop),
1493: * it generates the FCode and returns, with outpos at b2. We then
1494: * calculate the offsets, put them in the right slots and finishup.
1495: */
1496:
1497: if (token->text[0] == '?')
1498: emit("b(?do)");
1499: else
1500: emit("b(do)");
1501: push(outpos);
1502: if (offsetsize == 16) {
1503: spit(0);
1504: }
1505: spit(0); /* Place holder for later */
1506: push(outpos);
1507: break;
1508: case TOK_ELSE:
1509: STATE(token->text, "TOK_ELSE");
1510: /* Get where we need to patch */
1511: off = pop();
1512: emit("bbranch");
1513: /* Save where we are now. */
1514: push(outpos);
1515: if (offsetsize == 16) {
1516: spit(0); /* Place holder for later */
1517: }
1518: spit(0); /* Place holder for later */
1519: emit("b(>resolve)");
1520: /* Rewind and patch the if branch */
1521: pos = outpos;
1522: outpos = off;
1523: off = pos - off;
1524: if (offsetsize == 16) {
1525: spit(0); /* Place holder for later */
1526: }
1527: spit(0); /* Place holder for later */
1528: /* revert to the end */
1529: outpos = pos;
1530: break;
1531: case TOK_ENDCASE:
1532: STATE(token->text, "TOK_ENDCASE:");
1533: pos = outpos; /* Remember where we need to branch to */
1534:
1535: /* Thread our way backwards and install proper offsets */
1536: off = pop();
1537: while (off) {
1538: int tmp;
1539:
1540: /* Move to this offset */
1541: outpos = off;
1542: /* Load next offset to process */
1543: tmp = outbuf[outpos];
1544:
1545: /* process this offset */
1546: off = pos - outpos;
1547: if (offsetsize == 16) {
1548: spit((off>>8)&0xff);
1549: }
1550: spit(off&0xff);
1551: off = tmp;
1552: }
1553: outpos = pos;
1554: emit("b(endcase)");
1555: break;
1556: case TOK_ENDOF:
1557: STATE(token->text, "TOK_ENDOF");
1558: off = pop();
1559: emit("b(endof)");
1560: /*
1561: * Save back pointer in the offset field so we can traverse
1562: * the linked list and patch it in the endcase.
1563: */
1564: pos = pop(); /* get position of prev link. */
1565: push(outpos); /* save position of this link. */
1.7 otto 1566: spit(pos); /* save position of prev link. */
1.1 jason 1567: if (offsetsize == 16) {
1568: spit(0);
1569: }
1570: pos = outpos;
1571: /* Now point the offset from b(of) here. */
1572: outpos = off;
1573: off = outpos - off;
1574: if (offsetsize == 16) {
1575: spit((off>>8)&0xff);
1576: }
1577: spit(off&0xff);
1578: /* Restore position */
1579: outpos = pos;
1580: break;
1581: case TOK_EXTERNAL:
1582: STATE(token->text, "TOK_EXTERNAL");
1583: state = TOK_EXTERNAL;
1584: break;
1585: case TOK_FIELD:
1586: STATE(token->text, "TOK_FIELD");
1587:
1588: token = yylex();
1589: if (token == NULL) {
1590: (void)printf( "EOF in field definition\n");
1591: return;
1592: }
1593:
1594: /* Add new code to dictionary */
1595: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1596: if (fcode == NULL)
1597: (void)err(1, "out of memory");
1598:
1.1 jason 1599: fcode->num = nextfcode++;
1600: fcode->name = strdup(token->text);
1.5 deraadt 1601: if (fcode->name == NULL)
1602: (void)err(1, "out of memory");
1603:
1.1 jason 1604: fadd(dictionary, fcode);
1605:
1606: if (state == 0)
1607: emit("new-token");
1608: else {
1609: if (state == TOK_EXTERNAL)
1610: emit("external-token");
1611: else
1612: /* Here we have a choice of new-token or named-token */
1613: emit("named-token");
1614: sspit(token->text);
1615: }
1616: spit(fcode->num);
1617: emit("b(field)");
1618: break;
1619:
1620: case TOK_HEX:
1621: STATE(token->text, "TOK_HEX");
1622: if (token->text[1] != '#') {
1623: if (defining) {
1624: spit(16);
1625: emit("base");
1626: emit("!");
1627: } else
1628: base = TOK_HEX;
1629: } else {
1630: char *end;
1631: Cell value;
1632:
1633: token = yylex();
1634: if (token == NULL) {
1635: (void)printf( "EOF after h#\n");
1636: return;
1637: }
1638: value = strtol(token->text, &end, 16);
1639: if (*end != 0) {
1640: (void)printf("Illegal number conversion:%s:%d: %s\n",
1641: infile, yylineno, yytext);
1642: exit(1);
1643: }
1644: /*
1645: * If this is a 64-bit value we need to store two literals
1646: * and issue a `lxjoin' to combine them. But that's a future
1647: * project.
1648: */
1649: emit("b(lit)");
1650: spit(value>>24);
1651: spit((value>>16)&0x0ff);
1652: spit((value>>8)&0x0ff);
1653: spit(value&0x0ff);
1654: }
1655: break;
1656: case TOK_HEADERLESS:
1657: STATE(token->text, "TOK_HEADERLESS");
1658: state = 0;
1659: break;
1660: case TOK_HEADERS:
1661: STATE(token->text, "TOK_HEADERS");
1662: state = TOK_HEADERS;
1663: break;
1664: case TOK_OFFSET16:
1665: STATE(token->text, "TOK_OFFSET16");
1666: offsetsize = 16;
1667: emit("offset16");
1668: break;
1669: case TOK_IF:
1670: STATE(token->text, "TOK_IF");
1671: /*
1672: * Similar to do but simpler since we only deal w/one branch.
1673: */
1674: emit("b?branch");
1675: push(outpos);
1676: if (offsetsize == 16) {
1677: spit(0); /* Place holder for later */
1678: }
1679: spit(0); /* Place holder for later */
1680: break;
1681: case TOK_LEAVE:
1682: STATE(token->text, "TOK_LEAVE");
1683: emit("b(leave)");
1684: break;
1685: case TOK_LOOP:
1686: STATE(token->text, "TOK_LOOP");
1687:
1688: if (token->text[0] == '+')
1689: emit("b(+loop)");
1690: else
1691: emit("b(loop)");
1692: /* First do backwards branch of loop */
1693: pos = pop();
1694: off = pos - outpos;
1695: if (offsetsize == 16) {
1696: spit((off>>8)&0xff);
1697: }
1698: spit(off&0xff);
1699: /* Now do forward branch of do */
1700: pos = outpos;
1701: outpos = pop();
1702: off = pos - outpos;
1703: if (offsetsize == 16) {
1704: spit((off>>8)&0xff);
1705: }
1706: spit(off&0xff);
1707: /* Restore output position */
1708: outpos = pos;
1709: break;
1710: case TOK_OCTAL:
1711: STATE(token->text, "TOK_OCTAL");
1712: if (token->text[1] != '#') {
1713: if (defining) {
1714: spit(16);
1715: emit("base");
1716: emit("!");
1717: } else
1718: base = TOK_OCTAL;
1719: } else {
1720: char *end;
1721: Cell value;
1722:
1723: token = yylex();
1724: if (token == NULL) {
1725: (void)printf( "EOF after o#\n");
1726: return;
1727: }
1728: value = strtol(token->text, &end, 8);
1729: if (*end != 0) {
1730: (void)printf("Illegal number conversion:%s:%d: %s\n",
1731: infile, yylineno, yytext);
1732: exit(1);
1733: }
1734: /*
1735: * If this is a 64-bit value we need to store two literals
1736: * and issue a `lxjoin' to combine them. But that's a future
1737: * project.
1738: */
1739: emit("b(lit)");
1740: spit(value>>24);
1741: spit((value>>16)&0x0ff);
1742: spit((value>>8)&0x0ff);
1743: spit(value&0x0ff);
1744: }
1745: break;
1746: case TOK_OF:
1747: STATE(token->text, "TOK_OF");
1748: /*
1749: * Let's hope I get the semantics right.
1750: *
1751: * The `of' behaves almost the same as an
1752: * `if'. The difference is that `endof'
1753: * takes a branch offset to the associated
1754: * `endcase'. Here we will generate a temporary
1755: * offset of the `of' associated with the `endof'.
1756: * Then in `endcase' we should be pointing just
1757: * after the offset of the last `endof' so we
1758: * calculate the offset and thread our way backwards
1759: * searching for the previous `b(case)' or `b(endof)'.
1760: */
1761: emit("b(of)");
1762: push(outpos);
1763: if (offsetsize == 16) {
1764: spit(0);
1765: }
1766: spit(0); /* Place holder for later */
1767: break;
1768: case TOK_REPEAT:
1769: STATE(token->text, "TOK_REPEAT");
1770: emit("bbranch");
1771: pos = pop();
1772: off = pop();
1773: /* First the offset for the branch back to the begin */
1774: off -= outpos;
1775: if (offsetsize == 16) {
1776: spit((off>>8)&0xff);
1777: }
1778: spit(off&0xff);
1779: emit("b(>resolve)");
1780: /* Now point the offset of the while here. */
1781: off = outpos;
1782: outpos = pos;
1783: pos = off - pos;
1784: if (offsetsize == 16) {
1785: spit((pos>>8)&0xff);
1786: }
1787: spit(pos&0xff);
1788: /* Return to the end of the output */
1789: outpos = off;
1790: break;
1791: case TOK_THEN:
1792: STATE(token->text, "TOK_THEN");
1793: emit("b(>resolve)");
1794: pos = outpos;
1795: outpos = pop();
1796: off = pos - outpos;
1797: if (offsetsize == 16) {
1798: spit((off>>8)&0xff);
1799: }
1800: spit(off&0xff);
1801: outpos = pos;
1802: break;
1803: case TOK_TO:
1804: STATE(token->text, "TOK_TO");
1805: /* The next pass should tokenize the FCODE number */
1806: emit("b(to)");
1807: break;
1808: case TOK_UNTIL:
1809: STATE(token->text, "TOK_UNTIL");
1810: {
1811: int pos;
1812:
1813: emit("b?branch");
1814: pos = pop();
1815: pos -= outpos;
1816: if (offsetsize == 16) {
1817: spit((pos>>8)&0xff);
1818: }
1819: spit(pos&0xff);
1820: }
1821: break;
1822: case TOK_VALUE:
1823: STATE(token->text, "TOK_VALUE");
1824:
1825: token = yylex();
1826: if (token == NULL) {
1827: (void)printf( "EOF in value definition\n");
1828: return;
1829: }
1830:
1831: /* Add new code to dictionary */
1832: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1833: if (fcode == NULL)
1834: (void)err(1, "out of memory");
1835:
1.1 jason 1836: fcode->num = nextfcode++;
1837: fcode->name = strdup(token->text);
1.5 deraadt 1838: if (fcode->name == NULL)
1839: (void)err(1, "out of memory");
1840:
1.1 jason 1841: fadd(dictionary, fcode);
1842:
1843: if (state == 0)
1844: emit("new-token");
1845: else {
1846: if (state == TOK_EXTERNAL)
1847: emit("external-token");
1848: else
1849: /* Here we have a choice of new-token or named-token */
1850: emit("named-token");
1851: sspit(token->text);
1852: }
1853: spit(fcode->num);
1854: emit("b(value)");
1855: break;
1856: case TOK_VARIABLE:
1857: STATE(token->text, "TOK_VARIABLE");
1858:
1859: token = yylex();
1860: if (token == NULL) {
1861: (void)printf( "EOF in variable definition\n");
1862: return;
1863: }
1864:
1865: /* Add new code to dictionary */
1866: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1867: if (fcode == NULL)
1868: (void)err(1, "out of memory");
1869:
1.1 jason 1870: fcode->num = nextfcode++;
1871: fcode->name = strdup(token->text);
1.5 deraadt 1872: if (fcode->name == NULL)
1873: (void)err(1, "out of memory");
1874:
1.1 jason 1875: fadd(dictionary, fcode);
1876:
1877: if (state == 0)
1878: emit("new-token");
1879: else {
1880: if (state == TOK_EXTERNAL)
1881: emit("external-token");
1882: else
1883: /* Here we have a choice of new-token or named-token */
1884: emit("named-token");
1885: sspit(token->text);
1886: }
1887: spit(fcode->num);
1888: emit("b(variable)");
1889: break;
1890: case TOK_WHILE:
1891: STATE(token->text, "TOK_WHILE");
1892: emit("b?branch");
1893: push(outpos);
1894: if (offsetsize == 16) {
1895: spit(0);
1896: }
1897: spit(0);
1898: break;
1899:
1900: /* Tokenizer directives */
1901: case TOK_BEGTOK:
1902: STATE(token->text, "TOK_BEGTOK");
1903: tokenizer = 1;
1904: break;
1905: case TOK_EMIT_BYTE:
1906: STATE(token->text, "TOK_EMIT_BYTE");
1907: spit(pop());
1908: break;
1909: case TOK_ENDTOK:
1910: STATE(token->text, "TOK_ENDTOK");
1911: tokenizer = 0;
1912: break;
1913: case TOK_FLOAD:
1914: STATE(token->text, "TOK_FLOAD");
1915: /* Parse a different file for a while */
1916: token = yylex();
1917: if ((inf = fopen(token->text, "r")) == NULL) {
1918: (void)printf("%s: Could not open %s: %s\n",
1919: myname, token->text, strerror(errno));
1920: break;
1921: }
1922: inbuf = yy_create_buffer(inf, YY_BUF_SIZE);
1923: yy_switch_to_buffer(inbuf);
1924: {
1925: char *oldinfile = infile;
1926:
1927: infile = token->text;
1928: tokenize(inbuf);
1929: infile = oldinfile;
1930: }
1931: yy_switch_to_buffer(input);
1932: yy_delete_buffer(inbuf);
1933: fclose(inf);
1934: break;
1935: case TOK_OTHER:
1936: STATE(token->text, "TOK_OTHER");
1937: if (apply_macros(input, token->text))
1938: break;
1939: if (emit(token->text)) {
1940: #if 0
1941: /*
1942: * Call an external command
1943: *
1944: * XXXXX assumes it will always find the command
1945: */
1946: sspit(token->text);
1947: emit("$find");
1948: emit("drop");
1949: emit("execute");
1950: #else
1951: (void)printf( "%s: undefined token `%s'\n",
1952: myname, token->text);
1953: fflush(stderr);
1954: exit(1);
1955: #endif
1956: }
1957: break;
1958: default:
1.6 millert 1959: break;
1.1 jason 1960: }
1961: }
1962: return;
1963: }
1964:
/*
 * token_err -- report a tokenizer error and terminate.
 *
 * Prints "file:line: " when infile is non-NULL, then the printf-style
 * message built from fmt (when non-NULL), then the offending token
 * text (if any) on its own indented line.  Never returns.
 */
void
token_err(int lineno, char *infile, char *text, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (infile != NULL)
		(void)fprintf(stderr, "%s:%d: ", infile, lineno);
	if (fmt != NULL)
		(void)vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
	if (text != NULL)
		fprintf(stderr, "\t%s", text);
	exit(1);
}
1984:
1985: /*
1986: * Lookup fcode string in dictionary and spit it out.
1987: *
1988: * Fcode must be in dictionary. No alias conversion done.
1989: */
1990: int
1991: emit(str)
1992: char *str;
1993: {
1994: struct fcode *code;
1995: if ((code = flookup( dictionary, str)))
1996: spit(code->num);
1997: #ifdef DEBUG
1998: if (debug > 1) {
1999: if (code)
2000: (void)printf( "emitting `%s'\n", code->name);
2001: else
2002: (void)printf( "emit: not found `%s'\n", str);
2003: }
2004: #endif
2005: return (code == NULL);
2006: }
2007:
2008: /*
2009: * Spit out an integral value as a series of FCodes.
2010: *
2011: * It will spit out one zero byte or as many bytes as are
2012: * non-zero.
2013: */
2014: int
2015: spit(n)
2016: long n;
2017: {
2018: int count = 1;
2019:
2020: if (n >> 8)
2021: count += spit(n >> 8);
2022: if (outpos >= outbufsiz) {
2023: while (outpos >= outbufsiz) outbufsiz += BUFCLICK;
2024: if (!(outbuf = realloc(outbuf, outbufsiz))) {
2025: (void)printf( "realloc of %ld bytes failed -- out of memory\n",
2026: (long)outbufsiz);
2027: exit(1);
2028: }
2029: }
2030: outbuf[outpos++] = n;
2031: return (count);
2032: }
2033:
/*
 * sspit -- spit out an FCode string: a one-byte length followed by the
 * string's bytes.
 *
 * Strings longer than 255 bytes cannot be represented; they are
 * reported and silently skipped, matching the historic behavior.
 *
 * Each character is cast to unsigned char before being handed to
 * spit(): on platforms where plain `char' is signed, a high-bit byte
 * would otherwise be sign-extended to a negative long.  Also converted
 * from a K&R-style definition to an ANSI prototype.
 */
void
sspit(char *s)
{
	int len = strlen(s);

	if (len > 255) {
		(void)printf( "string length %d too long\n", len);
		return;
	}
#ifdef DEBUG
	if (debug > 1)
		(void)printf( "sspit: len %d str `%s'\n", len, s);
#endif
	spit(len);
	while (*s)
		spit((unsigned char)*s++);
}
2055:
/*
 * yywrap -- flex end-of-input hook.
 *
 * Always reports end of file; fload'ed include files are handled by
 * explicit buffer switching instead, so the scanner never chains to a
 * next input here.
 */
int
yywrap(void)
{
	return 1;
}