Annotation of src/usr.bin/fgen/fgen.l, Revision 1.8
1.1 jason 1: %{
1.8 ! sobrado 2: /* $OpenBSD: fgen.l,v 1.7 2003/11/09 20:13:57 otto Exp $ */
1.1 jason 3: /* $NetBSD: fgen.l,v 1.12 2001/06/13 10:46:05 wiz Exp $ */
4: /* FLEX input for FORTH input file scanner */
5: /*
6: * Copyright (c) 1998 Eduardo Horvath.
7: * All rights reserved.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed by Eduardo Horvath.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission
22: *
23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33: */
34: /*
35: Specifications are as follows:
36:
37: The function "yylex()" always returns a pointer to a structure:
38:
39: struct tok {
40: int type;
41: char *text;
42: }
43: #define TOKEN struct tok
44: */
45: %}
46:
47: %option yylineno
48:
49: decimal [0-9]
50: hex [0-9A-Fa-f]
51: octal [0-7]
52: white [ \t\n\r\f]
53: tail {white}
54:
55: %{
56: #include <sys/types.h>
57:
58: #include <assert.h>
59: #include <err.h>
60: #include <errno.h>
61: #include <fcntl.h>
62: #include <stdarg.h>
63: #include <stdio.h>
64: #include <string.h>
65: #include <unistd.h>
66:
67: #include "fgen.h"
68: TOKEN token;
69:
70: /*
71: * Global variables that control the parse state.
72: */
73:
74: struct fcode *dictionary = NULL;
75: struct macro *aliases = NULL;
76: int outf = 1; /* stdout */
77: int state = 0;
78: int nextfcode = 0x800;
79: int base = TOK_HEX;
80: long outpos;
81: char *outbuf = NULL;
82: char *outfile, *infile;
83: #define BUFCLICK (1024*1024)
84: size_t outbufsiz = 0;
85: char *myname = NULL;
86: int offsetsize = 8;
87: int defining = 0;
88: int tokenizer = 0;
89:
90: #define PSTKSIZ 1024
91: Cell parse_stack[PSTKSIZ];
92: int parse_stack_ptr = 0;
93:
1.2 millert 94: void token_err(int, char *, char *, char *, ...)
1.1 jason 95: __attribute__((__format__(__printf__, 4, 5)));
96: YY_DECL;
97:
98: int debug = 0;
99: #define ASSERT if (debug) assert
100: #define STATE(y, x) do { if (debug) printf( "%ld State %s: token `%s'\n", outpos, x, y); } while (0)
101:
102: #define YY_NO_UNPUT
103: %}
104:
105: %%
106:
107: 0 { token.type = TOK_OTHER; token.text = yytext;
108: return &token; }
109:
110: 1 { token.type = TOK_OTHER; token.text = yytext;
111: return &token; }
112:
113: 2 { token.type = TOK_OTHER; token.text = yytext;
114: return &token; }
115:
116: 3 { token.type = TOK_OTHER; token.text = yytext;
117: return &token; }
118:
119: -1 { token.type = TOK_OTHER; token.text = yytext;
120: return &token; }
121:
122: {white}* /* whitespace -- keep looping */ ;
123:
124: \\[^\n]*\n /* end of line comment -- keep looping */ { STATE(yytext, "EOL comment"); }
125:
126: -?{hex}+ { token.type = TOK_NUMBER; token.text = yytext;
127: return &token; }
128:
129: \'.\' { token.type = TOK_C_LIT; token.text = yytext; return &token; }
130:
131: \"{white}*(\\\"|[^"])*\" { token.type = TOK_STRING_LIT; token.text = yytext;
132: return &token; } /* String started by `"' or `."' */
133:
134: \.\({white}*(\\\"|[^)])*\) { token.type = TOK_PSTRING; token.text = yytext;
135: return &token; } /* String of type `.(.....)' */
136:
137: \.\"{white}*(\\\"|[^"])*\" { token.type = TOK_PSTRING; token.text = yytext;
138: return &token; }
139:
140: "(" { token.type = TOK_COMMENT; token.text = yytext;
141: return &token; }
142:
143: ")" { token.type = TOK_ENDCOMMENT; token.text = yytext;
144: return &token; }
145:
146: ":" { token.type = TOK_COLON; token.text = yytext;
147: return &token; }
148:
149: ";" { token.type = TOK_SEMICOLON; token.text = yytext;
150: return &token; }
151:
152: \' { token.type = TOK_TOKENIZE; token.text = yytext;
153: return &token; }
154:
155: [aA][gG][aA][iI][nN] { token.type = TOK_AGAIN; token.text = yytext;
156: return &token; }
157:
158: [aA][lL][iI][aA][sS] { token.type = TOK_ALIAS; token.text = yytext;
159: return &token; }
160:
161: \[\'\] { token.type = TOK_GETTOKEN; token.text = yytext;
162: return &token; }
163:
164: [aA][sS][cC][iI][iI] { token.type = TOK_ASCII; token.text = yytext;
165: return &token; }
166:
167: [bB][eE][gG][iI][nN] { token.type = TOK_BEGIN; token.text = yytext;
168: return &token; }
169:
170: [bB][uU][fF][fF][eE][rR]: { token.type = TOK_BUFFER; token.text = yytext;
171: return &token; }
172:
173: [cC][aA][sS][eE] { token.type = TOK_CASE; token.text = yytext;
174: return &token; }
175:
176: [cC][oO][nN][sS][tT][aA][nN][tT] { token.type = TOK_CONSTANT; token.text = yytext;
177: return &token; }
178:
179: [cC][oO][nN][tT][rR][oO][lL] { token.type = TOK_CONTROL; token.text = yytext;
180: return &token; }
181:
182: [cC][rR][eE][aA][tT][eE] { token.type = TOK_CREATE; token.text = yytext;
183: return &token; }
184:
185: [dD]# { token.type = TOK_DECIMAL; token.text = yytext;
186: return &token; }
187:
188: [dD][eE][cC][iI][mM][aA][lL] { token.type = TOK_DECIMAL; token.text = yytext;
189: return &token; }
190:
191: [dD][eE][fF][eE][rR] { token.type = TOK_DEFER; token.text = yytext;
192: return &token; }
193:
194: \??[dD][oO] { token.type = TOK_DO; token.text = yytext;
195: return &token; }
196:
197: [eE][lL][sS][eE] { token.type = TOK_ELSE; token.text = yytext;
198: return &token; }
199:
200: [eE][nN][dD][cC][aA][sS][eE] { token.type = TOK_ENDCASE; token.text = yytext;
201: return &token; }
202:
203: [eE][nN][dD][oO][fF] { token.type = TOK_ENDOF; token.text = yytext;
204: return &token; }
205:
206: [eE][xX][tT][eE][rR][nN][aA][lL] { token.type = TOK_EXTERNAL; token.text = yytext;
207: return &token; }
208:
209: [fF][iI][eE][lL][dD] { token.type = TOK_FIELD; token.text = yytext;
210: return &token; }
211:
212: [hH]# { token.type = TOK_HEX; token.text = yytext;
213: return &token; }
214:
215: [hH][eE][aA][dD][eE][rR][lL][eE][sS][sS] { token.type = TOK_HEADERLESS; token.text = yytext;
216: return &token; }
217:
218: [hH][eE][aA][dD][eE][rR][sS] { token.type = TOK_HEADERS; token.text = yytext;
219: return &token; }
220:
221: [hH][eE][xX] { token.type = TOK_HEX; token.text = yytext;
222: return &token; }
223:
224: [iI][fF] { token.type = TOK_IF; token.text = yytext;
225: return &token; }
226:
227: \??[lL][eE][aA][vV][eE] { token.type = TOK_LEAVE; token.text = yytext;
228: return &token; }
229:
230: \+?[lL][oO][oO][pP] { token.type = TOK_LOOP; token.text = yytext;
231: return &token; }
232:
233: [oO]# { token.type = TOK_OCTAL; token.text = yytext;
234: return &token; }
235:
236: [oO][cC][tT][aA][lL] { token.type = TOK_OCTAL; token.text = yytext;
237: return &token; }
238:
239: [oO][fF] { token.type = TOK_OF; token.text = yytext;
240: return &token; }
241:
242: [rR][eE][pP][eE][aA][tT] { token.type = TOK_REPEAT; token.text = yytext;
243: return &token; }
244:
245: [tT][hH][eE][nN] { token.type = TOK_THEN; token.text = yytext;
246: return &token; }
247:
248: [tT][oO] { token.type = TOK_TO; token.text = yytext;
249: return &token; }
250:
251: [uU][nN][tT][iI][lL] { token.type = TOK_UNTIL; token.text = yytext;
252: return &token; }
253:
254: [vV][aA][lL][uU][eE] { token.type = TOK_VALUE; token.text = yytext;
255: return &token; }
256:
257: [vV][aA][rR][iI][aA][bB][lL][eE] { token.type = TOK_VARIABLE; token.text = yytext;
258: return &token; }
259:
260: [wW][hH][iI][lL][eE] { token.type = TOK_WHILE; token.text = yytext;
261: return &token; }
262:
263: offset16 { token.type = TOK_OFFSET16; token.text = yytext;
264: return &token; }
265:
266: tokenizer\[ { token.type = TOK_BEGTOK; token.text = yytext;
267: return &token; }
268:
269: emit-byte { token.type = TOK_EMIT_BYTE; token.text = yytext;
270: return &token; }
271:
272: \]tokenizer { token.type = TOK_ENDTOK; token.text = yytext;
273: return &token; }
274:
275: fload { token.type = TOK_FLOAD; token.text = yytext;
276: return &token; }
277:
278:
279: [^ \n\t\r\f]+ { token.type = TOK_OTHER; token.text = yytext;
280: return &token; }
281:
282: <<EOF>> { return NULL; }
283: %%
284:
285: /* Function definitions */
1.2 millert 286: void push(Cell);
287: Cell pop(void);
288: int depth(void);
289: int fadd(struct fcode *, struct fcode *);
290: struct fcode *flookup(struct fcode *, char *);
291: int aadd(struct macro *, struct macro *);
292: struct macro *alookup(struct macro *, char *);
293: void initdic(void);
294: void usage(char *);
295: void tokenize(YY_BUFFER_STATE);
296: int emit(char *);
297: int spit(long);
298: void sspit(char *);
299: int apply_macros(YY_BUFFER_STATE, char *);
300: int main(int argc, char *argv[]);
1.1 jason 301:
302: /*
303: * Standard FCode names and numbers. Includes standard
304: * tokenizer aliases.
305: */
306: struct fcode fcodes[] = {
307: { "end0", 0x0000 },
308: { "b(lit)", 0x0010 },
309: { "b(')", 0x0011 },
310: { "b(\")", 0x0012 },
311: { "bbranch", 0x0013 },
312: { "b?branch", 0x0014 },
313: { "b(loop)", 0x0015 },
314: { "b(+loop)", 0x0016 },
315: { "b(do)", 0x0017 },
316: { "b(?do)", 0x0018 },
317: { "i", 0x0019 },
318: { "j", 0x001a },
319: { "b(leave)", 0x001b },
320: { "b(of)", 0x001c },
321: { "execute", 0x001d },
322: { "+", 0x001e },
323: { "-", 0x001f },
324: { "*", 0x0020 },
325: { "/", 0x0021 },
326: { "mod", 0x0022 },
327: { "and", 0x0023 },
328: { "or", 0x0024 },
329: { "xor", 0x0025 },
330: { "invert", 0x0026 },
331: { "lshift", 0x0027 },
332: { "rshift", 0x0028 },
333: { ">>a", 0x0029 },
334: { "/mod", 0x002a },
335: { "u/mod", 0x002b },
336: { "negate", 0x002c },
337: { "abs", 0x002d },
338: { "min", 0x002e },
339: { "max", 0x002f },
340: { ">r", 0x0030 },
341: { "r>", 0x0031 },
342: { "r@", 0x0032 },
343: { "exit", 0x0033 },
344: { "0=", 0x0034 },
345: { "0<>", 0x0035 },
346: { "0<", 0x0036 },
347: { "0<=", 0x0037 },
348: { "0>", 0x0038 },
349: { "0>=", 0x0039 },
350: { "<", 0x003a },
351: { ">", 0x003b },
352: { "=", 0x003c },
353: { "<>", 0x003d },
354: { "u>", 0x003e },
355: { "u<=", 0x003f },
356: { "u<", 0x0040 },
357: { "u>=", 0x0041 },
358: { ">=", 0x0042 },
359: { "<=", 0x0043 },
360: { "between", 0x0044 },
361: { "within", 0x0045 },
362: { "drop", 0x0046 },
363: { "dup", 0x0047 },
364: { "over", 0x0048 },
365: { "swap", 0x0049 },
366: { "rot", 0x004a },
367: { "-rot", 0x004b },
368: { "tuck", 0x004c },
369: { "nip", 0x004d },
370: { "pick", 0x004e },
371: { "roll", 0x004f },
372: { "?dup", 0x0050 },
373: { "depth", 0x0051 },
374: { "2drop", 0x0052 },
375: { "2dup", 0x0053 },
376: { "2over", 0x0054 },
377: { "2swap", 0x0055 },
378: { "2rot", 0x0056 },
379: { "2/", 0x0057 },
380: { "u2/", 0x0058 },
381: { "2*", 0x0059 },
382: { "/c", 0x005a },
383: { "/w", 0x005b },
384: { "/l", 0x005c },
385: { "/n", 0x005d },
386: { "ca+", 0x005e },
387: { "wa+", 0x005f },
388: { "la+", 0x0060 },
389: { "na+", 0x0061 },
390: { "char+", 0x0062 },
391: { "wa1+", 0x0063 },
392: { "la1+", 0x0064 },
393: { "cell+", 0x0065 },
394: { "chars", 0x0066 },
395: { "/w*", 0x0067 },
396: { "/l*", 0x0068 },
397: { "cells", 0x0069 },
398: { "on", 0x006a },
399: { "off", 0x006b },
400: { "+!", 0x006c },
401: { "@", 0x006d },
402: { "l@", 0x006e },
403: { "w@", 0x006f },
404: { "<w@", 0x0070 },
405: { "c@", 0x0071 },
406: { "!", 0x0072 },
407: { "l!", 0x0073 },
408: { "w!", 0x0074 },
409: { "c!", 0x0075 },
410: { "2@", 0x0076 },
411: { "2!", 0x0077 },
412: { "move", 0x0078 },
413: { "fill", 0x0079 },
414: { "comp", 0x007a },
415: { "noop", 0x007b },
416: { "lwsplit", 0x007c },
417: { "wjoin", 0x007d },
418: { "lbsplit", 0x007e },
419: { "bljoin", 0x007f },
420: { "wbflip", 0x0080 },
421: { "upc", 0x0081 },
422: { "lcc", 0x0082 },
423: { "pack", 0x0083 },
424: { "count", 0x0084 },
425: { "body>", 0x0085 },
426: { ">body", 0x0086 },
427: { "fcode-revision", 0x0087 },
428: { "span", 0x0088 },
429: { "unloop", 0x0089 },
430: { "expect", 0x008a },
431: { "alloc-mem", 0x008b },
432: { "free-mem", 0x008c },
433: { "key?", 0x008d },
434: { "key", 0x008e },
435: { "emit", 0x008f },
436: { "type", 0x0090 },
437: { "(cr", 0x0091 },
438: { "cr", 0x0092 },
439: { "#out", 0x0093 },
440: { "#line", 0x0094 },
441: { "hold", 0x0095 },
442: { "<#", 0x0096 },
443: { "u#>", 0x0097 },
444: { "sign", 0x0098 },
445: { "u#", 0x0099 },
446: { "u#s", 0x009a },
447: { "u.", 0x009b },
448: { "u.r", 0x009c },
449: { ".", 0x009d },
450: { ".r", 0x009e },
451: { ".s", 0x009f },
452: { "base", 0x00a0 },
453: { "convert", 0x00a1 },
454: { "$number", 0x00a2 },
455: { "digit", 0x00a3 },
456: { "-1", 0x00a4 },
457: { "true", 0x00a4 },
458: { "0", 0x00a5 },
459: { "1", 0x00a6 },
460: { "2", 0x00a7 },
461: { "3", 0x00a8 },
462: { "bl", 0x00a9 },
463: { "bs", 0x00aa },
464: { "bell", 0x00ab },
465: { "bounds", 0x00ac },
466: { "here", 0x00ad },
467: { "aligned", 0x00ae },
468: { "wbsplit", 0x00af },
469: { "bwjoin", 0x00b0 },
470: { "b(<mark)", 0x00b1 },
471: { "b(>resolve)", 0x00b2 },
472: { "set-token-table", 0x00b3 },
473: { "set-table", 0x00b4 },
474: { "new-token", 0x00b5 },
475: { "named-token", 0x00b6 },
476: { "b(:)", 0x00b7 },
477: { "b(value)", 0x00b8 },
478: { "b(variable)", 0x00b9 },
479: { "b(constant)", 0x00ba },
480: { "b(create)", 0x00bb },
481: { "b(defer)", 0x00bc },
482: { "b(buffer:)", 0x00bd },
483: { "b(field)", 0x00be },
484: { "b(code)", 0x00bf },
485: { "instance", 0x00c0 },
486: { "b(;)", 0x00c2 },
487: { "b(to)", 0x00c3 },
488: { "b(case)", 0x00c4 },
489: { "b(endcase)", 0x00c5 },
490: { "b(endof)", 0x00c6 },
491: { "#", 0x00c7 },
492: { "#s", 0x00c8 },
493: { "#>", 0x00c9 },
494: { "external-token", 0x00ca },
495: { "$find", 0x00cb },
496: { "offset16", 0x00cc },
497: { "evaluate", 0x00cd },
498: { "c,", 0x00d0 },
499: { "w,", 0x00d1 },
500: { "l,", 0x00d2 },
501: { "'", 0x00d3 },
502: { "um*", 0x00d4 },
503: { "um/mod", 0x00d5 },
504: { "d+", 0x00d8 },
505: { "d-", 0x00d9 },
506: { "get-token", 0x00da },
507: { "set-token", 0x00db },
508: { "state", 0x00dc },
509: { "compile,", 0x00dd },
510: { "behavior", 0x00de },
511: { "start0", 0x00f0 },
512: { "start1", 0x00f1 },
513: { "start2", 0x00f2 },
514: { "start4", 0x00f3 },
515: { "ferror", 0x00fc },
516: { "version1", 0x00fd },
517: { "4-byte-id", 0x00fe },
518: { "end1", 0x00ff },
519: { "dma-alloc", 0x0101 },
520: { "my-address", 0x0102 },
521: { "my-space", 0x0103 },
522: { "memmap", 0x0104 },
523: { "free-virtual", 0x0105 },
524: { ">physical", 0x0106 },
525: { "my-params", 0x010f },
526: { "property", 0x0110 },
527: { "encode-int", 0x0111 },
528: { "encode+", 0x0112 },
529: { "encode-phys", 0x0113 },
530: { "encode-string", 0x0114 },
531: { "encode-bytes", 0x0115 },
532: { "reg", 0x0116 },
533: { "intr", 0x0117 },
534: { "driver", 0x0118 },
535: { "model", 0x0119 },
536: { "device-type", 0x011a },
537: { "parse-2int", 0x011b },
538: { "is-install", 0x011c },
539: { "is-remove", 0x011d },
540: { "is-selftest", 0x011e },
541: { "new-device", 0x011f },
542: { "diagnostic-mode?", 0x0120 },
543: { "display-status", 0x0121 },
544: { "memory-test-suite", 0x0122 },
545: { "group-code", 0x0123 },
546: { "mask", 0x0124 },
547: { "get-msecs", 0x0125 },
548: { "ms", 0x0126 },
549: { "find-device", 0x0127 },
550: { "decode-phys", 0x0128 },
551: { "map-low", 0x0130 },
552: { "sbus-intr>cpu", 0x0131 },
553: { "#lines", 0x0150 },
554: { "#columns", 0x0151 },
555: { "line#", 0x0152 },
556: { "column#", 0x0153 },
557: { "inverse?", 0x0154 },
558: { "inverse-screen?", 0x0155 },
559: { "frame-buffer-busy?", 0x0156 },
560: { "draw-character", 0x0157 },
561: { "reset-screen", 0x0158 },
562: { "toggle-cursor", 0x0159 },
563: { "erase-screen", 0x015a },
564: { "blink-screen", 0x015b },
565: { "invert-screen", 0x015c },
566: { "insert-characters", 0x015d },
567: { "delete-characters", 0x015e },
568: { "insert-lines", 0x015f },
569: { "delete-lines", 0x0160 },
570: { "draw-logo", 0x0161 },
571: { "frame-buffer-addr", 0x0162 },
572: { "screen-height", 0x0163 },
573: { "screen-width", 0x0164 },
574: { "window-top", 0x0165 },
575: { "window-left", 0x0166 },
576: { "default-font", 0x016a },
577: { "set-font", 0x016b },
578: { "char-height", 0x016c },
579: { "char-width", 0x016d },
580: { ">font", 0x016e },
581: { "fontbytes", 0x016f },
582: { "fb8-draw-character", 0x0180 },
583: { "fb8-reset-screen", 0x0181 },
584: { "fb8-toggle-cursor", 0x0182 },
585: { "fb8-erase-screen", 0x0183 },
586: { "fb8-blink-screen", 0x0184 },
587: { "fb8-invert-screen", 0x0185 },
588: { "fb8-insert-characters", 0x0186 },
589: { "fb8-delete-characters", 0x0187 },
590: { "fb8-inisert-lines", 0x0188 },
591: { "fb8-delete-lines", 0x0189 },
592: { "fb8-draw-logo", 0x018a },
593: { "fb8-install", 0x018b },
594: { "return-buffer", 0x01a0 },
595: { "xmit-packet", 0x01a1 },
596: { "poll-packet", 0x01a2 },
597: { "mac-address", 0x01a4 },
598: { "device-name", 0x0201 },
599: { "my-args", 0x0202 },
600: { "my-self", 0x0203 },
601: { "find-package", 0x0204 },
602: { "open-package", 0x0205 },
603: { "close-package", 0x0206 },
604: { "find-method", 0x0207 },
605: { "call-package", 0x0208 },
606: { "$call-parent", 0x0209 },
607: { "my-parent", 0x020a },
608: { "ihandle>phandle", 0x020b },
609: { "my-unit", 0x020d },
610: { "$call-method", 0x020e },
611: { "$open-package", 0x020f },
612: { "processor-type", 0x0210 },
613: { "firmware-version", 0x0211 },
614: { "fcode-version", 0x0212 },
615: { "alarm", 0x0213 },
616: { "(is-user-word)", 0x0214 },
617: { "suspend-fcode", 0x0215 },
618: { "abort", 0x0216 },
619: { "catch", 0x0217 },
620: { "throw", 0x0218 },
621: { "user-abort", 0x0219 },
622: { "get-my-property", 0x021a },
623: { "decode-int", 0x021b },
624: { "decode-string", 0x021c },
625: { "get-inherited-property", 0x021d },
626: { "delete-property", 0x021e },
627: { "get-package-property", 0x021f },
628: { "cpeek", 0x0220 },
629: { "wpeek", 0x0221 },
630: { "lpeek", 0x0222 },
631: { "cpoke", 0x0223 },
632: { "wpoke", 0x0224 },
633: { "lpoke", 0x0225 },
634: { "lwflip", 0x0226 },
635: { "lbflip", 0x0227 },
636: { "lbflips", 0x0228 },
637: { "adr-mask", 0x0229 },
638: { "rb@", 0x0230 },
639: { "rb!", 0x0231 },
640: { "rw@", 0x0232 },
641: { "rw!", 0x0233 },
642: { "rl@", 0x0234 },
643: { "rl!", 0x0235 },
644: { "wbflips", 0x0236 },
645: { "lwflips", 0x0237 },
646: { "probe", 0x0238 },
647: { "probe-virtual", 0x0239 },
648: { "child", 0x023b },
649: { "peer", 0x023c },
650: { "next-property", 0x023d },
651: { "byte-load", 0x023e },
652: { "set-args", 0x023f },
653: { "left-parse-string", 0x0240 },
654: /* 64-bit FCode extensions */
655: { "bxjoin", 0x0241 },
656: { "<l@", 0x0242 },
657: { "lxjoin", 0x0243 },
658: { "rx@", 0x022e },
659: { "rx!", 0x022f },
660: { "wxjoin", 0x0244 },
661: { "x,", 0x0245 },
662: { "x@", 0x0246 },
663: { "x!", 0x0247 },
664: { "/x", 0x0248 },
665: { "/x*", 0x0249 },
666: { "xa+", 0x024a },
667: { "xa1+", 0x024b },
668: { "xbflip", 0x024c },
669: { "xbflips", 0x024d },
670: { "xbsplit", 0x024e },
671: { "xlflip", 0x024f },
672: { "xlflips", 0x0250 },
673: { "xlsplit", 0x0251 },
674: { "xwflip", 0x0252 },
675: { "xwflips", 0x0253 },
676: { "xwsplit", 0x0254 },
1.4 jason 677: { NULL, 0 }
1.1 jason 678: };
679:
680: /*
681: * Default macros -- can be overridden by colon definitions.
682: */
683: struct macro macros[] = {
684: { "eval", "evaluate" }, /* Build a more balanced tree */
685: { "(.)", "dup abs <# u#s swap sign u#>" },
686: { "<<", "lshift" },
687: { ">>", "rshift" },
688: { "?", "@ ." },
689: { "1+", "1 +" },
690: { "1-", "1 -" },
691: { "2+", "2 +" },
692: { "2-", "2 -" },
693: { "abort\"", "-2 throw" },
694: { "accept", "span @ -rot expect span @ swap span !" },
695: { "allot", "0 max 0 ?do 0 c, loop" },
696: { "blank", "bl fill" },
697: { "/c*", "chars" },
698: { "ca1+", "char+" },
699: { "carret", "b(lit) 00 00 00 0x0d" },
700: { ".d" "base @ swap 0x0a base ! . base !" },
701: { "decode-bytes", ">r over r@ + swap r@ - rot r>" },
702: { "3drop", "drop 2drop" },
703: { "3dup", "2 pick 2 pick 2 pick" },
704: { "erase", "0 fill" },
705: { "false", "0" },
706: { ".h" "base @ swap 0x10 base ! . base !" },
707: { "linefeed", "b(lit) 00 00 00 0x0a" },
708: { "/n*", "cells" },
709: { "na1+", "cell+", },
710: { "not", "invert", },
711: { "s.", "(.) type space" },
712: { "space", "bl emit" },
713: { "spaces", "0 max 0 ?do space loop" },
714: { "struct", "0" },
715: { "true", "-1" },
716: { "(u,)", "<# u#s u#>" },
717: { NULL, NULL }
718: };
719:
720: /*
721: * Parser stack control functions.
722: */
723:
724: void
725: push(val)
726: Cell val;
727: {
728: parse_stack[parse_stack_ptr++] = val;
729: if (parse_stack_ptr >= PSTKSIZ) {
730: (void)printf( "Parse stack overflow\n");
731: exit(1);
732: }
733: }
734:
735: Cell
736: pop()
737: {
738: ASSERT(parse_stack_ptr);
739: return parse_stack[--parse_stack_ptr];
740: }
741:
742: int
743: depth()
744: {
745: return (parse_stack_ptr);
746: }
747:
748: /*
749: * Insert fcode into dictionary.
750: */
751: int
752: fadd(dict, new)
753: struct fcode *dict, *new;
754: {
755: int res = strcmp(dict->name, new->name);
756:
757: #ifdef DEBUG
758: new->type = FCODE;
759: ASSERT(dict->type == FCODE);
760: #endif
761: /* Don't allow duplicate entries. */
762: if (!res) return (0);
763: if (res < 0) {
764: if (dict->l)
765: return fadd(dict->l, new);
766: else {
767: #ifdef DEBUG
768: if (debug > 1)
769: (void)printf( "fadd: new FCode `%s' is %lx\n",
770: new->name, new->num);
771: #endif
772: new->l = new->r = NULL;
773: dict->l = new;
774: }
775: } else {
776: if (dict->r)
777: return fadd(dict->r, new);
778: else {
779: #ifdef DEBUG
780: if (debug > 1)
781: (void)printf( "fadd: new FCode `%s' is %lx\n",
782: new->name, new->num);
783: #endif
784: new->l = new->r = NULL;
785: dict->r = new;
786: }
787: }
788: return (1);
789: }
790:
791: /*
792: * Look for a code in the dictionary.
793: */
794: struct fcode *
795: flookup(dict, str)
796: struct fcode *dict;
797: char *str;
798: {
799: int res;
800: if (!dict) return (dict);
801:
802: res = strcmp(dict->name, str);
803: #ifdef DEBUG
804: ASSERT(dict->type == FCODE);
805: if (debug > 2)
806: (void)printf( "flookup: `%s' and `%s' %s match\n",
807: str, dict->name, res?"don't":"do");
808: #endif
809: if (!res) return (dict);
810: if (res < 0)
811: return (flookup(dict->l, str));
812: else
813: return (flookup(dict->r, str));
814:
815: }
816:
817: /*
818: * Insert alias into macros.
819: */
820: int
821: aadd(dict, new)
822: struct macro *dict, *new;
823: {
824: int res = strcmp(dict->name, new->name);
825:
826: #ifdef DEBUG
827: new->type = MACRO;
828: ASSERT(dict->type == MACRO);
829: #endif
830: /* Don't allow duplicate entries. */
831: if (!res) return (0);
832: if (res < 0) {
833: if (dict->l)
834: return aadd(dict->l, new);
835: else {
836: new->l = new->r = NULL;
837: dict->l = new;
838: #ifdef DEBUG
839: if (debug > 1)
840: (void)printf( "aadd: new alias `%s' to `%s'\n",
841: new->name, new->equiv);
842: #endif
843: }
844: } else {
845: if (dict->r)
846: return aadd(dict->r, new);
847: else {
848: new->l = new->r = NULL;
849: dict->r = new;
850: #ifdef DEBUG
851: if (debug > 1)
852: (void)printf( "aadd: new alias `%s' to `%s'\n",
853: new->name, new->equiv);
854: #endif
855: }
856: }
857: return (1);
858: }
859:
860: /*
861: * Look for a macro in the aliases.
862: */
863: struct macro *
864: alookup(dict, str)
865: struct macro *dict;
866: char *str;
867: {
868: int res;
869: if (!dict) return (dict);
870:
871: #ifdef DEBUG
872: ASSERT(dict->type == MACRO);
873: #endif
874: res = strcmp(dict->name, str);
875: if (!res) return (dict);
876: if (res < 0)
877: return (alookup(dict->l, str));
878: else
879: return (alookup(dict->r, str));
880:
881: }
882:
883: /*
884: * Bootstrap the dictionary and then install
885: * all the standard FCodes.
886: */
887: void
888: initdic()
889: {
890: struct fcode *code = fcodes;
891: struct macro *alias = macros;
892:
893: ASSERT(dictionary == NULL);
894: code->l = code->r = NULL;
895: dictionary = code;
896: #ifdef DEBUG
897: code->type = FCODE;
898: #endif
899:
900: while ((++code)->name) {
901: if(!fadd(dictionary, code)) {
902: printf("init: duplicate dictionary entry %s\n",
903: code->name);
904: abort();
905: }
906: }
907:
908: ASSERT(aliases == NULL);
909: aliases = alias;
910: alias->l = alias->r = NULL;
911: #ifdef DEBUG
912: alias->type = MACRO;
913: #endif
914: while ((++alias)->name) {
915: if(!aadd(aliases, alias)) {
916: printf("init: duplicate macro entry %s\n",
917: alias->name);
918: abort();
919: }
920: }
921:
922: }
923:
924: int
925: apply_macros(input, str)
926: YY_BUFFER_STATE input;
927: char *str;
928: {
929: struct macro *xform = alookup(aliases, str);
930:
931: if (xform) {
932: YY_BUFFER_STATE newbuf;
933:
934: newbuf = yy_scan_string(xform->equiv);
935: yy_switch_to_buffer(newbuf);
936: tokenize(newbuf);
937: yy_switch_to_buffer(input);
938: yy_delete_buffer(newbuf);
939: }
940: return (xform != NULL);
941: }
942:
/* Print a usage summary to stderr and exit unsuccessfully. */
void
usage(char *me)
{
	(void)fprintf(stderr, "usage: %s [-d level] [-o outfile] infile\n", me);
	exit(1);
}
950:
951: int
952: main(argc, argv)
953: int argc;
954: char *argv[];
955: {
956: int bflag, ch;
957: FILE *inf;
958: struct fcode_header *fheader;
959: YY_BUFFER_STATE inbuf;
960: char *hdrtype = "version1";
961: int i;
962:
963: outf = 1; /* stdout */
964: myname = argv[0];
965:
966: bflag = 0;
967: while ((ch = getopt(argc, argv, "d:o:")) != -1)
968: switch(ch) {
969: case 'd':
970: debug = atol(optarg);
971: break;
972: case 'o':
973: outfile = optarg;
974: break;
975: case '?':
976: default:
1.3 nordin 977: warnx("Illegal argument: %c", ch);
1.1 jason 978: usage(myname);
979: }
980: argc -= optind;
981: argv += optind;
982:
983: if (argc != 1)
984: usage(myname);
985:
986: infile = argv[0];
987:
988: /*
989: * Initialization stuff.
990: */
991: initdic();
992: outbufsiz = BUFCLICK;
993: outbuf = malloc(outbufsiz);
1.5 deraadt 994: if (outbuf == NULL)
995: (void)err(1, "out of memory");
996:
1.1 jason 997: fheader = (struct fcode_header *)outbuf;
998: outpos = 0;
999: emit(hdrtype);
1000: outpos = sizeof(*fheader);
1001:
1002: /*
1003: * Do it.
1004: */
1005: if ((inf = fopen(infile, "r")) == NULL)
1006: (void)err(1, "can not open %s for reading", infile);
1007:
1008: inbuf = yy_create_buffer( inf, YY_BUF_SIZE );
1009: yy_switch_to_buffer(inbuf);
1010: tokenize(inbuf);
1011: yy_delete_buffer(inbuf);
1012: fclose(inf);
1013: emit("end0");
1014:
1015: /* Now calculate length and checksum and stick them in the header */
1016: fheader->format = 0x08;
1017: fheader->length = htonl(outpos);
1018: fheader->checksum = 0;
1019: for (i = sizeof(*fheader); i<outpos; i++)
1020: fheader->checksum += outbuf[i];
1021: fheader->checksum = htons(fheader->checksum);
1022:
1.4 jason 1023: if ((outf = open(outfile, O_WRONLY|O_CREAT|O_TRUNC, 0666)) == -1)
1.1 jason 1024: err(1, "can out open %s for writing", outfile);
1025:
1026: if (write(outf, outbuf, outpos) != outpos) {
1027: close(outf);
1028: unlink(outfile);
1029: err(1, "write error");
1030: }
1031: close(outf);
1032: return (0);
1033: };
1034:
1035: /*
1036: * Tokenize one file. This is a separate function so it can
1.7 otto 1037: * be called recursively to parse multiple levels of include files.
1.1 jason 1038: */
1039:
1040: void
1041: tokenize(input)
1042: YY_BUFFER_STATE input;
1043: {
1044: FILE *inf;
1045: YY_BUFFER_STATE inbuf;
1046: TOKEN *token;
1047: char *last_token = "";
1048: struct fcode *fcode;
1049: int pos, off;
1050:
1051: while ((token = yylex()) != NULL) {
1052: switch (token->type) {
1053: case TOK_NUMBER:
1054: STATE(token->text, "TOK_NUMBER");
1055: {
1056: char *end;
1057: Cell value;
1058:
1059: if (tokenizer) {
1060: push(strtol(token->text, &end, 16));
1061: break;
1062: }
1063: value = strtol(token->text, &end, base);
1064: if (*end != 0)
1065: token_err(yylineno, infile, yytext,
1066: "illegal number conversion");
1067:
1068: /*
1069: * If this is a 64-bit value we need to store two literals
1070: * and issue a `lxjoin' to combine them. But that's a future
1071: * project.
1072: */
1073: emit("b(lit)");
1074: spit(value>>24);
1075: spit((value>>16)&0x0ff);
1076: spit((value>>8)&0x0ff);
1077: spit(value&0x0ff);
1078: }
1079: break;
1080: case TOK_C_LIT:
1081: STATE(token->text, "TOK_C_LIT");
1082: emit("b(lit)");
1083: spit(0);
1084: spit(0);
1085: spit(0);
1086: spit(token->text[1]);
1087: break;
1088: case TOK_STRING_LIT:
1089: STATE(token->text, "TOK_STRING_LIT:");
1090: {
1091: int len;
1092: char *p = token->text;
1093:
1094: ++p; /* Skip the quote */
1095: len = strlen(++p); /* Skip the 1st space */
1096:
1097: #define ERR_TOOLONG \
1098: token_err(yylineno, infile, yytext, "string length %d too long", len)
1099:
1100: if (len > 255)
1101: ERR_TOOLONG;
1102:
1103: if (p[len-1] == ')' ||
1104: p[len-1] == '"') {
1105: p[len-1] = 0;
1106: }
1107: emit("b(\")");
1108: sspit(p);
1109: }
1110: break;
1111: case TOK_PSTRING:
1112: STATE(token->text, "TOK_PSTRING:");
1113: {
1114: int len;
1115: char *p = token->text;
1116:
1117: if (*p++ == '.') p++; /* Skip over delimiter */
1118: p++; /* Skip over space/tab */
1119:
1120: len = strlen(p);
1121: if (len > 255)
1122: ERR_TOOLONG;
1123:
1124: if (p[len-1] == ')' ||
1125: p[len-1] == '"') {
1126: p[len-1] = 0;
1127: }
1128: emit("b(\")");
1129: sspit(p);
1130: emit("type");
1131: }
1132: break;
1133: case TOK_TOKENIZE:
1134: STATE(token->text, "TOK_TOKENIZE");
1135: /* The next pass should tokenize the FCODE number */
1136: emit("b(')");
1137: break;
1138: case TOK_COMMENT:
1139: STATE(token->text, "TOK_COMMENT:");
1140: while (((token = yylex()) != NULL) && token->type != TOK_ENDCOMMENT)
1141: ;
1142: break;
1143: case TOK_ENDCOMMENT:
1144: STATE(token->text, "TOK_ENDCOMMENT");
1145: token_err(yylineno, infile, NULL,
1146: "ENDCOMMENT encountered outside comment");
1147: break;
1148: case TOK_COLON:
1149: STATE(token->text, "TOK_COLON:");
1150:
1151: token = yylex();
1152: if (token == NULL)
1153: token_err(yylineno, infile, yytext,
1154: "EOF in colon definition");
1155:
1156: /* Add new code to dictionary */
1157: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1158: if (fcode == NULL)
1159: (void)err(1, "out of memory");
1160:
1.1 jason 1161: fcode->num = nextfcode++;
1162: fcode->name = strdup(token->text);
1.5 deraadt 1163: if (fcode->name == NULL)
1164: (void)err(1, "out of memory");
1165:
1.1 jason 1166: if (!fadd(dictionary, fcode))
1167: token_err(yylineno, infile, NULL,
1168: "Duplicate definition: `%s'\n", fcode->name);
1169: #ifdef DEBUG
1170: if (debug)
1171: (void)printf("Adding %s to dictionary\n", token->text);
1172: #endif
1173: if (state == 0)
1174: emit("new-token");
1175: else {
1176: if (state == TOK_EXTERNAL)
1177: emit("external-token");
1178: else
1179: /* Here we have a choice of new-token or named-token */
1180: emit("named-token");
1181: sspit(token->text);
1182: }
1183: spit(fcode->num);
1184: emit("b(:)");
1185: last_token = fcode->name;
1186: defining = 1;
1187: break;
1188: case TOK_SEMICOLON:
1189: STATE(token->text, "TOK_SEMICOLON:");
1190: emit("b(;)");
1191: defining = 0;
1192: if (depth()) {
1193: token_err(yylineno, infile, NULL,
1194: "Warning: stack depth %d at end of %s\n",
1195: depth(), last_token);
1196: }
1197: last_token = "";
1198: break;
1199:
1200: /* These are special */
1201: case TOK_AGAIN:
1202: STATE(token->text, "TOK_AGAIN");
1203: emit("bbranch");
1204: pos = pop();
1205: pos -= outpos;
1206: if (offsetsize == 16) {
1207: spit((pos>>8)&0xff);
1208: }
1209: spit(pos&0xff);
1210: break;
1211: case TOK_ALIAS:
1212: STATE(token->text, "TOK_ALIAS");
1213: {
1214: struct macro *alias;
1215:
1216: token = yylex();
1217: if (token == NULL) {
1218: (void)printf( "EOF in alias definition\n");
1219: return;
1220: }
1221: if (token->type != TOK_OTHER) {
1222: (void)printf( "ENDCOMMENT aliasing weird token type %d\n",
1223: token->type);
1224: }
1225: alias = malloc(sizeof(*alias));
1.5 deraadt 1226: if (alias == NULL)
1227: (void)err(1, "out of memory");
1228:
1.1 jason 1229: alias->name = strdup(token->text);
1.5 deraadt 1230: if (alias->name == NULL)
1231: (void)err(1, "out of memory");
1232:
1.1 jason 1233: token = yylex();
1234: if (token == NULL) {
1235: (void)printf( "EOF in alias definition\n");
1236: return;
1237: }
1238: alias->equiv = strdup(token->text);
1.5 deraadt 1239: if (alias->equiv == NULL)
1240: (void)err(1, "out of memory");
1241:
1.1 jason 1242: if (!aadd(aliases, alias)) {
1243: (void)printf( "ERROR: Duplicate alias %s\n",
1244: alias->name);
1245: exit(1);
1246: }
1247: }
1248: break;
1249: case TOK_GETTOKEN:
1250: STATE(token->text, "TOK_GETTOKEN");
1251: /* This is caused by ['] */
1252: emit("b(')");
1253: token = yylex();
1254: if (token == NULL) {
1255: (void)printf( "EOF in [']\n");
1256: return;
1257: }
1258: if ((fcode = flookup(dictionary, token->text)) == NULL) {
1259: (void)printf( "[']: %s not found\n", token->text);
1260: exit(1);
1261: }
1262: spit(fcode->num);
1263: break;
1264: case TOK_ASCII:
1265: STATE(token->text, "TOK_ASCII");
1266: token = yylex();
1267: if (token == NULL) {
1268: (void)printf( "EOF after \"ascii\"\n");
1269: exit(1);
1270: }
1271: emit("b(lit)");
1272: spit(0);
1273: spit(0);
1274: spit(0);
1275: spit(token->text[0]);
1276: break;
1277: case TOK_BEGIN:
1278: STATE(token->text, "TOK_BEGIN");
1279: emit("b(<mark)");
1280: push(outpos);
1281: break;
1282: case TOK_BUFFER:
1283: STATE(token->text, "TOK_BUFFER");
1284:
1285: token = yylex();
1286: if (token == NULL) {
1287: (void)printf( "EOF in colon definition\n");
1288: return;
1289: }
1290:
1291: /* Add new code to dictionary */
1292: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1293: if (fcode == NULL)
1294: (void)err(1, "out of memory");
1295:
1.1 jason 1296: fcode->num = nextfcode++;
1297: fcode->name = strdup(token->text);
1.5 deraadt 1298: if (fcode->name == NULL)
1299: (void)err(1, "out of memory");
1300:
1.1 jason 1301: fadd(dictionary, fcode);
1302:
1303: if (state == 0)
1304: emit("new-token");
1305: else {
1306: if (state == TOK_EXTERNAL)
1307: emit("external-token");
1308: else
1309: /* Here we have a choice of new-token or named-token */
1310: emit("named-token");
1311: sspit(token->text);
1312: }
1313: spit(fcode->num);
1314: emit("b(buffer:)");
1315: break;
1316: case TOK_CASE:
1317: STATE(token->text, "TOK_CASE");
1318: emit("b(case)");
1319: push(0);
1320: break;
1321: case TOK_CONSTANT:
1322: STATE(token->text, "TOK_CONSTANT");
1323:
1324: token = yylex();
1325: if (token == NULL) {
1326: (void)printf( "EOF in constant definition\n");
1327: return;
1328: }
1329:
1330: /* Add new code to dictionary */
1331: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1332: if (fcode == NULL)
1333: (void)err(1, "out of memory");
1.1 jason 1334: fcode->num = nextfcode++;
1335: fcode->name = strdup(token->text);
1.5 deraadt 1336: if (fcode->name == NULL)
1337: (void)err(1, "out of memory");
1338:
1.1 jason 1339: fadd(dictionary, fcode);
1340:
1341: if (state == 0)
1342: emit("new-token");
1343: else {
1344: if (state == TOK_EXTERNAL)
1345: emit("external-token");
1346: else
1347: /* Here we have a choice of new-token or named-token */
1348: emit("named-token");
1349: sspit(token->text);
1350: }
1351: spit(fcode->num);
1352: emit("b(constant)");
1353: break;
1354: case TOK_CONTROL:
1355: STATE(token->text, "TOK_CONTROL");
1356: token = yylex();
1357: if (token == NULL) {
1358: (void)printf( "EOF after \"ascii\"\n");
1359: exit(1);
1360: }
1361: emit("b(lit)");
1362: spit(0);
1363: spit(0);
1364: spit(0);
1365: spit(token->text[0]&0x1f);
1366: break;
1367: case TOK_CREATE:
1368: STATE(token->text, "TOK_CREATE");
1369: /* Don't know what this does or if it's right */
1370: token = yylex();
1371: if (token == NULL) {
1372: (void)printf( "EOF in create definition\n");
1373: return;
1374: }
1375:
1376: /* Add new code to dictionary */
1377: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1378: if (fcode == NULL)
1379: err(1, "out of memory");
1380:
1.1 jason 1381: fcode->num = nextfcode++;
1382: fcode->name = strdup(token->text);
1.5 deraadt 1383: if (fcode->name == NULL)
1384: (void)err(1, "out of memory");
1385:
1.1 jason 1386: fadd(dictionary, fcode);
1387:
1388: if (state == 0)
1389: emit("new-token");
1390: else {
1391: if (state == TOK_EXTERNAL)
1392: emit("external-token");
1393: else
1394: /* Here we have a choice of new-token or named-token */
1395: emit("named-token");
1396: sspit(token->text);
1397: }
1398: spit(fcode->num);
1399: emit("b(create)");
1400: break;
1401: case TOK_DECIMAL:
1402: STATE(token->text, "TOK_DECIMAL");
1403: if (token->text[1] != '#') {
1404: if (defining) {
1405: spit(10);
1406: emit("base");
1407: emit("!");
1408: } else
1409: base = TOK_DECIMAL;
1410: } else {
1411: char *end;
1412: Cell value;
1413:
1414: token = yylex();
1415: if (token == NULL) {
1416: (void)printf( "EOF after d#\n");
1417: return;
1418: }
1419: if (token->type == TOK_OTHER) {
1420: if (strcmp("-1", token->text) == 0) {
1421: emit(token->text);
1422: break;
1423: }
1424: }
1425: value = strtol(token->text, &end, 10);
1426: if (*end != 0)
1427: token_err(yylineno, infile, NULL,
1428: "Illegal number conversion: %s", token->text);
1429:
1430: /*
1431: * If this is a 64-bit value we need to store two literals
1432: * and issue a `lxjoin' to combine them. But that's a future
1433: * project.
1434: */
1435: emit("b(lit)");
1436: spit(value>>24);
1437: spit((value>>16)&0x0ff);
1438: spit((value>>8)&0x0ff);
1439: spit(value&0x0ff);
1440: }
1441: break;
1442: case TOK_DEFER:
1443: STATE(token->text, "TOK_DEFER");
1444: /* Don't know what this does or if it's right */
1445: token = yylex();
1446: if (token == NULL) {
1447: (void)printf( "EOF in colon definition\n");
1448: return;
1449: }
1450:
1451: /* Add new code to dictionary */
1452: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1453: if (fcode == NULL)
1454: (void)err(1, "out of memory");
1455:
1.1 jason 1456: fcode->num = nextfcode++;
1457: fcode->name = strdup(token->text);
1.5 deraadt 1458: if (fcode->name == NULL)
1459: (void)err(1, "out of memory");
1460:
1.1 jason 1461: fadd(dictionary, fcode);
1462:
1463: if (state == 0)
1464: emit("new-token");
1465: else {
1466: if (state == TOK_EXTERNAL)
1467: emit("external-token");
1468: else
1469: /* Here we have a choice of new-token or named-token */
1470: emit("named-token");
1471: sspit(token->text);
1472: }
1473: spit(fcode->num);
1474: emit("b(defer)");
1475: break;
1476: case TOK_DO:
1477: STATE(token->text, "TOK_DO");
1478: /*
1479: * From the 1275 spec. B is branch location, T is branch target.
1480: *
1481: * b(do) offset1 ... b(loop) offset2 ...
1482: * b(do) offset1 ... b(+loop) offset2 ...
1483: * b(?do) offset1 ... b(loop) offset2 ...
1484: * b(?do) offset1 ... b(+loop) offset2 ...
1485: * ^ ^
1486: * B1 ^ ^ T1
1487: * T2 B2
1488: *
1489: * How we do this is we generate the b(do) or b(?do), spit out a
1490: * zero offset while remembering b1 and t2. Then we call tokenize()
1491: * to generate the body. When tokenize() finds a b(loop) or b(+loop),
1492: * it generates the FCode and returns, with outpos at b2. We then
1493: * calculate the offsets, put them in the right slots and finishup.
1494: */
1495:
1496: if (token->text[0] == '?')
1497: emit("b(?do)");
1498: else
1499: emit("b(do)");
1500: push(outpos);
1501: if (offsetsize == 16) {
1502: spit(0);
1503: }
1504: spit(0); /* Place holder for later */
1505: push(outpos);
1506: break;
1507: case TOK_ELSE:
1508: STATE(token->text, "TOK_ELSE");
1509: /* Get where we need to patch */
1510: off = pop();
1511: emit("bbranch");
1512: /* Save where we are now. */
1513: push(outpos);
1514: if (offsetsize == 16) {
1515: spit(0); /* Place holder for later */
1516: }
1517: spit(0); /* Place holder for later */
1518: emit("b(>resolve)");
1519: /* Rewind and patch the if branch */
1520: pos = outpos;
1521: outpos = off;
1522: off = pos - off;
1523: if (offsetsize == 16) {
1524: spit(0); /* Place holder for later */
1525: }
1526: spit(0); /* Place holder for later */
1527: /* revert to the end */
1528: outpos = pos;
1529: break;
1530: case TOK_ENDCASE:
1531: STATE(token->text, "TOK_ENDCASE:");
1532: pos = outpos; /* Remember where we need to branch to */
1533:
1534: /* Thread our way backwards and install proper offsets */
1535: off = pop();
1536: while (off) {
1537: int tmp;
1538:
1539: /* Move to this offset */
1540: outpos = off;
1541: /* Load next offset to process */
1542: tmp = outbuf[outpos];
1543:
1544: /* process this offset */
1545: off = pos - outpos;
1546: if (offsetsize == 16) {
1547: spit((off>>8)&0xff);
1548: }
1549: spit(off&0xff);
1550: off = tmp;
1551: }
1552: outpos = pos;
1553: emit("b(endcase)");
1554: break;
1555: case TOK_ENDOF:
1556: STATE(token->text, "TOK_ENDOF");
1557: off = pop();
1558: emit("b(endof)");
1559: /*
1560: * Save back pointer in the offset field so we can traverse
1561: * the linked list and patch it in the endcase.
1562: */
1563: pos = pop(); /* get position of prev link. */
1564: push(outpos); /* save position of this link. */
1.7 otto 1565: spit(pos); /* save position of prev link. */
1.1 jason 1566: if (offsetsize == 16) {
1567: spit(0);
1568: }
1569: pos = outpos;
1570: /* Now point the offset from b(of) here. */
1571: outpos = off;
1572: off = outpos - off;
1573: if (offsetsize == 16) {
1574: spit((off>>8)&0xff);
1575: }
1576: spit(off&0xff);
1577: /* Restore position */
1578: outpos = pos;
1579: break;
1580: case TOK_EXTERNAL:
1581: STATE(token->text, "TOK_EXTERNAL");
1582: state = TOK_EXTERNAL;
1583: break;
1584: case TOK_FIELD:
1585: STATE(token->text, "TOK_FIELD");
1586:
1587: token = yylex();
1588: if (token == NULL) {
1589: (void)printf( "EOF in field definition\n");
1590: return;
1591: }
1592:
1593: /* Add new code to dictionary */
1594: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1595: if (fcode == NULL)
1596: (void)err(1, "out of memory");
1597:
1.1 jason 1598: fcode->num = nextfcode++;
1599: fcode->name = strdup(token->text);
1.5 deraadt 1600: if (fcode->name == NULL)
1601: (void)err(1, "out of memory");
1602:
1.1 jason 1603: fadd(dictionary, fcode);
1604:
1605: if (state == 0)
1606: emit("new-token");
1607: else {
1608: if (state == TOK_EXTERNAL)
1609: emit("external-token");
1610: else
1611: /* Here we have a choice of new-token or named-token */
1612: emit("named-token");
1613: sspit(token->text);
1614: }
1615: spit(fcode->num);
1616: emit("b(field)");
1617: break;
1618:
1619: case TOK_HEX:
1620: STATE(token->text, "TOK_HEX");
1621: if (token->text[1] != '#') {
1622: if (defining) {
1623: spit(16);
1624: emit("base");
1625: emit("!");
1626: } else
1627: base = TOK_HEX;
1628: } else {
1629: char *end;
1630: Cell value;
1631:
1632: token = yylex();
1633: if (token == NULL) {
1634: (void)printf( "EOF after h#\n");
1635: return;
1636: }
1637: value = strtol(token->text, &end, 16);
1638: if (*end != 0) {
1639: (void)printf("Illegal number conversion:%s:%d: %s\n",
1640: infile, yylineno, yytext);
1641: exit(1);
1642: }
1643: /*
1644: * If this is a 64-bit value we need to store two literals
1645: * and issue a `lxjoin' to combine them. But that's a future
1646: * project.
1647: */
1648: emit("b(lit)");
1649: spit(value>>24);
1650: spit((value>>16)&0x0ff);
1651: spit((value>>8)&0x0ff);
1652: spit(value&0x0ff);
1653: }
1654: break;
1655: case TOK_HEADERLESS:
1656: STATE(token->text, "TOK_HEADERLESS");
1657: state = 0;
1658: break;
1659: case TOK_HEADERS:
1660: STATE(token->text, "TOK_HEADERS");
1661: state = TOK_HEADERS;
1662: break;
1663: case TOK_OFFSET16:
1664: STATE(token->text, "TOK_OFFSET16");
1665: offsetsize = 16;
1666: emit("offset16");
1667: break;
1668: case TOK_IF:
1669: STATE(token->text, "TOK_IF");
1670: /*
1671: * Similar to do but simpler since we only deal w/one branch.
1672: */
1673: emit("b?branch");
1674: push(outpos);
1675: if (offsetsize == 16) {
1676: spit(0); /* Place holder for later */
1677: }
1678: spit(0); /* Place holder for later */
1679: break;
1680: case TOK_LEAVE:
1681: STATE(token->text, "TOK_LEAVE");
1682: emit("b(leave)");
1683: break;
1684: case TOK_LOOP:
1685: STATE(token->text, "TOK_LOOP");
1686:
1687: if (token->text[0] == '+')
1688: emit("b(+loop)");
1689: else
1690: emit("b(loop)");
1691: /* First do backwards branch of loop */
1692: pos = pop();
1693: off = pos - outpos;
1694: if (offsetsize == 16) {
1695: spit((off>>8)&0xff);
1696: }
1697: spit(off&0xff);
1698: /* Now do forward branch of do */
1699: pos = outpos;
1700: outpos = pop();
1701: off = pos - outpos;
1702: if (offsetsize == 16) {
1703: spit((off>>8)&0xff);
1704: }
1705: spit(off&0xff);
1706: /* Restore output position */
1707: outpos = pos;
1708: break;
1709: case TOK_OCTAL:
1710: STATE(token->text, "TOK_OCTAL");
1711: if (token->text[1] != '#') {
1712: if (defining) {
1713: spit(16);
1714: emit("base");
1715: emit("!");
1716: } else
1717: base = TOK_OCTAL;
1718: } else {
1719: char *end;
1720: Cell value;
1721:
1722: token = yylex();
1723: if (token == NULL) {
1724: (void)printf( "EOF after o#\n");
1725: return;
1726: }
1727: value = strtol(token->text, &end, 8);
1728: if (*end != 0) {
1729: (void)printf("Illegal number conversion:%s:%d: %s\n",
1730: infile, yylineno, yytext);
1731: exit(1);
1732: }
1733: /*
1734: * If this is a 64-bit value we need to store two literals
1735: * and issue a `lxjoin' to combine them. But that's a future
1736: * project.
1737: */
1738: emit("b(lit)");
1739: spit(value>>24);
1740: spit((value>>16)&0x0ff);
1741: spit((value>>8)&0x0ff);
1742: spit(value&0x0ff);
1743: }
1744: break;
1745: case TOK_OF:
1746: STATE(token->text, "TOK_OF");
1747: /*
1748: * Let's hope I get the semantics right.
1749: *
1750: * The `of' behaves almost the same as an
1751: * `if'. The difference is that `endof'
1752: * takes a branch offset to the associated
1753: * `endcase'. Here we will generate a temporary
1754: * offset of the `of' associated with the `endof'.
1755: * Then in `endcase' we should be pointing just
1756: * after the offset of the last `endof' so we
1757: * calculate the offset and thread our way backwards
1758: * searching for the previous `b(case)' or `b(endof)'.
1759: */
1760: emit("b(of)");
1761: push(outpos);
1762: if (offsetsize == 16) {
1763: spit(0);
1764: }
1765: spit(0); /* Place holder for later */
1766: break;
1767: case TOK_REPEAT:
1768: STATE(token->text, "TOK_REPEAT");
1769: emit("bbranch");
1770: pos = pop();
1771: off = pop();
1772: /* First the offset for the branch back to the begin */
1773: off -= outpos;
1774: if (offsetsize == 16) {
1775: spit((off>>8)&0xff);
1776: }
1777: spit(off&0xff);
1778: emit("b(>resolve)");
1779: /* Now point the offset of the while here. */
1780: off = outpos;
1781: outpos = pos;
1782: pos = off - pos;
1783: if (offsetsize == 16) {
1784: spit((pos>>8)&0xff);
1785: }
1786: spit(pos&0xff);
1787: /* Return to the end of the output */
1788: outpos = off;
1789: break;
1790: case TOK_THEN:
1791: STATE(token->text, "TOK_THEN");
1792: emit("b(>resolve)");
1793: pos = outpos;
1794: outpos = pop();
1795: off = pos - outpos;
1796: if (offsetsize == 16) {
1797: spit((off>>8)&0xff);
1798: }
1799: spit(off&0xff);
1800: outpos = pos;
1801: break;
1802: case TOK_TO:
1803: STATE(token->text, "TOK_TO");
1804: /* The next pass should tokenize the FCODE number */
1805: emit("b(to)");
1806: break;
1807: case TOK_UNTIL:
1808: STATE(token->text, "TOK_UNTIL");
1809: {
1810: int pos;
1811:
1812: emit("b?branch");
1813: pos = pop();
1814: pos -= outpos;
1815: if (offsetsize == 16) {
1816: spit((pos>>8)&0xff);
1817: }
1818: spit(pos&0xff);
1819: }
1820: break;
1821: case TOK_VALUE:
1822: STATE(token->text, "TOK_VALUE");
1823:
1824: token = yylex();
1825: if (token == NULL) {
1826: (void)printf( "EOF in value definition\n");
1827: return;
1828: }
1829:
1830: /* Add new code to dictionary */
1831: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1832: if (fcode == NULL)
1833: (void)err(1, "out of memory");
1834:
1.1 jason 1835: fcode->num = nextfcode++;
1836: fcode->name = strdup(token->text);
1.5 deraadt 1837: if (fcode->name == NULL)
1838: (void)err(1, "out of memory");
1839:
1.1 jason 1840: fadd(dictionary, fcode);
1841:
1842: if (state == 0)
1843: emit("new-token");
1844: else {
1845: if (state == TOK_EXTERNAL)
1846: emit("external-token");
1847: else
1848: /* Here we have a choice of new-token or named-token */
1849: emit("named-token");
1850: sspit(token->text);
1851: }
1852: spit(fcode->num);
1853: emit("b(value)");
1854: break;
1855: case TOK_VARIABLE:
1856: STATE(token->text, "TOK_VARIABLE");
1857:
1858: token = yylex();
1859: if (token == NULL) {
1860: (void)printf( "EOF in variable definition\n");
1861: return;
1862: }
1863:
1864: /* Add new code to dictionary */
1865: fcode = malloc(sizeof(*fcode));
1.5 deraadt 1866: if (fcode == NULL)
1867: (void)err(1, "out of memory");
1868:
1.1 jason 1869: fcode->num = nextfcode++;
1870: fcode->name = strdup(token->text);
1.5 deraadt 1871: if (fcode->name == NULL)
1872: (void)err(1, "out of memory");
1873:
1.1 jason 1874: fadd(dictionary, fcode);
1875:
1876: if (state == 0)
1877: emit("new-token");
1878: else {
1879: if (state == TOK_EXTERNAL)
1880: emit("external-token");
1881: else
1882: /* Here we have a choice of new-token or named-token */
1883: emit("named-token");
1884: sspit(token->text);
1885: }
1886: spit(fcode->num);
1887: emit("b(variable)");
1888: break;
1889: case TOK_WHILE:
1890: STATE(token->text, "TOK_WHILE");
1891: emit("b?branch");
1892: push(outpos);
1893: if (offsetsize == 16) {
1894: spit(0);
1895: }
1896: spit(0);
1897: break;
1898:
1899: /* Tokenizer directives */
1900: case TOK_BEGTOK:
1901: STATE(token->text, "TOK_BEGTOK");
1902: tokenizer = 1;
1903: break;
1904: case TOK_EMIT_BYTE:
1905: STATE(token->text, "TOK_EMIT_BYTE");
1906: spit(pop());
1907: break;
1908: case TOK_ENDTOK:
1909: STATE(token->text, "TOK_ENDTOK");
1910: tokenizer = 0;
1911: break;
1912: case TOK_FLOAD:
1913: STATE(token->text, "TOK_FLOAD");
1914: /* Parse a different file for a while */
1915: token = yylex();
1916: if ((inf = fopen(token->text, "r")) == NULL) {
1917: (void)printf("%s: Could not open %s: %s\n",
1918: myname, token->text, strerror(errno));
1919: break;
1920: }
1921: inbuf = yy_create_buffer(inf, YY_BUF_SIZE);
1922: yy_switch_to_buffer(inbuf);
1923: {
1924: char *oldinfile = infile;
1925:
1926: infile = token->text;
1927: tokenize(inbuf);
1928: infile = oldinfile;
1929: }
1930: yy_switch_to_buffer(input);
1931: yy_delete_buffer(inbuf);
1932: fclose(inf);
1933: break;
1934: case TOK_OTHER:
1935: STATE(token->text, "TOK_OTHER");
1936: if (apply_macros(input, token->text))
1937: break;
1938: if (emit(token->text)) {
1939: #if 0
1940: /*
1941: * Call an external command
1942: *
1943: * XXXXX assumes it will always find the command
1944: */
1945: sspit(token->text);
1946: emit("$find");
1947: emit("drop");
1948: emit("execute");
1949: #else
1950: (void)printf( "%s: undefined token `%s'\n",
1951: myname, token->text);
1952: fflush(stderr);
1953: exit(1);
1954: #endif
1955: }
1956: break;
1957: default:
1.6 millert 1958: break;
1.1 jason 1959: }
1960: }
1961: return;
1962: }
1963:
/*
 * Report a tokenizer error and terminate.
 *
 * When infile is non-NULL a "file:line: " prefix is printed; when fmt
 * is non-NULL it is formatted with the trailing arguments; when text is
 * non-NULL the offending input is echoed on its own indented line.
 * All output goes to stderr.  This function never returns: it always
 * exits with status 1.
 */
void
token_err(int lineno, char *infile, char *text, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (infile != NULL)
		(void)fprintf(stderr, "%s:%d: ", infile, lineno);
	if (fmt != NULL)
		(void)vfprintf(stderr, fmt, ap);
	va_end(ap);
	(void)fputc('\n', stderr);
	if (text != NULL)
		(void)fprintf(stderr, "\t%s", text);
	exit(1);
}
1983:
1984: /*
1985: * Lookup fcode string in dictionary and spit it out.
1986: *
1987: * Fcode must be in dictionary. No alias conversion done.
1988: */
1989: int
1990: emit(str)
1991: char *str;
1992: {
1993: struct fcode *code;
1994: if ((code = flookup( dictionary, str)))
1995: spit(code->num);
1996: #ifdef DEBUG
1997: if (debug > 1) {
1998: if (code)
1999: (void)printf( "emitting `%s'\n", code->name);
2000: else
2001: (void)printf( "emit: not found `%s'\n", str);
2002: }
2003: #endif
2004: return (code == NULL);
2005: }
2006:
2007: /*
2008: * Spit out an integral value as a series of FCodes.
2009: *
2010: * It will spit out one zero byte or as many bytes as are
2011: * non-zero.
2012: */
2013: int
2014: spit(n)
2015: long n;
2016: {
2017: int count = 1;
2018:
2019: if (n >> 8)
2020: count += spit(n >> 8);
2021: if (outpos >= outbufsiz) {
2022: while (outpos >= outbufsiz) outbufsiz += BUFCLICK;
2023: if (!(outbuf = realloc(outbuf, outbufsiz))) {
2024: (void)printf( "realloc of %ld bytes failed -- out of memory\n",
2025: (long)outbufsiz);
2026: exit(1);
2027: }
2028: }
2029: outbuf[outpos++] = n;
2030: return (count);
2031: }
2032:
/*
 * Spit out a counted FCode string: one length byte followed by the
 * string's characters.  FCode strings carry an 8-bit length, so
 * anything longer than 255 bytes is reported and dropped.
 */
void
sspit(s)
	char *s;
{
	int i, len;

	len = strlen(s);
	if (len > 255) {
		(void)printf( "string length %d too long\n", len);
		return;
	}
#ifdef DEBUG
	if (debug > 1)
		(void)printf( "sspit: len %d str `%s'\n", len, s);
#endif
	spit(len);
	for (i = 0; i < len; i++)
		spit(s[i]);
}
2054:
/*
 * flex wrap hook: unconditionally report end-of-input so the scanner
 * never chains to another file on its own (fload handles that by
 * switching buffers explicitly).
 */
int
yywrap()
{
	return (1);
}