1    | /***************************************
2    |   $Revision: 1.30 $
3    | 
4    |   Radix tree (rx).  rx_search.c - functions to search nodes of the tree
5    | 
  Status: NOT REVIEWED, TESTED, COMPLETE
7    | 
8    |   Design and implementation by: Marek Bukowy
9    | 
10   |   ******************/ /******************
11   |   Copyright (c) 1999                              RIPE NCC
12   |  
13   |   All Rights Reserved
14   |   
15   |   Permission to use, copy, modify, and distribute this software and its
16   |   documentation for any purpose and without fee is hereby granted,
17   |   provided that the above copyright notice appear in all copies and that
18   |   both that copyright notice and this permission notice appear in
19   |   supporting documentation, and that the name of the author not be
20   |   used in advertising or publicity pertaining to distribution of the
21   |   software without specific, written prior permission.
22   |   
23   |   THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
24   |   ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS; IN NO EVENT SHALL
25   |   AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
26   |   DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
27   |   AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
28   |   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
29   |   ***************************************/
30   | 
31   | 
32   | #include <erroutines.h>
33   | #include <rxroutines.h>
34   | #include <stubs.h>
35   | 
36   | #include "iproutines.h"
37   | 
38   | /***************************************************************************/
39   | 
40   | /*++++++++++++++
Descends the given tree, following the bits of the given prefix, to get
to (or past) the node with that prefix.
It fills up a stack of COPIES of nodes, including glue nodes.

Then it also sets the number of elements on the stack:
maxdepth is set to the position where the next entry would be written
( = last + 1, i.e. the number of nodes pushed).
48   | 
49   |    The dmodes:
50   |            
   RX_STK_QUERY_NOGLUE = (search exact/less spec) stop when
                         * the current prefix length >= the new prefix length
                         * the current prefix no longer matches
                         (glue nodes are not pushed on the stack)
55   |                         
56   |    RX_STK_QUERY_ALLNOD = as above, except that the glue and data nodes are
57   |                          treated equally (i.e. glue nodes are not skipped)
58   | 
59   |    RX_STK_CREAT        = descend until the next non-glue node past the one found
60   |                          in exact mode (for creation)
61   | 
62   | ++++++++++++++*/
63   | 
64   | er_ret_t
65   | rx_build_stack(rx_nodcpy_t    stack[], 
66   |                int            *maxdepth, 
67   |                rx_tree_t      *tree, 
68   |                ip_prefix_t    *newpref,
69   |                rx_stk_mt      dmode
70   |                )
71   | {
72   |   register rx_node_t *curnode;
73   |   register int link, quit_now=0;
74   |   register int tracedet = ER_is_traced( FAC_RX, ASP_RX_STKBLD_DET);
75   |   char bbf[IP_PREFSTR_MAX];
76   | 
77   |   if( ER_is_traced( FAC_RX, ASP_RX_STKBLD_GEN)) {
78   |     IP_pref_b2a( newpref , bbf, IP_PREFSTR_MAX);
79   |     ER_dbg_va(FAC_RX, ASP_RX_STKBLD_GEN, 
80   |             "rx_build_stack: searching for %s in mode %d", bbf, dmode);
81   |   }
82   | 
83   |   *maxdepth = 0;
84   |   
85   |   if ( tree -> num_nodes == 0) { 
86   |     /* The tree was empty. */
87   |     return RX_OK;
88   |   }
89   |   
90   |   curnode = tree->top_ptr;
91   |   /* this works for RAM, for SQL one would have to call a 'getsqlnode' here*/
92   |   
  /* OK, there is at least one node. Descend the tree until */
  /* the correct bit length is exceeded, */
  /* a glue node is hit (take the last non-glue node then), */
  /* or we run out of nodes in the direction of descent. */
97   |   
98   |   do {
    /* check at the current node where the one we look for would fit */
    /* (the second argument of IP_addr_bit_get starts at 0, */
    /* so this effectively looks at the bit next to the last significant bit */
    /* of the current node) */
103  |     
104  |     link = IP_addr_bit_get( & newpref->ip, curnode->prefix.bits );    
105  |     
106  |     /* check conditions for leaving the loop    */
107  |     if(curnode->child_ptr[link] == NULL) {
108  |       /* end of branch. quit after adding the current node to the stack*/
109  |       /* (or before - subject to bit test in QUERY mode)*/
110  |       quit_now = 1;
111  |     }
112  |     else {
    /* check the node.
       BIG DIFFERENCE between the modes:
       in CREAT we don't mind if the stack goes too deep,
       in QUERY it can lead to false answers
       (e.g. a /24 is found for a /23 query).

       So this must be "peeled off the stack" later in the search routine,
       if both types of stack are to work properly with query searches.
    */
122  | 
123  | 
124  |       if( curnode->prefix.bits > newpref->bits ) {
125  |         /* deep enough.*/
126  |         quit_now = 2;
127  |       }
128  | 
129  |       if(dmode == RX_STK_CREAT && curnode->glue) {
130  |         /* mode: creation. */
131  |         /* Cancel quitting if glue -- in CREAT mode the stack building */
132  |         /* should stop at the next real (non-glue) node.*/
133  |         /* ("next" meaning following link #0)*/
134  |         quit_now = 0;
135  |       }
136  |     }
137  |     
    /* now that the conditions for leaving the loop after the node is
       added to the stack are set, see if we shouldn't leave the loop BEFOREHAND */
140  |     
141  |     /* In query mode, we should quit as soon as we see a mismatch */
142  | 
143  |     if(dmode != RX_STK_CREAT
144  |        && 0 != IP_addr_cmp(&curnode->prefix.ip, &newpref->ip, 
145  |                            curnode->prefix.bits) ) {
146  |         /*QUIT NOW! (but add this node)*/
147  |       quit_now = 4;
148  |     }
149  | 
    /* push the current node on the stack. RAM only. */
    /* (unless quit_now is 64, which means do NOT copy the current node) */
    /* */
    /* In CREAT and QUERY_ALLNOD modes, push everything. */
    /* In QUERY_NOGLUE mode, only non-glues. */
156  |       
157  |     if( /* quit_now < 64 &&           disabled as 64 is not in use right now */
158  |        (dmode != RX_STK_QUERY_NOGLUE || curnode->glue == 0 )) {
159  | 	memcpy( & stack[*maxdepth].cpy, curnode, sizeof(rx_node_t));
160  | 	stack[*maxdepth].srcptr = curnode;
161  | 	stack[*maxdepth].srckey = SQ_NOKEY;
162  | 	stack[*maxdepth].tree = tree;
163  | 	(*maxdepth)++;
164  |     }
165  |     
166  |     /* make debug info.*/
167  |    
168  |     if( tracedet ) {
169  |       IP_pref_b2a( & curnode->prefix , bbf, IP_PREFSTR_MAX );
170  |       ER_dbg_va(FAC_RX, ASP_RX_STKBLD_DET,
171  |                 "rx_build_stack: %s%d at %s%s (stk len: %d)",
172  |                 quit_now ? "stop/" : "link ",  
173  |                 quit_now ? quit_now : link,
174  |                 bbf, ( curnode->glue ) ? " ++glue++" : "",
175  |                 *maxdepth  );
176  |     }
177  |     
178  |     curnode = curnode -> child_ptr[link];
179  | 
180  |   } while( !quit_now ); 
181  | 
182  |   return RX_OK;
183  | }
184  | 
185  | /***************************************************************************/
186  | /*+++++++++
187  |    helper for the nod_search routine: 
188  | 
189  |    allocate a new node copy struct, copy the struct and add to nodlist
190  | ++++++++++*/
191  | 
192  | static
193  | er_ret_t
194  | rx_nod_append( GList **nodlist, rx_nodcpy_t *element) 
195  | {
196  |   rx_nodcpy_t *newcpy;
197  |   er_ret_t err;
198  |   
199  |   if( (err=wr_calloc( (void **) & newcpy, 1, sizeof(rx_nodcpy_t))) != UT_OK) {
200  |     return err; /*    die;*/
201  |   }
202  |   memcpy(newcpy, element, sizeof(rx_nodcpy_t));        
203  |   (*nodlist) = g_list_prepend( *nodlist, newcpy );
204  | 
205  |   return RX_OK;
206  | }
207  | 
208  | 
209  | 
210  | 
211  | /***************************************************************************/
212  | 
213  | /*+++++++++++
214  |   helper for MORE specific lookup in rx_nod_search 
215  | 
216  |   adds a node to the list of answers.
217  | +++++++++++*/
218  | 
219  | static
220  | er_ret_t
221  | rx_walk_hook_addnode(rx_node_t *node, int level, int nodecounter, 
222  |                      void *userptr)
223  | {   
224  |   rx_nodcpy_t nodcpy;
225  |   hook_addnode_userdat_t *userdat = userptr;
226  |   
227  | 
228  |   /* do not append glue nodes*/
229  |   if( node->glue == 1 ) return RX_OK;
230  |   
231  |   /* in RAM mode, do not copy the node.*/
232  |   /*  memcpy( &nodcpy.cpy, node, sizeof(rx_node_t));*/
233  | 
234  |   /* XXX reset to 0 to avoid warnings from workshop: but it 
235  |      slows things down! */
236  |   memset( &nodcpy.cpy, 0, sizeof(rx_node_t));
237  | 
238  |   nodcpy.srcptr = node;
239  |   nodcpy.srckey = SQ_NOKEY;
240  |   nodcpy.tree = userdat->tree;
241  |   
242  |   return rx_nod_append( userdat->nodlist, &nodcpy);
243  | }
244  | 
245  | 
246  | /***************************************************************************/
247  | 
248  | /*+++++++++++
249  |   helper for DBLS lookup in rx_nod_search 
250  | 
251  |   adds a node to the list of answers.
252  | +++++++++++*/
253  | 
254  | static
255  | er_ret_t
256  | rx_walk_hook_adddoubles(rx_node_t *node, int level, int nodecounter, 
257  |                         void *userptr)
258  | {
259  |   rx_nodcpy_t nodcpy;
260  |   hook_addnode_userdat_t  *userdat = userptr;
261  |   int leaves = g_list_length(node->leaves_ptr);
262  |   char buf[1024];
263  |   
264  |   /* do not append glue nodes*/
265  |   if( node->glue == 1 ) return RX_OK;
266  | 
267  |  
268  |   /* add only nodes with more than 1 dataleaf*/
269  |   if( leaves < 2 ) return RX_OK;
270  | 
271  |   if( ER_is_traced( FAC_RX, ASP_RX_SRCH_DET)) {
272  |     rx_nod_print(node, buf, 1024);
273  |     ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
274  |               "rx_walk_hook_adddoubles: %30s, %d leaves", buf, leaves);
275  |   }
276  | 
277  |   /*  memcpy( &nodcpy.cpy, node, sizeof(rx_node_t));*/
278  |   nodcpy.srcptr = node;
279  |   nodcpy.srckey = SQ_NOKEY;
280  |   nodcpy.tree = userdat->tree;
281  |   
282  |   return rx_nod_append( userdat->nodlist, &nodcpy);
283  | }
284  | 
285  | 
286  | /***************************************************************************/
287  | er_ret_t
288  | rx_nod_search (
289  |                rx_srch_mt  search_mode,
290  |                int         par_a,
291  |                int         par_b,
292  |                /* see rx_asc_search() for explanation */
293  |                rx_tree_t  *tree,           /* tree ptr*/
294  |                ip_prefix_t  *prefix,          /* binary prefix*/
295  | 
296  |                rx_nodcpy_t stack[],         /* stack==array of node_copies*/
               int         stackcount,      /* number of elements on the stack,*/
298  |                                             /* can come from a creat stack!*/
299  | 
300  |                GList       **nodlist,       /* answers go here*/
301  |                int         max_count        /* max # of answers*/
302  |                )                        
303  |      /*
304  |         searches the stack for a given prefix, finds *nodes* in the stack 
305  |         and appends *copies of the nodes* to the nodlist;
306  | 
307  |         finds
308  |         0 or 1 nodes for exact search
309  |         0 or 1 nodes for exless (0 if no less specific node found)
310  |         any number (incl. 0) for {more|less}^n-m specific 
311  |      
312  |        returns errcode.
313  | 
314  |        
315  |      */
316  | {
317  |   char buf[1024];
318  |   int sps = stackcount-1;       /* stack position.*/
319  |   int depthcounter=0;
320  |   er_ret_t err=RX_OK;
321  |   int i;
322  |   hook_addnode_userdat_t datstr;
323  |   er_ret_t (*hook_function)();  /* pointer to the walk_hook function*/
324  |                                 /* (see MORE spec lookup)*/
325  | 
326  |   /* structure for carrying data to walk_tree hook functions, used only
327  |      in MORE, DBLS and RANG search modes 
328  |   */
329  |   datstr.nodlist = nodlist;
330  |   datstr.tree    = tree;
331  |   datstr.prefix  = prefix;
332  |     
333  |   
334  |   if( ER_is_traced( FAC_RX, ASP_RX_SRCH_BOT)) {
335  |     IP_pref_b2a( prefix , buf, IP_PREFSTR_MAX);
336  |     ER_dbg_va(FAC_RX, ASP_RX_SRCH_BOT,
337  |               "rx_nod_search: searching for %s in mode %d", buf, search_mode);
338  |   }
339  | 
  /* in non-CREAT modes, glue nodes are skipped anyway.
     (they should normally not be there if the stack was created in
     STK_QUERY mode, but it's possible to use a CREAT stack too).

     It's also possible that the stack is too deep.
     So, truncate the stack to the last non-glue node
     whose prefix length is <= the search term's length,
     otherwise a /24 would be returned for a /23 query.

     For LESS SPECIFIC searches one also has to peel off entries
     whose prefixes do not contain the search term.
  */
352  | 
353  |   if( search_mode != RX_SRCH_CREAT ) {
354  |     
355  |     while( sps >= 0 ) {
356  |       char *reason = NULL;
357  | 
358  |       if( stack[sps].cpy.prefix.bits > prefix->bits ) {            /* too deep*/ 
359  | 	reason = "2deep";
360  |       } 
361  |       else if( 0 != IP_addr_cmp(& stack[sps].cpy.prefix.ip, &prefix->ip, 
362  | 				stack[sps].cpy.prefix.bits) ) {   /* mismatch */
363  | 	reason = "mismatch";
364  |       }
365  |       else if ( search_mode != RX_SRCH_MORE && search_mode != RX_SRCH_DBLS 
366  | 		&& search_mode != RX_SRCH_RANG && stack[sps].cpy.glue == 1 ) {                    /* is glue*/
367  | 	reason = "glue";
368  |       }	
369  |       else if ( search_mode == RX_SRCH_LESS
370  | 		&& stack[sps].cpy.prefix.bits == prefix->bits ) {  /* too deep*/
371  | 	reason = "2deep4less";
372  |       }	else {
373  | 	
374  | 	break;  /* stop peeling off */
375  |       }
376  |       
377  |       if( ER_is_traced( FAC_RX, ASP_RX_SRCH_DET)) {
378  | 	rx_nod_print( & stack[sps].cpy , buf, IP_PREFSTR_MAX);
379  | 	ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
380  | 		  "rx_nod_search: peeling off %d: %s (%s)", sps, buf, reason);
381  |       }
382  |       sps--;
383  |     }
384  |   }
385  |   
386  |   /* nothing left on the stack. Sorry.*/
387  |   /* we allow that for more spec search -- this means*/
388  |   /* that the search term is a shorter prefix than the one*/
389  |   /* in the top node. Possibly it's 0/0 which is valid for more spec search.*/
390  | 
391  |   if( search_mode != RX_SRCH_MORE && search_mode != RX_SRCH_DBLS 
392  |       && sps < 0 ) {       
393  |     return RX_OK;
394  |   }
395  |       
396  |   switch(search_mode) {
397  |   case RX_SRCH_EXACT:
398  |   case RX_SRCH_CREAT:
    /* go up the tree (stack) and exit when the proper prefix is found.*/
    /* For RX_SRCH_EXACT the stack must not contain glue nodes (we die */
    /* if it does); for RX_SRCH_CREAT take all.*/
    /* Glue nodes may contain a valid prefix, so watch out.*/
402  | 
403  |     while(sps >= 0) {
404  | 
405  |       if( ER_is_traced( FAC_RX, ASP_RX_SRCH_DET)) {
406  |         rx_nod_print(& stack[sps].cpy, buf, 1024);
407  |         ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
408  |                   "rx_nod_search: position %d: %s", sps, buf);
409  |       }
410  |       
411  |       if ( search_mode == RX_SRCH_EXACT 
412  |            && stack[sps].cpy.glue ) {
413  |         die;
414  |       }
415  |       
416  |       if ( memcmp( & stack[sps].cpy.prefix, 
417  |                    prefix, 
418  |                    sizeof(ip_prefix_t)) == 0 ) {
419  |         /* FOUND!!*/
420  |         /* add to the nodlist.*/
421  | 
422  |         if( (err=rx_nod_append( nodlist, & stack[sps])) != RX_OK ) {
423  |           return err;
424  |         }
425  | 
426  |         ER_dbg_va(FAC_RX, ASP_RX_SRCH_BOT, "rx_nod_search: found!");
427  |         break;
428  |       }
429  |       sps--;
430  |     }
431  |     break;
432  | 
433  |   case RX_SRCH_EXLESS:
434  |     /* just fetch the last element off the stack (if any). */
435  |     /* Must be non-glue for EXLESS.*/
436  | 
437  |     if( sps >= 0 ) {
438  |       rx_nod_append( nodlist, & stack[sps]); 
439  |     }
440  | 
441  |     /* else : nothing found.*/
442  |     /* For EXLESS: check if the stack contains only non-glue nodes.*/
443  |     /* If it contains a glue, it means it was created in the CREAT mode,*/
444  |     /* which renders the above algorithm absolutely useless. Then crash,*/
445  |     /* this is a programmer's error.*/
446  | 
447  |     while( sps >= 0 ) {
448  |       if( stack[sps].cpy.glue ) {
449  |         die;
450  |       }
451  |       sps--;
452  |     }
453  | 
454  |     break;
455  | 
456  |   case RX_SRCH_LESS:
457  |     while( sps >= 0 && depthcounter < par_a ) {
458  |       if( stack[sps].cpy.glue == 0 ) {
459  |         rx_nod_append( nodlist, & stack[sps]); 
460  |         depthcounter++;
461  |       }
462  |       sps--;
463  |     }
464  |     break;
465  | 
466  |   case RX_SRCH_MORE:
467  |   case RX_SRCH_DBLS:   /* special (debug?) mode : find nodes with multiple*/
468  |                        /* data leaves. Much like more specific, except that*/
469  |                        /* most nodes will be skipped.*/
470  |                        /* The difference is in calling another hook function*/
471  |     hook_function = ( search_mode == RX_SRCH_MORE )  
472  |       ? rx_walk_hook_addnode
473  |       : rx_walk_hook_adddoubles;
474  |     
475  |     /* the result of a more spec search should NOT contain the object exactly*/
476  |     /* matching the query, even if it exists in the database. So two walks are */
477  |     /* performed, one for each child (if it exists). */
478  |     /* MEMORY IMPLEMENTATION ONLY FOR THE MOMENT*/
479  | 
480  |     /* start from the top node if the searched prefix is between the 
481  |        top node and the first node on the stack (i.e. the first node is
482  |        contained within the search term) */
483  |     
    /* COVERS THE CASE 0.0.0.0/0 */
    /* or any other prefix that the tree might be set to represent */
    /* for which there is no actual object (not even a glue node)*/
487  | 
488  |     if( sps < 0 ) {
489  |       if( stackcount > 0             /* there is any node in the tree */  
490  | 	  && 0 == IP_addr_cmp( & prefix->ip, 
491  | 			       & stack[0].cpy.prefix.ip,
492  | 			       prefix->bits) ) {     /* addr match */
493  | 	rx_walk_tree( tree->top_ptr, hook_function,
494  | 		      RX_WALK_REVERS | RX_WALK_SKPGLU, /* skip glue nodes while counting*/
495  | 		      par_a, /* display this many levels */
496  | 		      0, 0, &datstr, &err);
497  | 	if( err != RX_OK ) {
498  | 	  return err;
499  | 	}
500  |       }
501  |     }
502  |     else { 
503  |       for( i = 1; i >= 0; i--) {
504  |         if( stack[sps].cpy.child_ptr[i] != NULL ) {
505  |           if( ER_is_traced( FAC_RX, ASP_RX_SRCH_DET)) {
506  |             IP_pref_b2a(& stack[sps].cpy.child_ptr[i]->prefix, buf, 1023);
507  |           }
508  | 
509  |           if( 0 == IP_addr_cmp( & stack[sps].cpy.child_ptr[i]->prefix.ip, 
510  |                                 & prefix->ip, 
511  |                                 prefix->bits) ) {
512  |             
513  |             ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
514  |                       "rx_nod_search: digging child %d: %s", i, buf);
515  |             
516  |             rx_walk_tree( stack[sps].cpy.child_ptr[i],  hook_function, 
517  |                           RX_WALK_REVERS | RX_WALK_SKPGLU, /* skip glue nodes while counting*/
518  |                           par_a, /* display this many levels */
519  |                           0, 0, &datstr, &err);
520  |             if( err != RX_OK ) {
521  |               return err;
522  |             }
523  |           }
524  |           else {
525  |             ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
526  |                       "rx_nod_search: prefix mismatch with child %d: %s", 
527  |                       i, buf);
528  |           }
529  |         }
530  |       }
531  |     }
532  |     break;
533  | 
534  |   case RX_SRCH_RANG:
    /* OK, start from the node at the end of the stack (exless match including
       glue nodes), then look at its prefix length:

         OK      -> found! descend from here as long as the prefixes are
                    in range
         shorter -> apparently there is not even such a glue node; come back
                    down one step
    */
545  |     
546  |     i = sps;               /* go up the tree (down the stack) */
547  |                            /* until too far (one node too much, after >= )*/
548  |     while( i >= 0 && stack[i].cpy.prefix.bits >= prefix->bits ) {
549  |       i--;
550  |     }
551  |     
552  |     /* look where you are:*/
553  |     
554  |     if( i < 0 )          /* it was the top object, but its prefix was too long*/
555  |       i=0;               /* take the top object as the base*/
556  |     else
557  |       i++;               /* went one too much, now come back one step*/
558  |     
559  |     
560  |     rx_walk_tree( stack[i].srcptr,  rx_walk_hook_addnode, 
                  RX_WALK_PRFLEN, /* limit the walk by prefix length */
562  |                   par_a, /* display up to this max length*/
563  |                   0, 0, &datstr, &err);
564  |     if( err != RX_OK ) {
565  |       return err;
566  |     }
567  |     
568  |     break;    
569  | 
570  |     /* return RX_NOYETI;*/
571  |     /*not implemented*/
572  |     /*    die; */
573  |   default:
574  |     die; /* are you nuts??*/
575  |   }
576  | 
577  |   return err;
578  | 
579  | }
580  | 
581  | 
582  | 
583  | /*****************************************************************************/
584  | 
585  | /*+++++++++++++
586  |   builds a stack for this prefix, finds *nodes* in the stack 
587  |   and appends *copies of the data leaves* to the LL of answers;
588  |   
589  |   sorts by SQL object keys and uniq's the data
590  |   
591  |   finds:
592  |   0 or 1 nodes for exact search
593  |   0 or 1 nodes for exless (0 if no less specific node found)
594  |   any number (incl. 0) for {more|less}-n specific 
595  |   
596  |   then copies the nodes/dataleaves to the answer structs and appends them
597  |   to the given LL. So, effectively, the number of answers can be
  anything from 0 to infinity, because objects may be duplicated
599  |   even at the same node.
600  |   
601  |   returns errcode.
602  |   
603  |   algorithm:
604  |   
605  |   builds stack[MAXBIT (==128)];
606  |   
607  |   if( more/less-depth && par_a == 0)
608  |   
609  |   run rx_nod_search, then 
610  |   
611  |   if(more spec) rx_nod_walk(maxdepth=n, append_to_LL() );
612  |   if(less spec) do { append(LL, stack[i]) } while(i-- && n--);
613  |   otherwise just set LL
614  |   
615  |   
616  |   The routine provides _at_least_ max_count answers. 
617  |   It will *try* to stop after max_count as soon as possible 
618  |   - but it's the higher level routine that should do the final cut.
619  | +++++++++++++++*/
620  | 
621  | er_ret_t
622  | RX_bin_search (
623  |                rx_srch_mt  search_mode,
624  |                int         par_a,
625  |                int         par_b,
626  |                rx_tree_t  *tree,           /* tree ptr*/
627  |                ip_prefix_t *prefix,         /* binary prefix*/
628  |                GList       **datleaves,    /* data leaves go here*/
629  |                int         max_count 
630  |                )
631  |    
632  | {
633  |   rx_nodcpy_t  stack[128];
634  |   int k;
635  |   int stkcnt, resnum = 0, maxleaves;
636  |   GList  *nodlist = NULL, *nitem;
637  |   rx_node_t *curnode;
638  |   rx_nodcpy_t *curcpy;
639  |   rx_datref_t *datref;
640  |   rx_stk_mt     dmode;
641  | 
642  |   /* more specific node search may start from a glue node, */
643  |   /* for all others the stack should not contain glues.*/
644  | 
645  |   dmode = ( search_mode == RX_SRCH_MORE 
646  |             || search_mode == RX_SRCH_DBLS
647  |             || search_mode == RX_SRCH_RANG ) 
648  |     ? RX_STK_QUERY_ALLNOD
649  |     : RX_STK_QUERY_NOGLUE;
650  |   
651  |   rx_build_stack(stack, &stkcnt, tree, prefix, dmode);
652  | 
653  |   rx_nod_search( search_mode, par_a, par_b, tree, prefix, 
654  |                  stack, stkcnt, &nodlist, 1000);
655  |   
656  |   ER_dbg_va(FAC_RX, ASP_RX_SRCH_BOT, "RX_bin_search: processing nodes");
657  | 
658  |   for( nitem = g_list_first(nodlist);
659  |        nitem != NULL;
660  |        nitem = g_list_next(nitem)) {    
661  |     
662  |     resnum++;
663  |     curcpy = nitem->data;
664  |     
665  |     /*
666  |       if memory mode includes RAM:
667  |       * do not expect copies of nodes in the list received from bin_search.
668  |       * iterate through data leaves with g_list_nth_data.
669  |       */
670  |     
671  |     curnode = curcpy->srcptr;
672  |     
673  |     /*    rx_nod_print( curnode, buf, 1024 );*/
674  |     
675  |     maxleaves = g_list_length(curnode->leaves_ptr);
676  |     /*    fprintf(stderr,"###node %d, %d dataleaves attached:", i, maxleaves);*/
677  | 
678  |     /* iterate through dataleafs attached to this node*/
679  |     for(k=0; k<maxleaves; k++) {
680  |       rx_dataleaf_t *leafptr = g_list_nth_data(curnode->leaves_ptr, k);
681  | 
682  |       /* 
683  | 	 check the conditions to add the leaf:
684  | 
685  | 	 1. never add the same leaf twice (can occur for repeated queries
686  | 	 because of composed ranges)
687  | 	 2. never add composed inetnum for exact prefix search
688  | 	 (but do for exact range search...) - must be solved in upper layer.
689  | 
690  |       */
691  | 
      /* add only if not yet on the list, i.e. if it's composed then check,*/
693  |       /* otherwise just add*/
694  | 
695  |       /*      if( tree->family == RX_FAM_IN && leafptr->composed > 0 ) {*/
696  |         GList *item;
697  |         int already_there = 0;
698  |         
699  |         for(item = g_list_first(*datleaves);
700  |             item != NULL;
701  |             item = g_list_next(item)) {
702  | 	  rx_datref_t *tmpref = (rx_datref_t *) item->data;
703  | 	  
704  | 	  if( tmpref->leafptr == leafptr ) {
705  | 	    already_there = 1;
706  | 	    break;
707  | 	  }
708  |         }
709  |         
710  |         if( already_there == 1 ) {
711  |           continue; /* take next element*/
712  |         }
713  |         else {
714  | 	  /* add*/
715  | 
716  | 	  dieif( wr_calloc( (void **) &datref, 
717  | 			    sizeof(rx_datref_t), 1) != UT_OK);
718  | 	  datref->leafptr = leafptr;
719  |       
720  | 	  *datleaves = g_list_prepend(*datleaves, datref);
721  | 	}
722  |     }
723  |   }
724  | 
725  |   wr_clear_list( &nodlist );
726  | 
727  |   ER_dbg_va(FAC_RX, ASP_RX_SRCH_BOT,
728  |             "RX_bin_search: found %d nodes", resnum);
729  |     
730  |   
731  |   /* the LL of answers (*datleaves) contains pointers to answer structs, 
732  |      that SHOULD BE NORMALIZED HERE (==with no redundant entries)
733  |   */
734  | 
  return RX_OK;
736  | }
737  | 
738  | /**************************************************************************/
739  | /*+++++++++++
740  |    helper: 
741  |    this routine goes through the list of prefixes and performs a bin_search
742  |    on each of them; attaches the results to datlist.
743  | +++++++++++*/
744  | er_ret_t
745  | rx_preflist_search (
746  |                     rx_srch_mt search_mode, 
747  |                     int par_a,
748  |                     int par_b,
749  |                     rx_tree_t  *mytree,
750  |                     GList    **preflist,
751  |                     GList    **datlist
752  |                     )
753  | 
754  | { 
755  |   char   prefstr[IP_PREFSTR_MAX];
756  |   GList   *qitem;
757  |   ip_prefix_t *querypref;
758  |   er_ret_t err;
759  |   
760  |   for( qitem = g_list_first(*preflist);
761  |        qitem != NULL;
762  |        qitem = g_list_next(qitem)) {
763  |     
764  |     querypref = qitem->data;
765  |     
766  |     if( IP_pref_b2a( querypref, prefstr, IP_PREFSTR_MAX) != IP_OK ) {
767  |       die;
768  |     }
769  |     ER_dbg_va(FAC_RX, ASP_RX_SRCH_BOT,
770  |               "rx_preflist_search: mode %d (par %d) for %s", 
771  |               search_mode, par_a, prefstr);
772  |     
773  |     if (mytree->num_nodes > 0) {
774  |       err = RX_bin_search( search_mode, par_a, par_b, mytree, querypref, 
775  |                    datlist, RX_ANS_ALL);
776  |       if( err != RX_OK ) {
777  |         return err;
778  |       }
779  |     }
780  |   }
781  |   
782  |   return RX_OK;
783  | }
784  | 
785  | /*++++
786  |   this is a helper: goes through a datlist and returns the smallest
787  |   size of a range
788  |   +++*/
789  | ip_rangesize_t
rx_find_smallest_span( GList *datlist )
{
  ip_rangesize_t  min_span, span;
  GList *ditem;

  min_span = 0xffffffff;

  /* go through the list and find the shortest range. */
  for(ditem = g_list_first(datlist);
      ditem != NULL;
      ditem = g_list_next(ditem)) {
    rx_datref_t *refptr = (rx_datref_t *) (ditem->data);

    span = IP_rang_span(refptr->leafptr->iprange);

    if( span < min_span ) {
      min_span = span;
    }
  }
  ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
            "rx_find_smallest_span: minimal span is %d", min_span);

  return min_span;
}
813  | 
814  | 
/*+ helper: copies the data leaf referenced by refptr and prepends the copy
    to anslist +*/
816  | er_ret_t
817  | rx_asc_append_datref(rx_datref_t *refptr, GList **anslist)
818  | {
819  |   er_ret_t err;
820  |   rx_datcpy_t *datcpy;
821  |   void *dataptr;
822  | 
  /* OK, so we ACCEPT this result. Copy it.*/

  if( (err=wr_calloc( (void **)& datcpy, 1, sizeof(rx_datcpy_t))) != UT_OK) {
    return err; /*    die;*/
  }

  datcpy->leafcpy = *(refptr->leafptr);

  /* copy the immediate data too. Set the ptr.*/

  if( (err=wr_calloc( (void **) & dataptr, 1, refptr->leafptr->data_len))
      != UT_OK) {
    return err; /*    die;*/
  }
  memcpy(dataptr, refptr->leafptr->data_ptr, refptr->leafptr->data_len);

  datcpy->leafcpy.data_ptr = dataptr;

  *anslist = g_list_prepend(*anslist, datcpy);

  return RX_OK;
844  | }
845  | 
846  | 
/*+ rx_asc_process_datlist() - helper for rx_asc_search()
  
  filters the data leaf references collected in datlist and appends
  copies of the accepted ones to anslist

851  |      ASSUMES LOCKED TREE
852  | 
853  |      the behaviour for a default inetnum (range) query is: 
854  |        do an exact match; 
855  |        if it fails, do an exless match on the encompassing prefix
     for routes (prefixes):
857  |        do an exless match
858  |      
859  |      So if it's the default search mode on an inetnum tree,
860  |      and the key is a range, 
861  |      then an exact search is performed on one of the composing prefixes.
862  | 
863  |      Then the resulting data leaves are checked for exact matching with 
864  |      the range queried for.
865  |      Any dataleaves that do not match are discarded, and if none are left,
866  |      the procedure falls back to searching for the encompassing prefix.
867  |      (calculated in the smart_conv routine). 
     Add the dataleaf copies to the list of answers,
     taking the span into account.
870  | +*/
871  | er_ret_t
872  | rx_asc_process_datlist(
873  | 		       rx_srch_mt search_mode,
874  | 		       rx_fam_t   fam_id,
875  | 		       GList    **datlist,
876  | 		       ip_range_t *testrang,
877  | 		       GList    **anslist )
878  | {
879  |   ip_rangesize_t  min_span, span;
880  |   GList    *ditem;
881  | 
882  |   /* EXLESS search of INETNUMS: the smallest span must be found */
883  |   if( fam_id == RX_FAM_IN && search_mode == RX_SRCH_EXLESS ) {
884  |     min_span = rx_find_smallest_span(*datlist);
885  |   }
886  | 
887  |   /* Process the dataleaf copies and add to the list of answers. */
888  |   for(ditem = g_list_first(*datlist);
889  |       ditem != NULL;
890  |       ditem = g_list_next(ditem)) {
891  |     er_ret_t err;
892  |     rx_datref_t *refptr = (rx_datref_t *) (ditem->data);
893  |     
894  |     /* EXLESS search of INETNUMS: the smallest span must be returned */
895  |     if( fam_id == RX_FAM_IN && search_mode == RX_SRCH_EXLESS
896  |         && (span = IP_rang_span(refptr->leafptr->iprange)) != min_span ) {
897  |       
898  |       ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
899  |                 "rx_asc_search: discarded object with span %d", span);
900  |       continue;
901  |     }
902  |     
903  |     /* EXACT search on INETNUMS - discard if the range does not match */
904  |     if( search_mode == RX_SRCH_EXACT && fam_id == RX_FAM_IN 
905  | 	&& memcmp( & refptr->leafptr->iprange, 
906  | 		   testrang, sizeof(ip_range_t)) != 0) {
907  |       
908  |       ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
909  | 		"rx_asc_search: discarded an object from exact/inetnum/prefix search");
910  |       continue;
911  |     }
912  |     
913  |     /* OK, so we ACCEPT this result.*/
914  |     if( (err = rx_asc_append_datref(refptr, anslist)) != RX_OK ) {
915  |       return err;
916  |     }
917  |   }
918  |   return RX_OK;
919  | }    
920  |   
921  | /* 
922  |    rx_asc_make_preflist() is a helper function for rx_asc_search().
923  | 
   makes a list of prefixes to search for.
925  | 
926  |    special treatment for inetnum/exact:
927  |    + a range that is equivalent to the search key (which may be a prefix)
928  |      is made, to be used later for comparisons
929  |      
930  |    special treatment for inetnum/exless/composed:
931  |    + the first pass mode is set to exact (otherwise to search_mode)
932  | 
933  |    a few optimisations are made:
   + for a route/composed_range/exact : the search is nuked (preflist cleared)
935  |    + for an inetnum/composed_range/(exless|exact) : the list is truncated
936  |      to one prefix, because in an exact search, it must be there anyway, 
937  |      and for the exless, the smallest encompassing one must match
938  |    
939  |      
940  |   */
941  |   
942  | 
943  | er_ret_t 
944  | rx_asc_make_preflist(
945  |                rx_srch_mt search_mode, 
946  |                char *key,          
947  |                rx_fam_t   fam_id,  
948  |                GList **preflist,
949  | 	       ip_range_t *testrang,
950  | 	       rx_srch_mt *first_pass_mode
951  | 	       ) 
952  | {
953  |   er_ret_t   err; 
954  |   ip_keytype_t key_type;
955  |   int prefcount;
956  |   
957  |   /* parse the key */
958  |   if( ( err = IP_smart_conv(key, 0, 0,
959  | 			    preflist, IP_EXPN, &key_type)) != IP_OK ) {
960  |     /* XXX operational trouble (UT_*) or invalid key (IP_INVARG)*/
961  |     return err; 
962  |   }
963  |   prefcount = g_list_length(*preflist);
964  | 
965  |   /* EXACT search of a route tree for a composed range makes no sense */
966  |   if( fam_id == RX_FAM_RT && search_mode == RX_SRCH_EXACT 
967  |       && key_type == IPK_RANGE && prefcount > 1 ) {
968  |     /* abort search - i.e. clear the preflist*/
969  | 
970  |       wr_clear_list( preflist);
971  |       
972  |       ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
973  | 	"rx_asc_make_preflist: route/exact/composed - preflist cleared");
974  |   }
975  |   
976  |   /*+ inetnum / exact|exless specific :
977  |     optimise: (composed range) 
978  |     
979  |       perform a separate first pass, with just one exact search on one of 
980  |       the composing prefixes - the object must be found if it's in the 
981  |       database.
982  |       
      So a little cheat: remove all but one prefix from preflist
984  |       and force a different search mode
985  |       +*/ 
986  |   if( fam_id == RX_FAM_IN  
987  |       && (search_mode == RX_SRCH_EXLESS || search_mode == RX_SRCH_EXACT) 
988  |       && key_type == IPK_RANGE && prefcount > 1 ) { 
989  | 
990  |       wr_clear_list(  & ((*preflist)->next) );
991  | 
992  |       ER_dbg_va(FAC_RX, ASP_RX_SRCH_DET,
993  | 	 "rx_asc_make_preflist: inet/ex***/composed - preflist truncated, first pass EXACT forced");
994  | 
995  |       *first_pass_mode = RX_SRCH_EXACT;
996  |   } /* inetnum / exact|exless specific */
997  | 
998  |   /* exact: set range so a comparison can be performed */
999  |   if( fam_id == RX_FAM_IN && 
1000 |       ( search_mode == RX_SRCH_EXACT || *first_pass_mode == RX_SRCH_EXACT) ) {
1001 |     if( key_type == IPK_RANGE ) {
1002 |       /* must succeed after smart_conv succeeded */
1003 |       dieif(IP_rang_a2b(testrang, key) != IP_OK );
1004 |     }
1005 |     else { 
1006 |       /* construct a range equivalent to this prefix */
1007 |       IP_pref_2_rang( testrang, (*preflist)->data );
1008 |     }
1009 |   }
1010 | 
1011 |   return RX_OK;
1012 | }
1013 | /**************************************************************************/