Graphviz  2.35.20130930.0449
position.c
Go to the documentation of this file.
1 /* $Id$ $Revision$ */
2 /* vim:set shiftwidth=4 ts=8: */
3 
4 /*************************************************************************
5  * Copyright (c) 2011 AT&T Intellectual Property
6  * All rights reserved. This program and the accompanying materials
7  * are made available under the terms of the Eclipse Public License v1.0
8  * which accompanies this distribution, and is available at
9  * http://www.eclipse.org/legal/epl-v10.html
10  *
11  * Contributors: See CVS logs. Details at http://www.graphviz.org/
12  *************************************************************************/
13 
14 
15 /*
16  * position(g): set ND_coord(n) (x and y) for all nodes n of g, using GD_rank(g).
17  * (the graph may be modified by merging certain edges with a common endpoint.)
18  * the coordinates are computed by constructing and ranking an auxiliary graph.
19  * then leaf nodes are inserted in the fast graph. cluster boundary nodes are
20  * created and correctly separated.
21  */
22 
23 #include "dot.h"
24 #include "aspect.h"
25 
26 static int nsiter2(graph_t * g);
27 static void create_aux_edges(graph_t * g);
28 static void remove_aux_edges(graph_t * g);
29 static void set_xcoords(graph_t * g);
30 static void set_ycoords(graph_t * g);
31 static void set_aspect(graph_t * g, aspect_t* );
32 static void expand_leaves(graph_t * g);
33 static void make_lrvn(graph_t * g);
34 static void contain_nodes(graph_t * g);
35 static boolean idealsize(graph_t * g, double);
36 
37 #ifdef DEBUG
38 static void
39 dumpNS (graph_t * g)
40 {
41  node_t* n = GD_nlist(g);
42  elist el;
43  edge_t* e;
44  int i;
45 
46  while (n) {
47  el = ND_out(n);
48  for (i = 0; i < el.size; i++) {
49  e = el.list[i];
50  fprintf (stderr, "%s(%x) -> ", agnameof(agtail(e)),agtail(e));
51  fprintf (stderr, "%s(%x) : %d\n", agnameof(aghead(e)), aghead(e),
52  ED_minlen(e));
53  }
54  n = ND_next(n);
55  }
56 }
57 #endif
58 
59 static double
60 largeMinlen (double l)
61 {
62  agerr (AGERR, "Edge length %f larger than maximum %u allowed.\nCheck for overwide node(s).\n", l, USHRT_MAX);
63  return (double)USHRT_MAX;
64 }
65 
66 /* connectGraph:
67  * When source and/or sink nodes are defined, it is possible that
68  * after the auxiliary edges are added, the graph may still have 2 or
69  * 3 components. To fix this, we put trivial constraints connecting the
70  * first items of each rank.
71  */
/* connectGraph:
 * When source and/or sink nodes are defined, it is possible that
 * after the auxiliary edges are added, the graph may still have 2 or
 * 3 components. To fix this, we put trivial constraints connecting the
 * first items of each rank.
 */
static void
connectGraph (graph_t* g)
{
    int i, j, r, found;
    node_t* tp;
    node_t* hp;
    node_t* sn;
    edge_t* e;
    rank_t* rp;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        rp = GD_rank(g)+r;
        found =FALSE;
        tp = NULL;
        /* look for any node on this rank with a saved (real) edge whose
         * other endpoint lies on a higher-numbered rank; such an edge
         * already connects this rank downward */
        for (i = 0; i < rp->n; i++) {
            tp = rp->v[i];
            if (ND_save_out(tp).list) {
                for (j = 0; (e = ND_save_out(tp).list[j]); j++) {
                    if ((ND_rank(aghead(e)) > r) || (ND_rank(agtail(e)) > r)) {
                        found = TRUE;
                        break;
                    }
                }
                if (found) break;
            }
            if (ND_save_in(tp).list) {
                for (j = 0; (e = ND_save_in(tp).list[j]); j++) {
                    if ((ND_rank(agtail(e)) > r) || (ND_rank(aghead(e)) > r)) {
                        found = TRUE;
                        break;
                    }
                }
                if (found) break;
            }
        }
        /* connected already, or the rank is empty: nothing to do */
        if (found || !tp) continue;
        /* otherwise tie this rank's first node to the first node of the
         * adjacent rank through a fresh slack node with zero-length,
         * zero-weight aux edges */
        tp = rp->v[0];
        if (r < GD_maxrank(g)) hp = (rp+1)->v[0];
        else hp = (rp-1)->v[0];
        assert (hp);
        sn = virtual_node(g);
        ND_node_type(sn) = SLACKNODE;
        make_aux_edge(sn, tp, 0, 0);
        make_aux_edge(sn, hp, 0, 0);
        ND_rank(sn) = MIN(ND_rank(tp), ND_rank(hp));
    }
}
119 
121 {
122  if (GD_nlist(g) == NULL)
123  return; /* ignore empty graph */
124  mark_lowclusters(g); /* we could remove from splines.c now */
125  set_ycoords(g);
126  if (Concentrate)
127  dot_concentrate(g);
128  expand_leaves(g);
129  if (flat_edges(g))
130  set_ycoords(g);
131  create_aux_edges(g);
132  if (rank(g, 2, nsiter2(g))) { /* LR balance == 2 */
133  connectGraph (g);
134  assert(rank(g, 2, nsiter2(g)) == 0);
135  }
136  set_xcoords(g);
137  set_aspect(g, asp);
138  remove_aux_edges(g); /* must come after set_aspect since we now
139  * use GD_ln and GD_rn for bbox width.
140  */
141 }
142 
143 static int nsiter2(graph_t * g)
144 {
145  int maxiter = INT_MAX;
146  char *s;
147 
148  if ((s = agget(g, "nslimit")))
149  maxiter = atof(s) * agnnodes(g);
150  return maxiter;
151 }
152 
153 static int go(node_t * u, node_t * v)
154 {
155  int i;
156  edge_t *e;
157 
158  if (u == v)
159  return TRUE;
160  for (i = 0; (e = ND_out(u).list[i]); i++) {
161  if (go(aghead(e), v))
162  return TRUE;
163  }
164  return FALSE;
165 }
166 
167 static int canreach(node_t * u, node_t * v)
168 {
169  return go(u, v);
170 }
171 
/* make_aux_edge:
 * Create a constraint edge u -> v for the auxiliary ranking graph with
 * minimum length len (clamped to USHRT_MAX with a warning) and weight wt.
 * The edge is allocated directly (not through agedge) and linked into the
 * fast graph via fast_edge. Returns the new edge.
 */
edge_t *make_aux_edge(node_t * u, node_t * v, double len, int wt)
{
    edge_t *e;

#ifndef WITH_CGRAPH
    e = NEW(edge_t);
#else
    /* under cgraph an edge is one half of an in/out pair, with an
     * Agedgeinfo_t record attached to the out half */
    Agedgepair_t* e2 = NEW(Agedgepair_t);
    AGTYPE(&(e2->in)) = AGINEDGE;
    AGTYPE(&(e2->out)) = AGOUTEDGE;
    e2->out.base.data = (Agrec_t*)NEW(Agedgeinfo_t);
    e = &(e2->out);
#endif /* WITH_CGRAPH */

    agtail(e) = u;
    aghead(e) = v;
    if (len > USHRT_MAX)
        len = largeMinlen (len); /* warn and clamp overlong constraints */
    ED_minlen(e) = ROUND(len);
    ED_weight(e) = wt;
    fast_edge(e);
    return e;
}
195 
196 static void allocate_aux_edges(graph_t * g)
197 {
198  int i, j, n_in;
199  node_t *n;
200 
201  /* allocate space for aux edge lists */
202  for (n = GD_nlist(g); n; n = ND_next(n)) {
203  ND_save_in(n) = ND_in(n);
204  ND_save_out(n) = ND_out(n);
205  for (i = 0; ND_out(n).list[i]; i++);
206  for (j = 0; ND_in(n).list[j]; j++);
207  n_in = i + j;
208  alloc_elist(n_in + 3, ND_in(n));
209  alloc_elist(3, ND_out(n));
210  }
211 }
212 
/* make_LR_constraints:
 * Add aux edges that fix the left-to-right order of nodes on each rank,
 * reserve room for self-edges, and constrain the endpoints of flat edges
 * (with and without labels). Also seeds ND_rank with a running x estimate.
 */
static void
make_LR_constraints(graph_t * g)
{
    int i, j, k;
    int sw;                     /* self width */
    int m0, m1;
    double width;
    int sep[2];
    int nodesep;                /* separation between nodes on same rank */
    edge_t *e, *e0, *e1, *ff;
    node_t *u, *v, *t0, *h0;
    rank_t *rank = GD_rank(g);

    /* Use smaller separation on odd ranks if g has edge labels */
    if (GD_has_labels(g) & EDGE_LABEL) {
        sep[0] = GD_nodesep(g);
        sep[1] = 5;
    }
    else {
        sep[1] = sep[0] = GD_nodesep(g);
    }
    /* make edges to constrain left-to-right ordering */
    for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
        double last;            /* running x position across this rank */
        last = ND_rank(rank[i].v[0]) = 0;
        nodesep = sep[i & 1];
        for (j = 0; j < rank[i].n; j++) {
            u = rank[i].v[j];
            ND_mval(u) = ND_rw(u); /* keep it somewhere safe */
            if (ND_other(u).size > 0) { /* compute self size */
                /* FIX: dot assumes all self-edges go to the right. This
                 * is no longer true, though makeSelfEdge still attempts to
                 * put as many as reasonable on the right. The dot code
                 * should be modified to allow a box reflecting the placement
                 * of all self-edges, and use that to reposition the nodes.
                 * Note that this would not only affect left and right
                 * positioning but may also affect interrank spacing.
                 */
                sw = 0;
                for (k = 0; (e = ND_other(u).list[k]); k++) {
                    if (agtail(e) == aghead(e)) {
                        sw += selfRightSpace (e);
                    }
                }
                ND_rw(u) += sw; /* increment to include self edges */
            }
            v = rank[i].v[j + 1];
            if (v) {
                /* keep the right neighbor at least width away */
                width = ND_rw(u) + ND_lw(v) + nodesep;
                e0 = make_aux_edge(u, v, width, 0);
                last = (ND_rank(v) = last + width);
            }

            /* constraints from labels of flat edges on previous rank */
            if ((e = (edge_t*)ND_alg(u))) {
                /* u is a flat-edge label vnode; its two out edges point at
                 * the real endpoints — order them left-to-right first */
                e0 = ND_save_out(u).list[0];
                e1 = ND_save_out(u).list[1];
                if (ND_order(aghead(e0)) > ND_order(aghead(e1))) {
                    ff = e0;
                    e0 = e1;
                    e1 = ff;
                }
                m0 = (ED_minlen(e) * GD_nodesep(g)) / 2;
                m1 = m0 + ND_rw(aghead(e0)) + ND_lw(agtail(e0));
                /* these guards are needed because the flat edges
                 * work very poorly with cluster layout */
                if (canreach(agtail(e0), aghead(e0)) == FALSE)
                    make_aux_edge(aghead(e0), agtail(e0), m1,
                                  ED_weight(e));
                m1 = m0 + ND_rw(agtail(e1)) + ND_lw(aghead(e1));
                if (canreach(aghead(e1), agtail(e1)) == FALSE)
                    make_aux_edge(agtail(e1), aghead(e1), m1,
                                  ED_weight(e));
            }

            /* position flat edge endpoints */
            for (k = 0; k < ND_flat_out(u).size; k++) {
                e = ND_flat_out(u).list[k];
                /* orient so t0 is the left endpoint */
                if (ND_order(agtail(e)) < ND_order(aghead(e))) {
                    t0 = agtail(e);
                    h0 = aghead(e);
                } else {
                    t0 = aghead(e);
                    h0 = agtail(e);
                }

                width = ND_rw(t0) + ND_lw(h0);
                m0 = ED_minlen(e) * GD_nodesep(g) + width;

                if ((e0 = find_fast_edge(t0, h0))) {
                    /* flat edge between adjacent neighbors
                     * ED_dist contains the largest label width.
                     */
                    m0 = MAX(m0, width + GD_nodesep(g) + ROUND(ED_dist(e)));
                    if (m0 > USHRT_MAX)
                        m0 = largeMinlen (m0);
                    ED_minlen(e0) = MAX(ED_minlen(e0), m0);
                }
                else if (!ED_label(e)) {
                    /* unlabeled flat edge between non-neighbors
                     * ED_minlen(e) is max of ED_minlen of all equivalent
                     * edges.
                     */
                    make_aux_edge(t0, h0, m0, ED_weight(e));
                }
                /* labeled flat edges between non-neighbors have already
                 * been constrained by the label above.
                 */
            }
        }
    }
}
327 
/* make_edge_pairs: make virtual edge pairs corresponding to input edges.
 * Each saved (real) out edge gets a slack node with two aux edges to the
 * edge's endpoints; the asymmetric minlens (m0/m1) encode the horizontal
 * offset between head and tail ports so ranking pulls ports into line.
 */
static void make_edge_pairs(graph_t * g)
{
    int i, m0, m1;
    node_t *n, *sn;
    edge_t *e;

    for (n = GD_nlist(g); n; n = ND_next(n)) {
        if (ND_save_out(n).list)
            for (i = 0; (e = ND_save_out(n).list[i]); i++) {
                sn = virtual_node(g);
                ND_node_type(sn) = SLACKNODE;
                /* split the port offset into two non-negative slacks */
                m0 = (ED_head_port(e).p.x - ED_tail_port(e).p.x);
                if (m0 > 0)
                    m1 = 0;
                else {
                    m1 = -m0;
                    m0 = 0;
                }
#ifdef NOTDEF
/* was trying to improve LR balance */
                if ((ND_save_out(n).size % 2 == 0)
                    && (i == ND_save_out(n).size / 2 - 1)) {
                    node_t *u = ND_save_out(n).list[i]->head;
                    node_t *v = ND_save_out(n).list[i + 1]->head;
                    double width = ND_rw(u) + ND_lw(v) + GD_nodesep(g);
                    m0 = width / 2 - 1;
                }
#endif
                make_aux_edge(sn, agtail(e), m0 + 1, ED_weight(e));
                make_aux_edge(sn, aghead(e), m1 + 1, ED_weight(e));
                /* seed the slack node's rank below both endpoints */
                ND_rank(sn) =
                    MIN(ND_rank(agtail(e)) - m0 - 1,
                        ND_rank(aghead(e)) - m1 - 1);
            }
    }
}
365 
366 static void contain_clustnodes(graph_t * g)
367 {
368  int c;
369  edge_t *e;
370 
371  if (g != agroot(g)) {
372  contain_nodes(g);
373  if ((e = find_fast_edge(GD_ln(g),GD_rn(g)))) /* maybe from lrvn()?*/
374  ED_weight(e) += 128;
375  else
376  make_aux_edge(GD_ln(g), GD_rn(g), 1, 128); /* clust compaction edge */
377  }
378  for (c = 1; c <= GD_n_cluster(g); c++)
379  contain_clustnodes(GD_clust(g)[c]);
380 }
381 
382 static int vnode_not_related_to(graph_t * g, node_t * v)
383 {
384  edge_t *e;
385 
386  if (ND_node_type(v) != VIRTUAL)
387  return FALSE;
388  for (e = ND_save_out(v).list[0]; ED_to_orig(e); e = ED_to_orig(e));
389  if (agcontains(g, agtail(e)))
390  return FALSE;
391  if (agcontains(g, aghead(e)))
392  return FALSE;
393  return TRUE;
394 }
395 
/* keepout_othernodes:
 * Guarantee nodes outside the cluster g are placed outside of it.
 * This is done by adding constraints to make sure such nodes have
 * a gap of margin from the left or right bounding box node ln or rn.
 *
 * We could probably reduce some of these constraints by checking if
 * the node is in a cluster, since elsewhere we make constrain a
 * separate between clusters. Also, we should be able to skip the
 * first loop if g is the root graph.
 */
static void keepout_othernodes(graph_t * g)
{
    int i, c, r, margin;
    node_t *u, *v;

    margin = late_int (g, G_margin, CL_OFFSET, 0);
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        if (GD_rank(g)[r].n == 0)
            continue;
        v = GD_rank(g)[r].v[0];  /* leftmost node of g on this rank */
        if (v == NULL)
            continue;
        /* scan left in the ROOT rank for the nearest unrelated node and
         * pin it at least margin left of g's left box node */
        for (i = ND_order(v) - 1; i >= 0; i--) {
            u = GD_rank(agroot(g))[r].v[i];
            /* can't use "is_a_vnode_of" because elists are swapped */
            if ((ND_node_type(u) == NORMAL) || vnode_not_related_to(g, u)) {
                make_aux_edge(u, GD_ln(g), margin + ND_rw(u), 0);
                break;
            }
        }
        /* symmetric scan to the right of g's rightmost node */
        for (i = ND_order(v) + GD_rank(g)[r].n; i < GD_rank(agroot(g))[r].n;
             i++) {
            u = GD_rank(agroot(g))[r].v[i];
            if ((ND_node_type(u) == NORMAL) || vnode_not_related_to(g, u)) {
                make_aux_edge(GD_rn(g), u, margin + ND_lw(u), 0);
                break;
            }
        }
    }

    for (c = 1; c <= GD_n_cluster(g); c++)
        keepout_othernodes(GD_clust(g)[c]);
}
439 
440 /* contain_subclust:
441  * Make sure boxes of subclusters of g are offset from the
442  * box of g. This is done by a constraint between the left and
443  * right bounding box nodes ln and rn of g and a subcluster.
444  * The gap needs to include any left or right labels.
445  */
446 static void contain_subclust(graph_t * g)
447 {
448  int margin, c;
449  graph_t *subg;
450 
451  margin = late_int (g, G_margin, CL_OFFSET, 0);
452  make_lrvn(g);
453  for (c = 1; c <= GD_n_cluster(g); c++) {
454  subg = GD_clust(g)[c];
455  make_lrvn(subg);
456  make_aux_edge(GD_ln(g), GD_ln(subg),
457  margin + GD_border(g)[LEFT_IX].x, 0);
458  make_aux_edge(GD_rn(subg), GD_rn(g),
459  margin + GD_border(g)[RIGHT_IX].x, 0);
460  contain_subclust(subg);
461  }
462 }
463 
/* separate_subclust:
 * Guarantee space between subcluster of g.
 * This is done by adding a constraint between the right bbox node rn
 * of the left cluster and the left bbox node ln of the right cluster.
 * This is only done if the two clusters overlap in some rank.
 */
static void separate_subclust(graph_t * g)
{
    int i, j, margin;
    graph_t *low, *high;
    graph_t *left, *right;

    margin = late_int (g, G_margin, CL_OFFSET, 0);
    for (i = 1; i <= GD_n_cluster(g); i++)
        make_lrvn(GD_clust(g)[i]);
    /* consider every unordered pair of sibling clusters */
    for (i = 1; i <= GD_n_cluster(g); i++) {
        for (j = i + 1; j <= GD_n_cluster(g); j++) {
            low = GD_clust(g)[i];
            high = GD_clust(g)[j];
            /* low = the cluster starting on the smaller rank */
            if (GD_minrank(low) > GD_minrank(high)) {
                graph_t *temp = low;
                low = high;
                high = temp;
            }
            /* no shared rank => no horizontal conflict possible */
            if (GD_maxrank(low) < GD_minrank(high))
                continue;
            /* decide left/right by node order on the first shared rank */
            if (ND_order(GD_rank(low)[GD_minrank(high)].v[0])
                < ND_order(GD_rank(high)[GD_minrank(high)].v[0])) {
                left = low;
                right = high;
            } else {
                left = high;
                right = low;
            }
            make_aux_edge(GD_rn(left), GD_ln(right), margin, 0);
        }
        separate_subclust(GD_clust(g)[i]);
    }
}
503 
504 /* pos_clusters: create constraints for:
505  * node containment in clusters,
506  * cluster containment in clusters,
507  * separation of sibling clusters.
508  */
509 static void pos_clusters(graph_t * g)
510 {
511  if (GD_n_cluster(g) > 0) {
512  contain_clustnodes(g);
513  keepout_othernodes(g);
514  contain_subclust(g);
515  separate_subclust(g);
516  }
517 }
518 
519 static void compress_graph(graph_t * g)
520 {
521  double x;
522  pointf p;
523 
524  if (GD_drawing(g)->ratio_kind != R_COMPRESS)
525  return;
526  p = GD_drawing(g)->size;
527  if (p.x * p.y <= 1)
528  return;
529  contain_nodes(g);
530  if (GD_flip(g) == FALSE)
531  x = p.x;
532  else
533  x = p.y;
534 
535  /* Guard against huge size attribute since max. edge length is USHRT_MAX
536  * A warning might be called for. Also, one could check that the graph
537  * already fits GD_drawing(g)->size and return immediately.
538  */
539  x = MIN(x,USHRT_MAX);
540  make_aux_edge(GD_ln(g), GD_rn(g), x, 1000);
541 }
542 
/* create_aux_edges:
 * Build the complete auxiliary constraint graph whose ranking yields
 * x coordinates: swap in empty edge lists, then add LR-ordering,
 * edge-pair, cluster, and optional compression constraints (in that order).
 */
static void create_aux_edges(graph_t * g)
{
    allocate_aux_edges(g);
    make_LR_constraints(g);
    make_edge_pairs(g);
    pos_clusters(g);
    compress_graph(g);
}
551 
/* remove_aux_edges:
 * Free all auxiliary constraint edges, restore each node's saved real
 * adjacency lists, then unlink and free every SLACKNODE from the
 * fast-graph node list.
 */
static void remove_aux_edges(graph_t * g)
{
    int i;
    node_t *n, *nnext, *nprev;
    edge_t *e;

    for (n = GD_nlist(g); n; n = ND_next(n)) {
        for (i = 0; (e = ND_out(n).list[i]); i++) {
#ifdef WITH_CGRAPH
            free(e->base.data);  /* aux edges own their Agedgeinfo record */
#endif
            free(e);
        }
        free_list(ND_out(n));
        free_list(ND_in(n));
        /* put back the real lists saved in allocate_aux_edges */
        ND_out(n) = ND_save_out(n);
        ND_in(n) = ND_save_in(n);
    }
    /* cannot be merged with previous loop */
    nprev = NULL;
    for (n = GD_nlist(g); n; n = nnext) {
        nnext = ND_next(n);
        if (ND_node_type(n) == SLACKNODE) {
            /* splice the slack node out of the singly-walked list */
            if (nprev)
                ND_next(nprev) = nnext;
            else
                GD_nlist(g) = nnext;
#ifdef WITH_CGRAPH
            free(n->base.data);
#endif
            free(n);
        } else
            nprev = n;
    }
    ND_prev(GD_nlist(g)) = NULL;
}
588 
589 /* set_xcoords:
590  * Set x coords of nodes.
591  */
592 static void
593 set_xcoords(graph_t * g)
594 {
595  int i, j;
596  node_t *v;
597  rank_t *rank = GD_rank(g);
598 
599  for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
600  for (j = 0; j < rank[i].n; j++) {
601  v = rank[i].v[j];
602  ND_coord(v).x = ND_rank(v);
603  ND_rank(v) = i;
604  }
605  }
606 }
607 
/* adjustEqual:
 * Expand cluster g vertically by delta, assuming ranks
 * are equally spaced. We first try to split delta evenly
 * using any available space at the top and bottom. If there
 * is not enough, we have to widen the space between the ranks.
 * We divide delta equally within the ranks of g plus its ht1
 * and ht2. To preserve equality of ranks, we add this space
 * between every pair of ranks.
 *
 * There is probably some way to add less than delta, by using
 * whatever available space there is at top and bottom, but for
 * now, trying to figure that out seems more trouble than it is worth.
 */
static void adjustEqual(graph_t * g, int delta)
{
    int r, avail, half, deltop, delbottom;
    graph_t *root = agroot(g);
    rank_t *rank = GD_rank(root);
    int maxr = GD_maxrank(g);
    int minr = GD_minrank(g);

    /* free space above/below g within its top/bottom ranks */
    deltop = rank[minr].ht2 - GD_ht2(g);
    delbottom = rank[maxr].ht1 - GD_ht1(g);
    avail = deltop + delbottom;
    if (avail >= delta) {
        /* enough slack exists: split delta between top and bottom,
         * favoring whichever side has room */
        half = (delta+1) / 2;
        if (deltop <= delbottom) {
            if (half <= deltop) {
                GD_ht2(g) += half;
                GD_ht1(g) += (delta - half);
            }
            else {
                GD_ht2(g) += deltop;
                GD_ht1(g) += (delta - deltop);
            }
        }
        else {
            if (half <= delbottom) {
                GD_ht1(g) += half;
                GD_ht2(g) += (delta - half);
            }
            else {
                GD_ht1(g) += delbottom;
                GD_ht2(g) += (delta - delbottom);
            }
        }
    }
    else {
        /* not enough slack: widen every inter-rank gap by yoff,
         * shifting all ranks above accordingly to stay equal */
        int gaps = maxr - minr + 2;
        int yoff = (delta + (gaps - 1)) / gaps;  /* ceil(delta/gaps) */
        int y = yoff;
        for (r = GD_maxrank(root) - 1; r >= GD_minrank(root); r--) {
            if (rank[r].n > 0)
                ND_coord(rank[r].v[0]).y += y;
            y += yoff;
        }
        GD_ht2(g) += yoff;
        GD_ht1(g) += yoff;
    }
}
668 
/* adjustSimple:
 * Expand cluster height by delta, adding half to top
 * and half to bottom. If the bottom expansion exceeds the
 * ht1 of the rank, shift the ranks in the cluster up.
 * If the top expansion, including any shift from the bottom
 * expansion, exceeds to ht2 of the rank, shift the ranks above
 * the cluster up.
 */
static void adjustSimple(graph_t * g, int delta)
{
    int r, bottom, deltop, delbottom;
    graph_t *root = agroot(g);
    rank_t *rank = GD_rank(root);
    int maxr = GD_maxrank(g);
    int minr = GD_minrank(g);

    bottom = (delta+1) / 2;     /* share of delta going below */
    delbottom = GD_ht1(g) + bottom - rank[maxr].ht1;
    if (delbottom > 0) {
        /* bottom growth overflows the bottom rank: lift all of g's ranks */
        for (r = maxr; r >= minr; r--) {
            if (rank[r].n > 0)
                ND_coord(rank[r].v[0]).y += delbottom;
        }
        deltop = GD_ht2(g) + (delta-bottom) + delbottom - rank[minr].ht2;
    }
    else
        deltop = GD_ht2(g) + (delta-bottom) - rank[minr].ht2;
    if (deltop > 0) {
        /* top growth overflows the top rank: lift all ranks above g */
        for (r = minr-1; r >= GD_minrank(root); r--) {
            if (rank[r].n > 0)
                ND_coord(rank[r].v[0]).y += deltop;
        }
    }
    GD_ht2(g) += (delta - bottom);
    GD_ht1(g) += bottom;
}
705 
/* adjustRanks:
 * Recursively adjust ranks to take into account
 * wide cluster labels when rankdir=LR.
 * We divide the extra space between the top and bottom.
 * Adjust the ht1 and ht2 values in the process.
 */
static void adjustRanks(graph_t * g, int equal)
{
    int lht;                    /* label height */
    int rht;                    /* height between top and bottom ranks */
    int delta, maxr, minr, margin;
    int c, ht1, ht2;

    rank_t *rank = GD_rank(agroot(g));
    margin = late_int (g, G_margin, CL_OFFSET, 0);

    ht1 = GD_ht1(g);
    ht2 = GD_ht2(g);

    /* children first; fold their heights (plus margin) into g's when
     * they touch g's top or bottom rank */
    for (c = 1; c <= GD_n_cluster(g); c++) {
        graph_t *subg = GD_clust(g)[c];
        adjustRanks(subg, equal);
        if (GD_maxrank(subg) == GD_maxrank(g))
            ht1 = MAX(ht1, GD_ht1(subg) + margin);
        if (GD_minrank(subg) == GD_minrank(g))
            ht2 = MAX(ht2, GD_ht2(subg) + margin);
    }

    GD_ht1(g) = ht1;
    GD_ht2(g) = ht2;

    /* if the (rotated) cluster label is taller than the cluster box,
     * grow the box by the difference */
    if ((g != agroot(g)) && GD_label(g)) {
        lht = MAX(GD_border(g)[LEFT_IX].y, GD_border(g)[RIGHT_IX].y);
        maxr = GD_maxrank(g);
        minr = GD_minrank(g);
        rht = ND_coord(rank[minr].v[0]).y - ND_coord(rank[maxr].v[0]).y;
        delta = lht - (rht + ht1 + ht2);
        if (delta > 0) {
            if (equal)
                adjustEqual(g, delta);
            else
                adjustSimple(g, delta);
        }
    }

    /* update the global ranks */
    if (g != agroot(g)) {
        rank[GD_minrank(g)].ht2 = MAX(rank[GD_minrank(g)].ht2, GD_ht2(g));
        rank[GD_maxrank(g)].ht1 = MAX(rank[GD_maxrank(g)].ht1, GD_ht1(g));
    }
}
757 
/* clust_ht:
 * recursively compute cluster ht requirements. assumes GD_ht1(subg) and ht2
 * are computed from primitive nodes only. updates ht1 and ht2 to reflect
 * cluster nesting and labels. also maintains global rank ht1 and ht2.
 * Return true if some cluster has a label.
 */
static int clust_ht(Agraph_t * g)
{
    int c, ht1, ht2;
    graph_t *subg;
    rank_t *rank = GD_rank(agroot(g));
    int margin, haveClustLabel = 0;

    if (g == agroot(g))
        margin = CL_OFFSET;
    else
        margin = late_int (g, G_margin, CL_OFFSET, 0);

    ht1 = GD_ht1(g);
    ht2 = GD_ht2(g);

    /* account for sub-clusters */
    for (c = 1; c <= GD_n_cluster(g); c++) {
        subg = GD_clust(g)[c];
        haveClustLabel |= clust_ht(subg);
        if (GD_maxrank(subg) == GD_maxrank(g))
            ht1 = MAX(ht1, GD_ht1(subg) + margin);
        if (GD_minrank(subg) == GD_minrank(g))
            ht2 = MAX(ht2, GD_ht2(subg) + margin);
    }

    /* account for a possible cluster label in clusters */
    /* room for root graph label is handled in dotneato_postprocess */
    if ((g != agroot(g)) && GD_label(g)) {
        haveClustLabel = 1;
        if (!GD_flip(agroot(g))) {
            /* horizontal layout: labels add to top/bottom height;
             * rankdir=LR labels are handled by adjustRanks instead */
            ht1 += GD_border(g)[BOTTOM_IX].y;
            ht2 += GD_border(g)[TOP_IX].y;
        }
    }
    GD_ht1(g) = ht1;
    GD_ht2(g) = ht2;

    /* update the global ranks */
    if (g != agroot(g)) {
        rank[GD_minrank(g)].ht2 = MAX(rank[GD_minrank(g)].ht2, ht2);
        rank[GD_maxrank(g)].ht1 = MAX(rank[GD_maxrank(g)].ht1, ht1);
    }

    return haveClustLabel;
}
809 
/* set_ycoords:
 * set y coordinates of nodes, a rank at a time:
 * measure rank heights from node/self-label sizes, fold in cluster
 * requirements, place the leftmost node of each rank bottom-up, then
 * copy that y to the rest of the rank.
 */
static void set_ycoords(graph_t * g)
{
    int i, j, r, ht2, maxht, delta, d0, d1;
    node_t *n;
    edge_t *e;
    rank_t *rank = GD_rank(g);
    graph_t *clust;
    int lbl;

    ht2 = maxht = 0;

    /* scan ranks for tallest nodes. */
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        for (i = 0; i < rank[r].n; i++) {
            n = rank[r].v[i];

            /* assumes symmetry, ht1 = ht2 */
            ht2 = (ROUND(ND_ht(n)) + 1) / 2;


            /* have to look for high self-edge labels, too */
            if (ND_other(n).list)
                for (j = 0; (e = ND_other(n).list[j]); j++) {
                    if (agtail(e) == aghead(e)) {
                        if (ED_label(e))
                            ht2 = MAX(ht2, ED_label(e)->dimen.y / 2);
                    }
                }

            /* update global rank ht */
            if (rank[r].pht2 < ht2)
                rank[r].pht2 = rank[r].ht2 = ht2;
            if (rank[r].pht1 < ht2)
                rank[r].pht1 = rank[r].ht1 = ht2;

            /* update nearest enclosing cluster rank ht */
            if ((clust = ND_clust(n))) {
                int yoff = (clust == g ? 0 : late_int (clust, G_margin, CL_OFFSET, 0));
                if (ND_rank(n) == GD_minrank(clust))
                    GD_ht2(clust) = MAX(GD_ht2(clust), ht2 + yoff);
                if (ND_rank(n) == GD_maxrank(clust))
                    GD_ht1(clust) = MAX(GD_ht1(clust), ht2 + yoff);
            }
        }
    }

    /* scan sub-clusters */
    lbl = clust_ht(g);          /* nonzero if any cluster has a label */

    /* make the initial assignment of ycoords to leftmost nodes by ranks */
    maxht = 0;
    r = GD_maxrank(g);
    (ND_coord(rank[r].v[0])).y = rank[r].ht1;
    while (--r >= GD_minrank(g)) {
        d0 = rank[r + 1].pht2 + rank[r].pht1 + GD_ranksep(g); /* prim node sep */
        d1 = rank[r + 1].ht2 + rank[r].ht1 + CL_OFFSET; /* cluster sep */
        delta = MAX(d0, d1);
        if (rank[r].n > 0)      /* this may reflect some problem */
            (ND_coord(rank[r].v[0])).y = (ND_coord(rank[r + 1].v[0])).y + delta;
#ifdef DEBUG
        else
            fprintf(stderr, "dot set_ycoords: rank %d is empty\n",
                    rank[r].n);
#endif
        maxht = MAX(maxht, delta);
    }

    /* re-assign if ranks are equally spaced */
    if (GD_exact_ranksep(g)) {
        for (r = GD_maxrank(g) - 1; r >= GD_minrank(g); r--)
            if (rank[r].n > 0)  /* this may reflect the same problem :-() */
                (ND_coord(rank[r].v[0])).y=
                    (ND_coord(rank[r + 1].v[0])).y + maxht;
    }

    if (lbl && GD_flip(g))
        adjustRanks(g, GD_exact_ranksep(g));

    /* copy ycoord assignment from leftmost nodes to others */
    for (n = GD_nlist(g); n; n = ND_next(n))
        ND_coord(n).y = (ND_coord(rank[ND_rank(n)].v[0])).y;
}
893 
/* dot_compute_bb:
 * Compute bounding box of g.
 * The x limits of clusters are given by the x positions of ln and rn.
 * This information is stored in the rank field, since it was calculated
 * using network simplex.
 * For the root graph, we don't enforce all the constraints on lr and
 * rn, so we traverse the nodes and subclusters.
 */
static void dot_compute_bb(graph_t * g, graph_t * root)
{
    int r, c, x, offset;
    node_t *v;
    pointf LL, UR;

    if (g == agroot(g)) {
        LL.x = (double)(INT_MAX);
        UR.x = (double)(-INT_MAX);
        for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
            int rnkn = GD_rank(g)[r].n;
            if (rnkn == 0)
                continue;
            if ((v = GD_rank(g)[r].v[0]) == NULL)
                continue;
            /* skip virtual nodes: only NORMAL nodes bound the drawing */
            for (c = 1; (ND_node_type(v) != NORMAL) && c < rnkn; c++)
                v = GD_rank(g)[r].v[c];
            if (ND_node_type(v) == NORMAL) {
                x = ND_coord(v).x - ND_lw(v);
                LL.x = MIN(LL.x, x);
            }
            else continue;
            /* At this point, we know the rank contains a NORMAL node */
            v = GD_rank(g)[r].v[rnkn - 1];
            for (c = rnkn-2; ND_node_type(v) != NORMAL; c--)
                v = GD_rank(g)[r].v[c];
            x = ND_coord(v).x + ND_rw(v);
            UR.x = MAX(UR.x, x);
        }
        /* widen to cover child cluster boxes plus a margin */
        offset = CL_OFFSET;
        for (c = 1; c <= GD_n_cluster(g); c++) {
            x = (double)(GD_bb(GD_clust(g)[c]).LL.x - offset);
            LL.x = MIN(LL.x, x);
            x = (double)(GD_bb(GD_clust(g)[c]).UR.x + offset);
            UR.x = MAX(UR.x, x);
        }
    } else {
        /* clusters: x limits come straight from the ln/rn box nodes */
        LL.x = (double)(ND_rank(GD_ln(g)));
        UR.x = (double)(ND_rank(GD_rn(g)));
    }
    LL.y = ND_coord(GD_rank(root)[GD_maxrank(g)].v[0]).y - GD_ht1(g);
    UR.y = ND_coord(GD_rank(root)[GD_minrank(g)].v[0]).y + GD_ht2(g);
    GD_bb(g).LL = LL;
    GD_bb(g).UR = UR;
}
947 
948 static void rec_bb(graph_t * g, graph_t * root)
949 {
950  int c;
951  for (c = 1; c <= GD_n_cluster(g); c++)
952  rec_bb(GD_clust(g)[c], root);
953  dot_compute_bb(g, root);
954 }
955 
956 /* scale_bb:
957  * Recursively rescale all bounding boxes using scale factors
958  * xf and yf. We assume all the bboxes have been computed.
959  */
960 static void scale_bb(graph_t * g, graph_t * root, double xf, double yf)
961 {
962  int c;
963 
964  for (c = 1; c <= GD_n_cluster(g); c++)
965  scale_bb(GD_clust(g)[c], root, xf, yf);
966  GD_bb(g).LL.x *= xf;
967  GD_bb(g).LL.y *= yf;
968  GD_bb(g).UR.x *= xf;
969  GD_bb(g).UR.y *= yf;
970 }
971 
972 /* adjustAspectRatio:
973  */
974 static void adjustAspectRatio (graph_t* g, aspect_t* asp)
975 {
976  double AR = (GD_bb(g).UR.x - GD_bb(g).LL.x)/(GD_bb(g).UR.y - GD_bb(g).LL.y);
977  if (Verbose) {
978  fprintf(stderr, "AR=%0.4lf\t Area= %0.4lf\t", AR, (double)(GD_bb(g).UR.x - GD_bb(g).LL.x)*(GD_bb(g).UR.y - GD_bb(g).LL.y)/10000.0);
979  fprintf(stderr, "Dummy=%d\n", countDummyNodes(g));
980  }
981  if (AR > 1.1*asp->targetAR) {
982  asp->nextIter = (int)(asp->targetAR * (double)(asp->curIterations - asp->prevIterations)/(AR));
983  }
984  else if (AR <= 0.8 * asp->targetAR) {
985  asp->nextIter = -1;
986  if (Verbose)
987  fprintf(stderr, "Going to apply another expansion.\n");
988  }
989  else {
990  asp->nextIter = 0;
991  }
992  if (Verbose)
993  fprintf(stderr, "next#iter=%d\n", asp->nextIter);
994 }
995 
/* set_aspect:
 * Set bounding boxes and, if ratio is set, rescale graph.
 * Note that if some dimension shrinks, there may be problems
 * with labels.
 */
static void set_aspect(graph_t * g, aspect_t* asp)
{
    double xf = 0.0, yf = 0.0, actual, desired;
    node_t *n;
    boolean scale_it, filled;
    point sz;

    rec_bb(g, g);               /* compute all cluster bounding boxes */
    if ((GD_maxrank(g) > 0) && (GD_drawing(g)->ratio_kind)) {
        sz.x = GD_bb(g).UR.x - GD_bb(g).LL.x;
        sz.y = GD_bb(g).UR.y - GD_bb(g).LL.y; /* normalize */
        if (GD_flip(g)) {
            /* rankdir=LR: compare against the rotated extents */
            int t = sz.x;
            sz.x = sz.y;
            sz.y = t;
        }
        scale_it = TRUE;
        if (GD_drawing(g)->ratio_kind == R_AUTO)
            filled = idealsize(g, .5);
        else
            filled = (GD_drawing(g)->ratio_kind == R_FILL);
        if (filled) {
            /* fill is weird because both X and Y can stretch */
            if (GD_drawing(g)->size.x <= 0)
                scale_it = FALSE;
            else {
                xf = (double) GD_drawing(g)->size.x / (double) sz.x;
                yf = (double) GD_drawing(g)->size.y / (double) sz.y;
                if ((xf < 1.0) || (yf < 1.0)) {
                    /* never shrink: normalize so the smaller factor is 1 */
                    if (xf < yf) {
                        yf = yf / xf;
                        xf = 1.0;
                    } else {
                        xf = xf / yf;
                        yf = 1.0;
                    }
                }
            }
        } else if (GD_drawing(g)->ratio_kind == R_EXPAND) {
            if (GD_drawing(g)->size.x <= 0)
                scale_it = FALSE;
            else {
                xf = (double) GD_drawing(g)->size.x /
                    (double) GD_bb(g).UR.x;
                yf = (double) GD_drawing(g)->size.y /
                    (double) GD_bb(g).UR.y;
                if ((xf > 1.0) && (yf > 1.0)) {
                    /* expand uniformly until one dimension hits size */
                    double scale = MIN(xf, yf);
                    xf = yf = scale;
                } else
                    scale_it = FALSE;
            }
        } else if (GD_drawing(g)->ratio_kind == R_VALUE) {
            /* scale one axis up so height/width matches the given ratio */
            desired = GD_drawing(g)->ratio;
            actual = ((double) sz.y) / ((double) sz.x);
            if (actual < desired) {
                yf = desired / actual;
                xf = 1.0;
            } else {
                xf = actual / desired;
                yf = 1.0;
            }
        } else
            scale_it = FALSE;
        if (scale_it) {
            if (GD_flip(g)) {
                /* undo the earlier axis swap before applying */
                double t = xf;
                xf = yf;
                yf = t;
            }
            for (n = GD_nlist(g); n; n = ND_next(n)) {
                ND_coord(n).x = ROUND(ND_coord(n).x * xf);
                ND_coord(n).y = ROUND(ND_coord(n).y * yf);
            }
            scale_bb(g, g, xf, yf);
        }
    }

    if (asp) adjustAspectRatio (g, asp);
}
1081 
1082 static point resize_leaf(node_t * leaf, point lbound)
1083 {
1084  gv_nodesize(leaf, GD_flip(agraphof(leaf)));
1085  ND_coord(leaf).y = lbound.y;
1086  ND_coord(leaf).x = lbound.x + ND_lw(leaf);
1087  lbound.x = lbound.x + ND_lw(leaf) + ND_rw(leaf) + GD_nodesep(agraphof(leaf));
1088  return lbound;
1089 }
1090 
1091 static point place_leaf(node_t * leaf, point lbound, int order)
1092 {
1093  node_t *leader;
1094  graph_t *g = agraphof(leaf);
1095 
1096  leader = UF_find(leaf);
1097  if (leaf != leader)
1098  fast_nodeapp(leader, leaf);
1099  ND_order(leaf) = order;
1100  ND_rank(leaf) = ND_rank(leader);
1101  GD_rank(g)[ND_rank(leaf)].v[ND_order(leaf)] = leaf;
1102  return resize_leaf(leaf, lbound);
1103 }
1104 
/* make_leafslots:
 * Make space for the leaf nodes of each rank: expand the rank's node
 * array so that each LEAFSET node's slot is followed by enough empty
 * slots to hold all the leaves merged into it.
 */
static void make_leafslots(graph_t * g)
{
    int i, j, r;
    node_t *v;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
	j = 0;
	/* assign each node its final order, reserving room after leaf sets */
	for (i = 0; i < GD_rank(g)[r].n; i++) {
	    v = GD_rank(g)[r].v[i];
	    ND_order(v) = j;
	    if (ND_ranktype(v) == LEAFSET)
		j = j + ND_UF_size(v);	/* one slot per merged leaf */
	    else
		j++;
	}
	if (j <= GD_rank(g)[r].n)
	    continue;		/* no leaf sets on this rank; nothing to do */
	GD_rank(g)[r].v = ALLOC(j + 1, GD_rank(g)[r].v, node_t *);
	/* move nodes to their new (>= old) slots; iterate backwards so a
	 * node is never overwritten before it has been moved */
	for (i = GD_rank(g)[r].n - 1; i >= 0; i--) {
	    v = GD_rank(g)[r].v[i];
	    GD_rank(g)[r].v[ND_order(v)] = v;
	}
	GD_rank(g)[r].n = j;
	GD_rank(g)[r].v[j] = NULL;	/* keep the array NULL-terminated */
    }
}
1132 
/* do_leaves:
 * Lay out the leaf nodes previously merged into leader: restore each
 * leaf to the fast graph, pack the leaves left-to-right starting at the
 * leader's left edge, and re-attach each leaf's edge to its shared
 * endpoint's edge list. No-op unless leader heads a merged leaf set.
 */
static void do_leaves(graph_t * g, node_t * leader)
{
    int j;
    point lbound;
    node_t *n;
    edge_t *e;

    if (ND_UF_size(leader) <= 1)
	return;			/* not a merged leaf set */
    /* start packing at the leader's left edge, on the leader's rank */
    lbound.x = ND_coord(leader).x - ND_lw(leader);
    lbound.y = ND_coord(leader).y;
    lbound = resize_leaf(leader, lbound);
    if (ND_out(leader).size > 0) { /* in-edge leaves */
	/* n is the common head shared by every leaf in this set */
	n = aghead(ND_out(leader).list[0]);
	j = ND_order(leader) + 1;	/* leaves go just after the leader */
	for (e = agfstin(g, n); e; e = agnxtin(g, e)) {
#ifndef WITH_CGRAPH
	    edge_t *e1 = e;
#else
	    /* cgraph may return an in-edge view; normalize to out-edge */
	    edge_t *e1 = AGMKOUT(e);
#endif
	    /* a leaf is any tail merged into leader other than leader itself */
	    if ((agtail(e1) != leader) && (UF_find(agtail(e1)) == leader)) {
		lbound = place_leaf(agtail(e1), lbound, j++);
		unmerge_oneway(e1);
		elist_append(e1, ND_in(aghead(e1)));
	    }
	}
    } else {			/* out edge leaves */
	/* n is the common tail shared by every leaf in this set */
	n = agtail(ND_in(leader).list[0]);
	j = ND_order(leader) + 1;	/* leaves go just after the leader */
	for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
	    /* a leaf is any head merged into leader other than leader itself */
	    if ((aghead(e) != leader) && (UF_find(aghead(e)) == leader)) {
		lbound = place_leaf(aghead(e), lbound, j++);
		unmerge_oneway(e);
		elist_append(e, ND_out(agtail(e)));
	    }
	}
    }
}
1172 
1173 int ports_eq(edge_t * e, edge_t * f)
1174 {
1175  return ((ED_head_port(e).defined == ED_head_port(f).defined)
1176  && (((ED_head_port(e).p.x == ED_head_port(f).p.x) &&
1177  (ED_head_port(e).p.y == ED_head_port(f).p.y))
1178  || (ED_head_port(e).defined == FALSE))
1179  && (((ED_tail_port(e).p.x == ED_tail_port(f).p.x) &&
1180  (ED_tail_port(e).p.y == ED_tail_port(f).p.y))
1181  || (ED_tail_port(e).defined == FALSE))
1182  );
1183 }
1184 
1185 static void expand_leaves(graph_t * g)
1186 {
1187  int i, d;
1188  node_t *n;
1189  edge_t *e, *f;
1190 
1191  make_leafslots(g);
1192  for (n = GD_nlist(g); n; n = ND_next(n)) {
1193  if (ND_inleaf(n))
1194  do_leaves(g, ND_inleaf(n));
1195  if (ND_outleaf(n))
1196  do_leaves(g, ND_outleaf(n));
1197  if (ND_other(n).list)
1198  for (i = 0; (e = ND_other(n).list[i]); i++) {
1199  if ((d = ND_rank(aghead(e)) - ND_rank(aghead(e))) == 0)
1200  continue;
1201  f = ED_to_orig(e);
1202  if (ports_eq(e, f) == FALSE) {
1203  zapinlist(&(ND_other(n)), e);
1204  if (d == 1)
1205  fast_edge(e);
1206  /*else unitize(e); ### */
1207  i--;
1208  }
1209  }
1210  }
1211 }
1212 
1213 /* make_lrvn:
1214  * Add left and right slacknodes to a cluster which
1215  * are used in the LP to constrain nodes not in g but
1216  * sharing its ranks to be to the left or right of g
1217  * by a specified amount.
1218  * The slacknodes ln and rn give the x position of the
1219  * left and right side of the cluster's bounding box. In
1220  * particular, any cluster labels on the left or right side
1221  * are inside.
1222  * If a cluster has a label, and we have rankdir!=LR, we make
1223  * sure the cluster is wide enough for the label. Note that
1224  * if the label is wider than the cluster, the nodes in the
1225  * cluster may not be centered.
1226  */
1227 static void make_lrvn(graph_t * g)
1228 {
1229  node_t *ln, *rn;
1230 
1231  if (GD_ln(g))
1232  return;
1233  ln = virtual_node(agroot(g));
1234  ND_node_type(ln) = SLACKNODE;
1235  rn = virtual_node(agroot(g));
1236  ND_node_type(rn) = SLACKNODE;
1237 
1238  if (GD_label(g) && (g != agroot(g)) && !GD_flip(agroot(g))) {
1239  int w = MAX(GD_border(g)[BOTTOM_IX].x, GD_border(g)[TOP_IX].x);
1240  make_aux_edge(ln, rn, w, 0);
1241  }
1242 
1243  GD_ln(g) = ln;
1244  GD_rn(g) = rn;
1245 }
1246 
1247 /* contain_nodes:
1248  * make left and right bounding box virtual nodes ln and rn
1249  * constrain interior nodes
1250  */
1251 static void contain_nodes(graph_t * g)
1252 {
1253  int margin, r;
1254  node_t *ln, *rn, *v;
1255 
1256  margin = late_int (g, G_margin, CL_OFFSET, 0);
1257  make_lrvn(g);
1258  ln = GD_ln(g);
1259  rn = GD_rn(g);
1260  for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1261  if (GD_rank(g)[r].n == 0)
1262  continue;
1263  v = GD_rank(g)[r].v[0];
1264  if (v == NULL) {
1265  agerr(AGERR, "contain_nodes clust %s rank %d missing node\n",
1266  agnameof(g), r);
1267  continue;
1268  }
1269  make_aux_edge(ln, v,
1270  ND_lw(v) + margin + GD_border(g)[LEFT_IX].x, 0);
1271  v = GD_rank(g)[r].v[GD_rank(g)[r].n - 1];
1272  make_aux_edge(v, rn,
1273  ND_rw(v) + margin + GD_border(g)[RIGHT_IX].x, 0);
1274  }
1275 }
1276 
1277 /* idealsize:
1278  * set g->drawing->size to a reasonable default.
1279  * returns a boolean to indicate if drawing is to
1280  * be scaled and filled */
1281 static boolean idealsize(graph_t * g, double minallowed)
1282 {
1283  double xf, yf, f, R;
1284  pointf b, relpage, margin;
1285 
1286  /* try for one page */
1287  relpage = GD_drawing(g)->page;
1288  if (relpage.x < 0.001 || relpage.y < 0.001)
1289  return FALSE; /* no page was specified */
1290  margin = GD_drawing(g)->margin;
1291  relpage = sub_pointf(relpage, margin);
1292  relpage = sub_pointf(relpage, margin);
1293  b.x = GD_bb(g).UR.x;
1294  b.y = GD_bb(g).UR.y;
1295  xf = relpage.x / b.x;
1296  yf = relpage.y / b.y;
1297  if ((xf >= 1.0) && (yf >= 1.0))
1298  return FALSE; /* fits on one page */
1299 
1300  f = MIN(xf, yf);
1301  xf = yf = MAX(f, minallowed);
1302 
1303  R = ceil((xf * b.x) / relpage.x);
1304  xf = ((R * relpage.x) / b.x);
1305  R = ceil((yf * b.y) / relpage.y);
1306  yf = ((R * relpage.y) / b.y);
1307  GD_drawing(g)->size.x = b.x * xf;
1308  GD_drawing(g)->size.y = b.y * yf;
1309  return TRUE;
1310 }