distributor: remove deprecated code
[dpdk.git] lib/librte_distributor/rte_distributor.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_function_versioning.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "distributor_private.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
        .name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/

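/*
 * Note on the burst handshake used below: each worker has a cache-line-sized
 * array of 64-bit slots (bufptr64 for packets handed out by the distributor,
 * retptr64 for packets handed back). The low RTE_DISTRIB_FLAG_BITS bits of
 * each slot carry the handshake flags (RTE_DISTRIB_GET_BUF,
 * RTE_DISTRIB_RETURN_BUF, RTE_DISTRIB_VALID_BUF); the mbuf pointer is stored
 * in the remaining bits, shifted left by RTE_DISTRIB_FLAG_BITS.
 */
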
void
rte_distributor_request_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt,
                unsigned int count)
{
        struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
        unsigned int i;

        volatile int64_t *retptr64;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                rte_distributor_request_pkt_v20(d->d_v20,
                        worker_id, oldpkt[0]);
                return;
        }

        retptr64 = &(buf->retptr64[0]);
        /* Spin while the handshake bits are set (the scheduler clears them).
         * Sync with distributor on GET_BUF flag.
         */
        while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
                        & RTE_DISTRIB_GET_BUF)) {
                rte_pause();
                uint64_t t = rte_rdtsc()+100;

                while (rte_rdtsc() < t)
                        rte_pause();
        }

        /*
         * OK, if we've got here, then the scheduler has just cleared the
         * handshake bits. Populate the retptrs with returning packets.
         */

        for (i = count; i < RTE_DIST_BURST_SIZE; i++)
                buf->retptr64[i] = 0;

        /* Set the return bit for each packet returned */
        for (i = count; i-- > 0; )
                buf->retptr64[i] =
                        (((int64_t)(uintptr_t)(oldpkt[i])) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /*
         * Finally, set the GET_BUF flag to signal to the distributor that the
         * cache line is ready for processing.
         * Sync with distributor to release retptrs.
         */
        __atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
                        __ATOMIC_RELEASE);
}

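/*
 * Non-blocking poll: if the distributor has refilled this worker's buffer,
 * copy out the valid mbuf pointers and hand the cache line back; otherwise
 * return -1 so the caller can retry later.
 */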
int
rte_distributor_poll_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        uint64_t ret;
        int count = 0;
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
                return (pkts[0]) ? 1 : 0;
        }

        /* If the GET_BUF bit is still set, the distributor has not yet
         * refilled this buffer, so there is nothing to collect.
         * Sync with distributor to acquire bufptrs.
         */
        if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF)
                return -1;

        /* since bufptr64 is signed, this should be an arithmetic shift */
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
                        ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
                        pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
                }
        }

        /*
         * Now that we've copied the contents of the cache line into an array
         * of mbuf pointers, toggle the bit so the scheduler can start working
         * on the next cache line while we're working on this one.
         * Sync with distributor on GET_BUF flag. Release bufptrs.
         */
        __atomic_store_n(&(buf->bufptr64[0]),
                buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

        return count;
}

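/*
 * Blocking variant: request a new burst in exchange for oldpkt, then poll,
 * backing off roughly 100 TSC cycles between attempts, until the distributor
 * publishes the next set of packets.
 */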
int
rte_distributor_get_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts,
                struct rte_mbuf **oldpkt, unsigned int return_count)
{
        int count;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (return_count <= 1) {
                        pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
                                worker_id, oldpkt[0]);
                        return (pkts[0]) ? 1 : 0;
                } else
                        return -EINVAL;
        }

        rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

        count = rte_distributor_poll_pkt(d, worker_id, pkts);
        while (count == -1) {
                uint64_t t = rte_rdtsc() + 100;

                while (rte_rdtsc() < t)
                        rte_pause();

                count = rte_distributor_poll_pkt(d, worker_id, pkts);
        }
        return count;
}

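/*
 * Hand packets back to the distributor without requesting a new burst.
 * The retptr64 array only has RTE_DIST_BURST_SIZE slots, so num is expected
 * not to exceed that.
 */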
int
rte_distributor_return_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (num == 1)
                        return rte_distributor_return_pkt_v20(d->d_v20,
                                worker_id, oldpkt[0]);
                else
                        return -EINVAL;
        }

        /* Sync with distributor to acquire retptrs */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
                /* Switch off the return bit first */
                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

        for (i = num; i-- > 0; )
                buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /* Set the GET_BUF bit even if we got no returns.
         * Sync with distributor on GET_BUF flag. Release retptrs.
         */
        __atomic_store_n(&(buf->retptr64[0]),
                buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

        return 0;
}

/**** APIs called on distributor core ****/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
                unsigned int *ret_start, unsigned int *ret_count)
{
        if (!oldbuf)
                return;
        /* store returns in a circular buffer */
        d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
                        = (void *)oldbuf;
        *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
        *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}

/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
                        uint16_t *data_ptr,
                        uint16_t *output_ptr)
{
        struct rte_distributor_backlog *bl;
        uint16_t i, j, w;

        /*
         * Function overview:
         * 1. Loop through all worker IDs
         * 2. Compare the current inflights to the incoming tags
         * 3. Compare the current backlog to the incoming tags
         * 4. Add any matches to the output
         */

        for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
                output_ptr[j] = 0;

        for (i = 0; i < d->num_workers; i++) {
                bl = &d->backlog[i];

                for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (d->in_flight_tags[i][j] == data_ptr[w]) {
                                        output_ptr[w] = i+1;
                                        break;
                                }
                for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (bl->tags[j] == data_ptr[w]) {
                                        output_ptr[w] = i+1;
                                        break;
                                }
        }

        /*
         * At this stage, the output contains 8 16-bit values, with
         * each non-zero value holding the worker ID (+1) to which the
         * corresponding flow is pinned.
         */
}

/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        uintptr_t oldbuf;
        unsigned int ret_start = d->returns.start,
                        ret_count = d->returns.count;
        unsigned int count = 0;
        unsigned int i;

        /* Sync on GET_BUF flag. Acquire retptrs. */
        if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF) {
                for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                        if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
                                oldbuf = ((uintptr_t)(buf->retptr64[i] >>
                                        RTE_DISTRIB_FLAG_BITS));
                                /* store returns in a circular buffer */
                                store_return(oldbuf, d, &ret_start, &ret_count);
                                count++;
                                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
                        }
                }
                d->returns.start = ret_start;
                d->returns.count = ret_count;
                /* Clear for the worker to populate with more returns.
                 * Sync with worker on GET_BUF flag. Release retptrs.
                 */
                __atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
        }
        return count;
}

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cacheline is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        unsigned int i;

        /* Sync with worker on GET_BUF flag */
        while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF))
                rte_pause();

        handle_returns(d, wkr);

        buf->count = 0;

        for (i = 0; i < d->backlog[wkr].count; i++) {
                d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
                                RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
                d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
        }
        buf->count = i;
        for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
                buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
                d->in_flight_tags[wkr][i] = 0;
        }

        d->backlog[wkr].count = 0;

        /* Clear the GET_BUF bit.
         * Sync with worker on GET_BUF flag. Release bufptrs.
         */
        __atomic_store_n(&(buf->bufptr64[0]),
                buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
        return buf->count;
}

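/*
 * Note: the worker index used for round-robin assignment below (wkr) is a
 * static variable, so the position persists across calls; this assumes
 * rte_distributor_process() is only ever called from a single (distributor)
 * lcore.
 */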
/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
        unsigned int next_idx = 0;
        static unsigned int wkr;
        struct rte_mbuf *next_mb = NULL;
        int64_t next_value = 0;
        uint16_t new_tag = 0;
        uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
        unsigned int i, j, w, wid;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
        }

        if (unlikely(num_mbufs == 0)) {
                /* Flush out all non-full cache-lines to workers. */
                for (wid = 0 ; wid < d->num_workers; wid++) {
                        /* Sync with worker on GET_BUF flag. */
                        if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                                __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
                                release(d, wid);
                                handle_returns(d, wid);
                        }
                }
                return 0;
        }

        while (next_idx < num_mbufs) {
                uint16_t matches[RTE_DIST_BURST_SIZE];
                unsigned int pkts;

                /* Sync with worker on GET_BUF flag. */
                if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
                        __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
                        d->bufs[wkr].count = 0;

                if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
                        pkts = num_mbufs - next_idx;
                else
                        pkts = RTE_DIST_BURST_SIZE;

                for (i = 0; i < pkts; i++) {
                        if (mbufs[next_idx + i]) {
                                /* flows have to be non-zero */
                                flows[i] = mbufs[next_idx + i]->hash.usr | 1;
                        } else
                                flows[i] = 0;
                }
                for (; i < RTE_DIST_BURST_SIZE; i++)
                        flows[i] = 0;

                switch (d->dist_match_fn) {
                case RTE_DIST_MATCH_VECTOR:
                        find_match_vec(d, &flows[0], &matches[0]);
                        break;
                default:
                        find_match_scalar(d, &flows[0], &matches[0]);
                }

                /*
                 * The matches array now contains the intended worker ID (+1)
                 * for each incoming packet. Any zeroes need to be assigned
                 * workers.
                 */

                for (j = 0; j < pkts; j++) {

                        next_mb = mbufs[next_idx++];
                        next_value = (((int64_t)(uintptr_t)next_mb) <<
                                        RTE_DISTRIB_FLAG_BITS);
                        /*
                         * The user is advised to set the tag value for each
                         * mbuf before calling rte_distributor_process().
                         * User-defined tags are used to identify flows,
                         * or sessions.
                         */
                        /* flows MUST be non-zero */
                        new_tag = (uint16_t)(next_mb->hash.usr) | 1;

                        /*
                         * Uncommenting the next line will cause the find_match
                         * function to be optimized out, making this function
                         * do parallel (non-atomic) distribution
                         */
                        /* matches[j] = 0; */

                        if (matches[j]) {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[matches[j]-1];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, matches[j]-1);
                                }

                                /* Add to the worker that already has the flow */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;

                        } else {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[wkr];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, wkr);
                                }

                                /* Add to the current worker */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;
                                /*
                                 * Now that we've just added an unpinned flow
                                 * to a worker, we need to ensure that all
                                 * other packets with that same flow will go
                                 * to the same worker in this burst.
                                 */
                                for (w = j; w < pkts; w++)
                                        if (flows[w] == new_tag)
                                                matches[w] = wkr+1;
                        }
                }
                wkr++;
                if (wkr >= d->num_workers)
                        wkr = 0;
        }

        /* Flush out all non-full cache-lines to workers. */
        for (wid = 0 ; wid < d->num_workers; wid++)
                /* Sync with worker on GET_BUF flag. */
                if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                        __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
                        release(d, wid);

        return num_mbufs;
}

/* return to the caller the packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
        struct rte_distributor_returned_pkts *returns = &d->returns;
        unsigned int retval = (max_mbufs < returns->count) ?
                        max_mbufs : returns->count;
        unsigned int i;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_returned_pkts_v20(d->d_v20,
                                mbufs, max_mbufs);
        }

        for (i = 0; i < retval; i++) {
                unsigned int idx = (returns->start + i) &
                                RTE_DISTRIB_RETURNS_MASK;

                mbufs[i] = returns->mbufs[idx];
        }
        returns->start += i;
        returns->count -= i;

        return retval;
}

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
        unsigned int wkr, total_outstanding = 0;

        for (wkr = 0; wkr < d->num_workers; wkr++)
                total_outstanding += d->backlog[wkr].count;

        return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight or
 * queued up.
 */
int
rte_distributor_flush(struct rte_distributor *d)
{
        unsigned int flushed;
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_flush_v20(d->d_v20);
        }

        flushed = total_outstanding(d);

        while (total_outstanding(d) > 0)
                rte_distributor_process(d, NULL, 0);

        /* wait 10ms to allow all workers to drain the packets */
        rte_delay_us(10000);

        /*
         * Send empty burst to all workers to allow them to exit
         * gracefully, should they need to.
         */
        rte_distributor_process(d, NULL, 0);

        for (wkr = 0; wkr < d->num_workers; wkr++)
                handle_returns(d, wkr);

        return flushed;
}

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                rte_distributor_clear_returns_v20(d->d_v20);
                return;
        }

        /* throw away returns, so workers can exit */
        for (wkr = 0; wkr < d->num_workers; wkr++)
                /* Sync with worker. Release retptrs. */
                __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
                                __ATOMIC_RELEASE);
}

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
                unsigned int socket_id,
                unsigned int num_workers,
                unsigned int alg_type)
{
        struct rte_distributor *d;
        struct rte_dist_burst_list *dist_burst_list;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        unsigned int i;

        /* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

        /* compilation-time checks */
        RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

        if (name == NULL || num_workers >=
                (unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
                rte_errno = EINVAL;
                return NULL;
        }

        if (alg_type == RTE_DIST_ALG_SINGLE) {
                d = malloc(sizeof(struct rte_distributor));
                if (d == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
                d->d_v20 = rte_distributor_create_v20(name,
                                socket_id, num_workers);
                if (d->d_v20 == NULL) {
                        free(d);
                        /* rte_errno will have been set */
                        return NULL;
                }
                d->alg_type = alg_type;
                return d;
        }

        snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
        mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
        if (mz == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        d = mz->addr;
        strlcpy(d->name, name, sizeof(d->name));
        d->num_workers = num_workers;
        d->alg_type = alg_type;

        d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
        d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

        /*
         * Set up the backlog tags so they're pointing at the second cache
         * line for performance during flow matching
         */
        for (i = 0 ; i < num_workers ; i++)
                d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

        dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
                                          rte_dist_burst_list);

        rte_mcfg_tailq_write_lock();
        TAILQ_INSERT_TAIL(dist_burst_list, d, next);
        rte_mcfg_tailq_write_unlock();

        return d;
}