
autora.theorist.darts.operations

Cosine

Bases: Module

A pytorch module implementing the cosine function.

\[ y = \cos(x) \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Cosine(nn.Module):
    r"""
    A pytorch module implementing the cosine function.

    $$
    y = \cos(x)
    $$
    """

    def __init__(self):
        """
        Initializes the cosine function.
        """
        super(Cosine, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the cosine function.

        Arguments:
            x: input tensor
        """
        return torch.cos(x)

__init__()

Initializes the cosine function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the cosine function.
    """
    super(Cosine, self).__init__()

forward(x)

Forward pass of the cosine function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the cosine function.

    Arguments:
        x: input tensor
    """
    return torch.cos(x)
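
For illustration, a minimal usage sketch (illustrative values; the same pattern applies to the other elementwise modules on this page, such as Exponential, Sine, and Tangens_Hyperbolicus):

import torch
from autora.theorist.darts.operations import Cosine

op = Cosine()
x = torch.tensor([0.0, torch.pi / 2, torch.pi])
y = op(x)  # elementwise cosine: approximately [1.0, 0.0, -1.0]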

Exponential

Bases: Module

A pytorch module implementing the exponential function.

\[ y = e^x \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Exponential(nn.Module):
    """
    A pytorch module implementing the exponential function.

    $$
    y = e^x
    $$
    """

    def __init__(self):
        """
        Initializes the exponential function.
        """
        super(Exponential, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the exponential function.

        Arguments:
            x: input tensor
        """
        return torch.exp(x)

__init__()

Initializes the exponential function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the exponential function.
    """
    super(Exponential, self).__init__()

forward(x)

Forward pass of the exponential function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the exponential function.

    Arguments:
        x: input tensor
    """
    return torch.exp(x)

Identity

Bases: Module

A pytorch module implementing the identity function.

\[ y = x \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Identity(nn.Module):
    """
    A pytorch module implementing the identity function.

    $$
    y = x
    $$
    """

    def __init__(self):
        """
        Initializes the identity function.
        """
        super(Identity, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the identity function.

        Arguments:
            x: input tensor
        """
        return x

__init__()

Initializes the identity function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the identity function.
    """
    super(Identity, self).__init__()

forward(x)

Forward pass of the identity function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the identity function.

    Arguments:
        x: input tensor
    """
    return x

MultInverse

Bases: Module

A pytorch module implementing the multiplicative inverse.

\[ y = \frac{1}{x} \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class MultInverse(nn.Module):
    r"""
    A pytorch module implementing the multiplicative inverse.

    $$
    y = \frac{1}{x}
    $$
    """

    def __init__(self):
        """
        Initializes the multiplicative inverse.
        """
        super(MultInverse, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the multiplicative inverse.

        Arguments:
            x: input tensor
        """
        return torch.pow(x, -1)

__init__()

Initializes the multiplicative inverse.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the multiplicative inverse.
    """
    super(MultInverse, self).__init__()

forward(x)

Forward pass of the multiplicative inverse.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the multiplicative inverse.

    Arguments:
        x: input tensor
    """
    return torch.pow(x, -1)
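
One edge case worth noting (an illustrative sketch, not part of the source): torch.pow(x, -1) follows IEEE floating-point semantics, so a zero input yields inf rather than raising an error.

import torch
from autora.theorist.darts.operations import MultInverse

op = MultInverse()
y = op(torch.tensor([2.0, -4.0, 0.0]))  # tensor([0.5000, -0.2500, inf])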

NatLogarithm

Bases: Module

A pytorch module implementing the natural logarithm function.

\[ y = \ln(x) \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class NatLogarithm(nn.Module):
    r"""
    A pytorch module implementing the natural logarithm function.

    $$
    y = \ln(x)
    $$

    """

    def __init__(self):
        """
        Initializes the natural logarithm function.
        """
        super(NatLogarithm, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the natural logarithm function.

        Arguments:
            x: input tensor
        """
        # make sure x is in domain of natural logarithm
        mask = x.clone()
        mask[(x <= 0.0).detach()] = 0
        mask[(x > 0.0).detach()] = 1

        epsilon = 1e-10
        result = torch.log(nn.functional.relu(x) + epsilon) * mask

        return result

__init__()

Initializes the natural logarithm function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the natural logarithm function.
    """
    super(NatLogarithm, self).__init__()

forward(x)

Forward pass of the natural logarithm function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the natural logarithm function.

    Arguments:
        x: input tensor
    """
    # make sure x is in domain of natural logarithm
    mask = x.clone()
    mask[(x <= 0.0).detach()] = 0
    mask[(x > 0.0).detach()] = 1

    epsilon = 1e-10
    result = torch.log(nn.functional.relu(x) + epsilon) * mask

    return result
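
The masking above restricts the operation to the logarithm's domain, so non-positive inputs produce 0 instead of nan or -inf. A small sketch of the resulting behavior (illustrative values):

import torch
from autora.theorist.darts.operations import NatLogarithm

op = NatLogarithm()
x = torch.tensor([-1.0, 0.0, 1.0, torch.e])
y = op(x)  # approximately [0.0, 0.0, 0.0, 1.0]; entries with x <= 0 are masked to 0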

NegIdentity

Bases: Module

A pytorch module implementing the negation of the identity function.

\[ y = -x \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class NegIdentity(nn.Module):
    """
    A pytorch module implementing the negation of the identity function.

    $$
    y = -x
    $$
    """

    def __init__(self):
        """
        Initializes the negation of the identity function.
        """
        super(NegIdentity, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the negation of the identity function.

        Arguments:
            x: input tensor
        """
        return -x

__init__()

Initializes the negation of the identity function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the negation of the identity function.
    """
    super(NegIdentity, self).__init__()

forward(x)

Forward pass of the negation of the identity function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the negation of the identity function.

    Arguments:
        x: input tensor
    """
    return -x

Sine

Bases: Module

A pytorch module implementing the sine function.

\[ y = \sin(x) \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Sine(nn.Module):
    r"""
    A pytorch module implementing the sine function.

    $$
    y = \sin(x)
    $$
    """

    def __init__(self):
        """
        Initializes the sine function.
        """
        super(Sine, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the sine function.

        Arguments:
            x: input tensor
        """
        return torch.sin(x)

__init__()

Initializes the sine function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the sine function.
    """
    super(Sine, self).__init__()

forward(x)

Forward pass of the sine function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the sine function.

    Arguments:
        x: input tensor
    """
    return torch.sin(x)

Softminus

Bases: Module

A pytorch module implementing the softminus function:

\[ \operatorname{Softminus}(x) = x - \frac{1}{β} \operatorname{log} \left( 1 + e^{β x} \right) \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Softminus(nn.Module):
    """
    A pytorch module implementing the softminus function:

    $$
    \\operatorname{Softminus}(x) = x - \\frac{1}{β} \\operatorname{log} \\left( 1 + e^{β x} \\right)
    $$
    """

    # This docstring is a normal string, so backslashes need to be escaped

    def __init__(self):
        """
        Initializes the softminus function.
        """
        super(Softminus, self).__init__()
        # self.beta = nn.Linear(1, 1, bias=False)
        self.beta = nn.Parameter(torch.ones(1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the softminus function.

        Arguments:
            x: input tensor
        """
        y = x - torch.log(1 + torch.exp(self.beta * x)) / self.beta
        return y

__init__()

Initializes the softminus function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the softminus function.
    """
    super(Softminus, self).__init__()
    # self.beta = nn.Linear(1, 1, bias=False)
    self.beta = nn.Parameter(torch.ones(1))

forward(x)

Forward pass of the softminus function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the softminus function.

    Arguments:
        x: input tensor
    """
    y = x - torch.log(1 + torch.exp(self.beta * x)) / self.beta
    return y
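
Because beta is a learnable nn.Parameter (initialized to 1), the shape of the saturating branch can be fit during training. A minimal sketch with the default beta = 1 (illustrative values):

import torch
from autora.theorist.darts.operations import Softminus

op = Softminus()
x = torch.tensor([-2.0, 0.0, 2.0])
y = op(x)  # x - log(1 + exp(x)): approximately [-2.13, -0.69, -0.13]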

Softplus

Bases: Module

A pytorch module implementing the softplus function:

\[ \operatorname{Softplus}(x) = \frac{1}{β} \operatorname{log} \left( 1 + e^{β x} \right) \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Softplus(nn.Module):
    r"""
    A pytorch module implementing the softplus function:

    $$
    \operatorname{Softplus}(x) = \frac{1}{β} \operatorname{log} \left( 1 + e^{β x} \right)
    $$
    """

    # This docstring is a raw-string (it starts `r"""` rather than `"""`)
    # so backslashes need not be escaped

    def __init__(self):
        """
        Initializes the softplus function.
        """
        super(Softplus, self).__init__()
        # self.beta = nn.Linear(1, 1, bias=False)
        self.beta = nn.Parameter(torch.ones(1))
        # self.softplus = nn.Softplus(beta=self.beta)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the softplus function.

        Arguments:
            x: input tensor
        """
        y = torch.log(1 + torch.exp(self.beta * x)) / self.beta
        # y = self.softplus(x)
        return y

__init__()

Initializes the softplus function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the softplus function.
    """
    super(Softplus, self).__init__()
    # self.beta = nn.Linear(1, 1, bias=False)
    self.beta = nn.Parameter(torch.ones(1))

forward(x)

Forward pass of the softplus function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the softplus function.

    Arguments:
        x: input tensor
    """
    y = torch.log(1 + torch.exp(self.beta * x)) / self.beta
    # y = self.softplus(x)
    return y
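
With the default beta = 1 this reduces to the standard softplus, so the output should agree with PyTorch's built-in implementation; a quick sketch:

import torch
from autora.theorist.darts.operations import Softplus

op = Softplus()
x = torch.tensor([-2.0, 0.0, 2.0])
torch.allclose(op(x), torch.nn.functional.softplus(x))  # True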

Tangens_Hyperbolicus

Bases: Module

A pytorch module implementing the tangens hyperbolicus function.

\[ y = \tanh(x) \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Tangens_Hyperbolicus(nn.Module):
    r"""
    A pytorch module implementing the tangens hyperbolicus function.

    $$
    y = \tanh(x)
    $$
    """

    def __init__(self):
        """
        Initializes the tangens hyperbolicus function.
        """
        super(Tangens_Hyperbolicus, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the tangens hyperbolicus function.

        Arguments:
            x: input tensor
        """
        return torch.tanh(x)

__init__()

Initializes the tangens hyperbolicus function.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self):
    """
    Initializes the tangens hyperbolicus function.
    """
    super(Tangens_Hyperbolicus, self).__init__()

forward(x)

Forward pass of the tangens hyperbolicus function.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the tangens hyperbolicus function.

    Arguments:
        x: input tensor
    """
    return torch.tanh(x)

Zero

Bases: Module

A pytorch module implementing the zero operation (i.e., a null operation). A zero operation presumes that there is no relationship between the input and output.

\[ y = 0 \]
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
class Zero(nn.Module):
    """
    A pytorch module implementing the zero operation (i.e., a null operation). A zero operation
    presumes that there is no relationship between the input and output.

    $$
    y = 0
    $$
    """

    def __init__(self, stride):
        """
        Initializes the zero operation.
        """
        super(Zero, self).__init__()
        self.stride = stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the zero operation.

        Arguments:
            x: input tensor
        """
        if self.stride == 1:
            return x.mul(0.0)
        return x[:, :, :: self.stride, :: self.stride].mul(0.0)

__init__(stride)

Initializes the zero operation.

Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def __init__(self, stride):
    """
    Initializes the zero operation.
    """
    super(Zero, self).__init__()
    self.stride = stride

forward(x)

Forward pass of the zero operation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
x | Tensor | input tensor | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass of the zero operation.

    Arguments:
        x: input tensor
    """
    if self.stride == 1:
        return x.mul(0.0)
    return x[:, :, :: self.stride, :: self.stride].mul(0.0)
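
Note that the strided branch implicitly assumes a 4-D input in (N, C, H, W) layout, which the docstring does not state. A brief sketch of both branches (illustrative shapes):

import torch
from autora.theorist.darts.operations import Zero

op = Zero(stride=1)
op(torch.ones(3))  # tensor([0., 0., 0.]): same shape, all zeros

op = Zero(stride=2)
op(torch.ones(1, 1, 4, 4)).shape  # torch.Size([1, 1, 2, 2]): downsampled zeros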

get_operation_label(op_name, params_org, decimals=4, input_var='x', output_format='console')

Returns a complete string describing a DARTS operation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
op_name | str | name of the operation | required
params_org | List | original parameters of the operation | required
decimals | int | number of decimals to be used for converting the parameters into string format | 4
input_var | str | name of the input variable | 'x'
output_format | Literal['latex', 'console'] | format of the output string (either "latex" or "console") | 'console'

Examples:

>>> get_operation_label("classifier", [1], decimals=2)
'1.00 * x'
>>> import numpy as np
>>> print(get_operation_label("classifier_concat", np.array([1, 2, 3]),
...     decimals=2, output_format="latex"))
x \circ \left(1.00\right) + \left(2.00\right) + \left(3.00\right)
>>> get_operation_label("classifier_concat", np.array([1, 2, 3]),
...     decimals=2, output_format="console")
'x .* (1.00) .+ (2.00) .+ (3.00)'
>>> get_operation_label("linear_exp", [1,2], decimals=2)
'exp(1.00 * x + 2.00)'
>>> get_operation_label("none", [])
''
>>> get_operation_label("reciprocal", [1], decimals=0)
'1 / x'
>>> get_operation_label("linear_reciprocal", [1, 2], decimals=0)
'1 / (1 * x + 2)'
>>> get_operation_label("linear_relu", [1], decimals=0)
'ReLU(1 * x)'
>>> print(get_operation_label("linear_relu", [1], decimals=0, output_format="latex"))
\operatorname{ReLU}\left(1x\right)
>>> get_operation_label("linear", [1, 2], decimals=0)
'1 * x + 2'
>>> get_operation_label("linear", [1, 2], decimals=0, output_format="latex")
'1 x + 2'
>>> get_operation_label("linrelu", [1], decimals=0)  # Mistyped operation name
Traceback (most recent call last):
...
NotImplementedError: operation 'linrelu' is not defined for output_format 'console'
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def get_operation_label(
    op_name: str,
    params_org: typing.List,
    decimals: int = 4,
    input_var: str = "x",
    output_format: typing.Literal["latex", "console"] = "console",
) -> str:
    r"""
    Returns a complete string describing a DARTS operation.

    Arguments:
        op_name: name of the operation
        params_org: original parameters of the operation
        decimals: number of decimals to be used for converting the parameters into string format
        input_var: name of the input variable
        output_format: format of the output string (either "latex" or "console")

    Examples:
        >>> get_operation_label("classifier", [1], decimals=2)
        '1.00 * x'
        >>> import numpy as np
        >>> print(get_operation_label("classifier_concat", np.array([1, 2, 3]),
        ...     decimals=2, output_format="latex"))
        x \circ \left(1.00\right) + \left(2.00\right) + \left(3.00\right)
        >>> get_operation_label("classifier_concat", np.array([1, 2, 3]),
        ...     decimals=2, output_format="console")
        'x .* (1.00) .+ (2.00) .+ (3.00)'
        >>> get_operation_label("linear_exp", [1,2], decimals=2)
        'exp(1.00 * x + 2.00)'
        >>> get_operation_label("none", [])
        ''
        >>> get_operation_label("reciprocal", [1], decimals=0)
        '1 / x'
        >>> get_operation_label("linear_reciprocal", [1, 2], decimals=0)
        '1 / (1 * x + 2)'
        >>> get_operation_label("linear_relu", [1], decimals=0)
        'ReLU(1 * x)'
        >>> print(get_operation_label("linear_relu", [1], decimals=0, output_format="latex"))
        \operatorname{ReLU}\left(1x\right)
        >>> get_operation_label("linear", [1, 2], decimals=0)
        '1 * x + 2'
        >>> get_operation_label("linear", [1, 2], decimals=0, output_format="latex")
        '1 x + 2'
        >>> get_operation_label("linrelu", [1], decimals=0)  # Mistyped operation name
        Traceback (most recent call last):
        ...
        NotImplementedError: operation 'linrelu' is not defined for output_format 'console'
    """
    if output_format != "latex" and output_format != "console":
        raise ValueError("output_format must be either 'latex' or 'console'")

    params = params_org.copy()

    format_string = "{:." + "{:.0f}".format(decimals) + "f}"

    classifier_str = ""
    if op_name == "classifier":
        value = params[0]
        classifier_str = f"{format_string.format(value)} * {input_var}"
        return classifier_str

    if op_name == "classifier_concat":
        if output_format == "latex":
            classifier_str = input_var + " \\circ \\left("
        else:
            classifier_str = input_var + " .* ("
        for param_idx, param in enumerate(params):
            if param_idx > 0:
                if output_format == "latex":
                    classifier_str += " + \\left("
                else:
                    classifier_str += " .+ ("

            if isiterable(param.tolist()):
                param_formatted = list()
                for value in param.tolist():
                    param_formatted.append(format_string.format(value))

                for value_idx, value in enumerate(param_formatted):
                    if value_idx < len(param) - 1:
                        classifier_str += value + " + "
                    else:
                        if output_format == "latex":
                            classifier_str += value + "\\right)"
                        else:
                            classifier_str += value + ")"

            else:
                value = format_string.format(param)

                if output_format == "latex":
                    classifier_str += value + "\\right)"
                else:
                    classifier_str += value + ")"

        return classifier_str

    num_params = len(params)

    c = [str(format_string.format(p)) for p in params_org]
    c.extend(["", "", ""])

    if num_params == 1:  # without bias
        if output_format == "console":
            labels = {
                "none": "",
                "add": f"+ {input_var}",
                "subtract": f"- {input_var}",
                "mult": f"{c[0]} * {input_var}",
                "linear": f"{c[0]} * {input_var}",
                "relu": f"ReLU({input_var})",
                "linear_relu": f"ReLU({c[0]} * {input_var})",
                "logistic": f"logistic({input_var})",
                "linear_logistic": f"logistic({c[0]} * {input_var})",
                "exp": f"exp({input_var})",
                "linear_exp": f"exp({c[0]} * {input_var})",
                "reciprocal": f"1 / {input_var}",
                "linear_reciprocal": f"1 / ({c[0]} * {input_var})",
                "ln": f"ln({input_var})",
                "linear_ln": f"ln({c[0]} * {input_var})",
                "cos": f"cos({input_var})",
                "linear_cos": f"cos({c[0]} * {input_var})",
                "sin": f"sin({input_var})",
                "linear_sin": f"sin({c[0]} * {input_var})",
                "tanh": f"tanh({input_var})",
                "linear_tanh": f"tanh({c[0]} * {input_var})",
                "classifier": classifier_str,
            }
        elif output_format == "latex":
            labels = {
                "none": "",
                "add": f"+ {input_var}",
                "subtract": f"- {input_var}",
                "mult": f"{c[0]} {input_var}",
                "linear": c[0] + "" + input_var,
                "relu": f"\\operatorname{{ReLU}}\\left({input_var}\\right)",
                "linear_relu": f"\\operatorname{{ReLU}}\\left({c[0]}{input_var}\\right)",
                "logistic": f"\\sigma\\left({input_var}\\right)",
                "linear_logistic": f"\\sigma\\left({c[0]} {input_var} \\right)",
                "exp": f"+ e^{input_var}",
                "linear_exp": f"e^{{{c[0]} {input_var} }}",
                "reciprocal": f"\\frac{{1}}{{{input_var}}}",
                "linear_reciprocal": f"\\frac{{1}}{{{c[0]} {input_var} }}",
                "ln": f"\\ln\\left({input_var}\\right)",
                "linear_ln": f"\\ln\\left({c[0]} {input_var} \\right)",
                "cos": f"\\cos\\left({input_var}\\right)",
                "linear_cos": f"\\cos\\left({c[0]} {input_var} \\right)",
                "sin": f"\\sin\\left({input_var}\\right)",
                "linear_sin": f"\\sin\\left({c[0]} {input_var} \\right)",
                "tanh": f"\\tanh\\left({input_var}\\right)",
                "linear_tanh": f"\\tanh\\left({c[0]} {input_var} \\right)",
                "classifier": classifier_str,
            }
    else:  # with bias
        if output_format == "console":
            labels = {
                "none": "",
                "add": f"+ {input_var}",
                "subtract": f"- {input_var}",
                "mult": f"{c[0]} * {input_var}",
                "linear": f"{c[0]} * {input_var} + {c[1]}",
                "relu": f"ReLU({input_var})",
                "linear_relu": f"ReLU({c[0]} * {input_var} + {c[1]} )",
                "logistic": f"logistic({input_var})",
                "linear_logistic": f"logistic({c[0]} * {input_var} + {c[1]})",
                "exp": f"exp({input_var})",
                "linear_exp": f"exp({c[0]} * {input_var} + {c[1]})",
                "reciprocal": f"1 / {input_var}",
                "linear_reciprocal": f"1 / ({c[0]} * {input_var} + {c[1]})",
                "ln": f"ln({input_var})",
                "linear_ln": f"ln({c[0]} * {input_var} + {c[1]})",
                "cos": f"cos({input_var})",
                "linear_cos": f"cos({c[0]} * {input_var} + {c[1]})",
                "sin": f"sin({input_var})",
                "linear_sin": f"sin({c[0]} * {input_var} + {c[1]})",
                "tanh": f"tanh({input_var})",
                "linear_tanh": f"tanh({c[0]} * {input_var} + {c[1]})",
                "classifier": classifier_str,
            }
        elif output_format == "latex":
            labels = {
                "none": "",
                "add": f"+ {input_var}",
                "subtract": f"- {input_var}",
                "mult": f"{c[0]} * {input_var}",
                "linear": f"{c[0]} {input_var} + {c[1]}",
                "relu": f"\\operatorname{{ReLU}}\\left( {input_var}\\right)",
                "linear_relu": f"\\operatorname{{ReLU}}\\left({c[0]}{input_var} + {c[1]} \\right)",
                "logistic": f"\\sigma\\left( {input_var} \\right)",
                "linear_logistic": f"\\sigma\\left( {c[0]} {input_var} + {c[1]} \\right)",
                "exp": f"e^{input_var}",
                "linear_exp": f"e^{{ {c[0]} {input_var} + {c[1]} }}",
                "reciprocal": f"\\frac{{1}}{{{input_var}}}",
                "linear_reciprocal": f"\\frac{{1}} {{ {c[0]}{input_var} + {c[1]} }}",
                "ln": f"\\ln\\left({input_var}\\right)",
                "linear_ln": f"\\ln\\left({c[0]} {input_var} + {c[1]} \\right)",
                "cos": f"\\cos\\left({input_var}\\right)",
                "linear_cos": f"\\cos\\left({c[0]} {input_var} + {c[1]} \\right)",
                "sin": f"\\sin\\left({input_var}\\right)",
                "linear_sin": f"\\sin\\left({c[0]} {input_var} + {c[1]} \\right)",
                "tanh": f"\\tanh\\left({input_var}\\right)",
                "linear_tanh": f"\\tanh\\left({c[0]} {input_var} + {c[1]} \\right)",
                "classifier": classifier_str,
            }

    if op_name not in labels:
        raise NotImplementedError(
            f"operation '{op_name}' is not defined for output_format '{output_format}'"
        )

    return labels[op_name]

isiterable(p_object)

Checks if an object is iterable.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
p_object | Any | object to be checked | required
Source code in temp_dir/darts/src/autora/theorist/darts/operations.py
def isiterable(p_object: typing.Any) -> bool:
    """
    Checks if an object is iterable.

    Arguments:
        p_object: object to be checked
    """
    try:
        iter(p_object)
    except TypeError:
        return False
    return True
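
For example (a quick doctest-style illustration):

>>> isiterable([1, 2, 3])
True
>>> isiterable(42)
False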