Skip to contents

Flattens a contiguous range of dims into a single dimension. For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start,..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *), i.e. the flattened dimensions are collapsed into their product.

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10-0.4549 -0.1398 -1.0047 -0.7178 -0.8060 -0.0321  0.7830 -0.7003 -0.6135 -0.9191
#> -0.8385  1.5758  2.6711  1.5588 -0.6169 -1.4964 -1.1829 -0.3387 -0.6650  0.7601
#> -1.6185 -0.1392 -0.6900  0.5984  2.1502 -1.8544 -1.5861  0.5919  0.1759 -0.8455
#>  0.5296 -0.7523  1.5946 -1.1501 -0.2728  0.1091 -0.3997  0.4994  0.2952  0.5781
#>  1.0755  2.1849  1.7126 -0.1137 -0.1978 -1.2907 -0.5851 -0.7340  1.1924  1.9563
#>  0.8110 -0.0180 -0.6739 -1.3154 -0.7041 -0.3938 -1.2320 -0.7840 -1.0517  2.1076
#>  2.1627  0.1856 -0.6921 -0.2721  0.5870  0.6844  2.2912  0.5013  0.3775 -0.5315
#> -1.0022 -0.4284  2.1071 -1.0520 -0.3663  1.6166 -0.1370 -0.3083  1.1686  0.4968
#>  0.4399  1.4106 -0.2619 -0.5332 -0.7987  0.0672  0.9113 -0.0578  0.2700 -0.4342
#> -0.9817  0.4672  0.3418 -1.3629  0.7969 -0.2742 -2.2088  0.3453  0.7701 -0.9413
#>  1.2393  0.0514  1.0590  1.5640  1.4744  0.3185 -0.9035 -0.7252 -0.2449  0.0699
#> -0.2789  0.0220  0.2666 -0.1673 -1.3826  1.5125 -0.2238 -1.1082 -0.6384  0.3131
#>  0.4231 -0.0360 -0.0353 -0.0810 -1.2656 -1.7806  1.9488 -1.2305 -1.1672 -0.5863
#> -0.0135 -1.6663 -0.0508 -0.6024  1.8186  0.9275  0.5305  0.9016 -0.8834  0.6896
#>  1.3515  0.4183  0.0995  0.4833 -0.4979 -0.0024  0.8740 -0.1578 -0.6952  0.9214
#>  0.1399  0.0323  0.4199 -2.1567 -1.9431 -0.4902 -0.6290  0.0949 -1.3042 -1.1717
#>  0.4538  1.2390  1.7215  0.2371  0.3234 -0.4940 -1.2967 -0.9484  0.3714  0.2299
#> -1.1899  0.4899 -0.1216 -1.3644 -1.8527  0.3499  0.8627 -1.1497  1.5390 -0.5623
#>  1.1694 -0.1931  0.2720 -0.3031 -0.2556  1.5983  0.1095  0.8584  0.5161 -1.1175
#>  0.8231  1.3434 -1.3116 -0.9301 -1.3980 -0.0558 -0.3305 -1.3796 -0.2204 -1.8987
#>  0.0391 -0.4324 -1.9816  1.2055 -0.5249  0.1980  0.6856  0.1354  0.4197  0.6871
#> -0.9600  0.5105  1.8189  0.6006 -1.6762 -1.0775  0.8277  1.0235  1.3500 -0.4710
#>  0.8358  0.4768 -0.6924  0.3381  0.2404 -0.3531 -0.4523  0.1145  1.5026  0.6283
#> -1.5765 -0.4676 -1.7735  0.0946  0.1954 -0.1159  0.6945  0.5388  1.4407 -0.8176
#>  0.7489  1.0810  0.0526 -0.3178  0.3366 -1.1074 -0.2208  0.2637  0.7211 -0.6860
#>  0.2860  0.4166 -2.2942  0.1662  0.1758 -0.9454  0.3404  0.9025 -0.4128  0.4384
#>  0.4451 -0.2588 -0.2669 -0.5930 -0.6711 -0.6000  1.2648  1.1503 -1.8591  0.3655
#>  0.8134 -1.3613  2.2754  0.7789  2.0540 -0.1526 -1.2822  0.4464 -0.4650  0.0224
#>  0.6703 -0.3402 -0.2007 -0.5535  1.1624  0.8967  1.1060  0.5988  0.9298 -0.2902
#>  1.3976  0.7477 -0.7355 -0.3221 -0.5493 -0.9383 -0.8045  0.5160  0.3720 -0.6395
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]