nn_flatten

Flattens a contiguous range of dims into a tensor (e.g. an input of shape (32, 1, 5, 5) becomes (32, 25) with the defaults). For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start, ..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *), i.e. the dims from start_dim through end_dim are collapsed into a single dimension whose size is their product.

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10 0.3414  1.4172 -0.2245 -2.3498  0.8088 -0.5900 -1.4348 -0.2555  0.0856 -0.1470
#> -0.2562  1.0186  0.7340 -0.9071  0.1262  0.0185  0.2693 -0.2455 -1.2903  1.3679
#> -0.1227  0.3036 -1.7116  1.3567 -1.0850 -0.1995 -0.1796  1.1058 -0.4257  0.3216
#> -0.0304 -0.0993 -0.8617  0.5962 -1.2919 -1.0631  0.5723 -2.6218  1.9393 -1.5536
#> -0.5744 -0.0140 -0.3859  1.6819 -0.3882 -0.9478  0.4032 -0.9905 -1.2374 -2.1449
#>  0.3335  0.4189  0.5678 -1.1395  0.3034 -1.1231 -0.1766 -0.1657 -0.7484 -0.2134
#> -1.9903 -1.6371 -0.8343 -0.4999 -1.1450 -0.7339  0.5514  0.6359 -0.2671 -1.1508
#>  0.9898 -0.3173  1.1422  0.0984  0.6608  0.9813 -0.9789 -2.4036 -0.3874 -0.2408
#> -0.6683  1.0776  0.8150  0.1223  2.0755  0.8961 -1.3912 -0.1158  0.6846 -1.1518
#>  0.6964  0.1632  0.2014 -1.0950  1.1358 -0.3988 -0.3358  1.2969 -0.1528  0.3344
#>  2.3188  1.2347 -1.3994  0.1436 -1.0340  0.3865  0.4184  1.4298  1.0784  1.0048
#> -0.8209 -0.2545  1.9278  0.3264  0.0207  1.5441 -2.5865  1.5996  1.4066 -1.1861
#>  1.0962 -0.9140  0.5624  0.0875  0.0536 -1.9282 -0.7041 -1.1322  1.9905  1.0092
#>  0.9830 -1.8805  0.5195 -0.0745 -1.2066 -0.2288  0.0403  0.5113 -1.8514 -0.7671
#> -0.2433  1.0515 -1.9236 -0.0143  0.7319  0.2881 -0.4955  1.2336  0.4039  0.8045
#>  0.6902  1.0722 -1.7630 -0.3296  0.9708  0.8026 -0.7594 -1.2242  1.0917  1.5090
#>  0.1586 -1.1633 -1.3019  2.0981 -1.2106 -0.4412  0.0987 -0.7431 -2.3836 -0.4060
#> -1.1836 -0.5273 -1.2248  1.8811 -0.0855  0.3938 -0.7996  0.0970 -0.8964  0.2675
#>  1.5628 -0.2681 -0.7136  0.1082  0.6869  0.6946  0.8965 -1.3609 -0.4226 -0.7592
#> -0.0218 -1.8922 -0.5585 -0.5389 -0.8711  1.8041  0.2643 -0.7478  0.5371  1.2493
#>  0.0994  0.7220 -1.1397 -0.3484  1.0617 -0.0595  0.9764  1.3327  0.5193 -0.9787
#> -0.1262 -1.0548 -1.8387 -1.4496  0.8483 -1.6491 -0.1907 -0.3284 -1.4446  0.0115
#> -0.1807  0.6286  1.0333 -0.7512 -1.6034 -0.4044  0.4790  0.4853  0.8527  1.4714
#> -0.9737  0.1319  1.1866  1.8487  0.1008 -0.0325  0.5450  0.1374 -1.6942  1.5386
#> -0.3499  0.5352 -0.9529  1.3397  0.3577  0.9674  1.0757  0.4940 -1.1408 -0.5594
#>  2.1339 -1.6276 -0.6136 -0.9999 -0.0202  0.4095  0.8060  0.9502  0.3855 -0.6868
#>  1.1898  0.6490  0.3671 -0.2081  2.3352 -1.0646  1.0199 -0.5126 -0.5850 -0.7029
#>  0.2236  0.4740 -0.7601 -0.1738 -0.4172 -0.4636 -0.2691  0.8853  0.3416  0.1001
#>  1.7272 -0.2192 -1.1331  0.1856 -2.1165  1.8084  1.2823  0.3418 -0.2047  2.0500
#> -0.1034  2.3951  0.7393 -0.3647  1.2854 -0.7929 -1.0363  0.8989 -0.6022  0.9659
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]