Skip to contents

Flattens a contiguous range of dims into a tensor. For use with nn_sequential().

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start,..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *), i.e. the flattened dimensions are collapsed into a single dimension whose size is the product of their sizes.

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10
#> -0.2787 -0.9834 -0.6188 -0.2791  0.8884  0.1878  0.2976 -2.9988  0.1845 -2.1581
#> -0.0384 -0.1011 -0.5504 -0.7248 -0.6579  0.0759 -1.5380 -1.5640  0.2440  1.2480
#> -0.8327  1.0228  0.4853  1.7132 -1.4429 -0.4752  0.0869  0.9424 -0.8087  0.8835
#> -0.2827  1.1923 -0.2399  1.6664 -1.5052 -0.0997  2.7545 -0.5823  0.5105 -0.5634
#>  0.1195 -0.0103  1.6720  1.1616  0.6461 -0.5101  0.6062 -0.8938  0.9929 -1.1437
#> -0.0707  0.0586  1.3464 -0.1811 -0.0052  0.2070  1.1008  0.2369 -0.3690 -0.2291
#>  1.0942 -0.8464  1.1028 -2.1436 -1.3896  0.1858  0.3903  0.0307 -1.3655 -0.1036
#> -1.3659  0.3108 -0.2351 -3.1751  0.4301 -0.2746 -0.0085 -1.1157  0.8035 -0.4155
#>  0.9532 -1.0880 -1.0963 -1.9701  0.6924 -0.6996  2.7517 -1.0013 -0.2163 -0.8934
#> -2.1180 -0.7322  0.9450  1.0836 -1.1577  0.8713 -1.0938 -1.5625 -0.5750 -0.5198
#>  1.3496 -1.0237  0.4848  0.9574 -1.7476  0.2555  0.2705 -0.6386  2.0567 -0.9271
#> -0.0585  0.9011 -2.2642  1.5073  0.6271 -0.0815 -0.1684  0.9974 -1.6034  0.0697
#> -0.5647 -0.4162 -0.3568 -0.9207  0.2625  1.2908 -1.0942  1.2588  1.4331 -1.1277
#>  1.2634  0.1872 -0.5621  1.7371 -0.2193 -0.8888 -0.0074 -2.2032  2.0243 -1.6950
#>  0.6034 -0.9054 -0.2400 -1.4059  1.0607  0.6055 -1.6829  0.2024 -0.0197  0.9397
#> -1.9015  0.1482 -1.2143 -0.4966 -0.0237  0.6577 -0.3283 -0.3463  0.2279  0.4532
#> -0.3383 -1.6638  0.8932  0.6198 -0.3760  1.2134  0.6558  2.0490  0.2099  0.5571
#>  1.0669  0.8394  0.7871 -2.3825  2.5432 -0.0592 -0.7658 -0.7233  0.8735 -0.1338
#>  2.2742  1.5531  0.0850  0.7217  0.9868 -0.6018 -1.1765 -1.9885 -1.0000  0.0485
#> -1.8584  0.7507 -0.1966 -1.9736 -0.9920  0.4731  1.1212  0.2191 -1.0398 -0.8360
#> -0.2367 -0.2409 -1.6313  1.0380  1.3557 -0.7850  0.3908  1.5505  0.2372 -0.5961
#>  0.1625  1.5591 -0.4206 -1.8614 -0.2811 -0.0857  1.9030 -1.5774 -0.6202 -0.7399
#> -0.8026 -2.2925  0.2506  1.3411 -1.1845 -1.7539 -1.4895  1.0664  0.9415 -0.2747
#> -1.4919 -0.9782 -0.2116 -0.3027 -0.3562 -1.6048 -1.0615  1.1195 -0.4835  0.0859
#> -0.4831 -1.3131  0.1236 -0.9675 -0.5423  0.1666 -0.2475  0.1604 -1.1910 -0.9818
#>  0.6336  0.5266  0.1657  0.6974  0.8299 -0.0637 -0.5124 -0.1976 -0.1285 -0.0699
#> -0.4511  1.6924  0.7609  0.8204 -0.6559 -0.0469  0.5124 -1.9499  0.9375 -0.3147
#> -0.3964 -1.9832  1.6079  0.2773  0.1526 -0.0571 -1.4917 -0.7881 -1.6935  0.5154
#>  0.5006  0.3948  0.3777 -0.8555  0.9662  0.2718 -0.2701  1.8521  1.0771 -1.2083
#> -0.6743 -0.4485  0.2643 -1.2473 -1.1114  1.4072  1.2592 -0.2888  2.2765 -0.2501
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]